Demos of the org.apache.lucene.index.TermDocs class

The examples below show how the org.apache.lucene.index.TermDocs API is used in real projects; you can also follow the project links to view the full source code on GitHub.
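
Before the project snippets, here is a minimal, self-contained sketch of the TermDocs iteration pattern they all share. It targets older Lucene releases (2.x/3.x, where TermDocs still exists); the index path, field name, term value and class name are placeholders, not taken from any of the projects below:

import java.io.File;
import java.io.IOException;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermDocs;
import org.apache.lucene.store.FSDirectory;

public class TermDocsDemo {
    public static void main(String[] args) throws IOException {
        // Open a reader over an existing index (the path is a placeholder).
        IndexReader reader = IndexReader.open(FSDirectory.open(new File("/path/to/index")));
        try {
            // Ask the reader for the postings of a single term.
            TermDocs td = reader.termDocs(new Term("ID", "some-node-ref"));
            try {
                while (td.next()) {
                    // doc() is the matching document id, freq() the within-document frequency.
                    System.out.println("doc=" + td.doc() + " freq=" + td.freq());
                }
            } finally {
                td.close();
            }
        } finally {
            reader.close();
        }
    }
}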

protected boolean locateContainer(String nodeRef, IndexReader reader)
{
    boolean found = false;
    try
    {
        TermDocs td = reader.termDocs(new Term("ID", nodeRef));
        while (td.next())
        {
            int doc = td.doc();
            Document document = reader.document(doc);
            if (document.getField("ISCONTAINER") != null)
            {
                found = true;
                break;
            }
        }
        td.close();
    }
    catch (IOException e)
    {
        throw new LuceneIndexException("Failed to delete container and below for " + nodeRef, e);
    }
    return found;        
}
 
Example 2   Project: semanticvectors   File: TermFreqFilter.java
public boolean filter(Term t)
{
    int freq = 0;
    TermDocs tDocs;

    try {
        // Sum the term's frequency over every document that contains it.
        tDocs = indexReader.termDocs(t);
        while (tDocs.next()) {
            freq += tDocs.freq();
        }
        if (freq < minFreq) {
            return false;
        }
    }
    catch (Exception e)
    {
        e.printStackTrace();
    }

    return true;
}
 
final Scorer scorer(IndexReader reader) throws IOException {
    if (terms.size() == 0)                        // optimize zero-term case
        return null;
    if (terms.size() == 1) {                      // optimize one-term case
        Term term = (Term) terms.elementAt(0);
        TermDocs docs = reader.termDocs(term);
        if (docs == null)
            return null;
        return new TermScorer(docs, reader.norms(term.field()), weight);
    }

    TermPositions[] tps = new TermPositions[terms.size()];
    for (int i = 0; i < terms.size(); i++) {
        TermPositions p = reader.termPositions((Term) terms.elementAt(i));
        if (p == null)
            return null;
        tps[i] = p;
    }

    if (slop == 0)                                // optimize exact case
        return new ExactPhraseScorer(tps, reader.norms(field), weight);
    else
        return new SloppyPhraseScorer(tps, slop, reader.norms(field), weight);
}
 
protected static Set<String> deletePrimary(Collection<String> nodeRefs, IndexReader reader, boolean delete)
        throws LuceneIndexException
{

    Set<String> refs = new LinkedHashSet<String>();

    for (String nodeRef : nodeRefs)
    {

        try
        {
            TermDocs td = reader.termDocs(new Term("PRIMARYPARENT", nodeRef));
            while (td.next())
            {
                int doc = td.doc();
                Document document = reader.document(doc);
                String[] ids = document.getValues("ID");
                refs.add(ids[ids.length - 1]);
                if (delete)
                {
                    reader.deleteDocument(doc);
                }
            }
            td.close();
        }
        catch (IOException e)
        {
            throw new LuceneIndexException("Failed to delete node by primary parent for " + nodeRef, e);
        }
    }

    return refs;

}
 
protected static Set<String> deleteReference(Collection<String> nodeRefs, IndexReader reader, boolean delete)
        throws LuceneIndexException
{

    Set<String> refs = new LinkedHashSet<String>();

    for (String nodeRef : nodeRefs)
    {

        try
        {
            TermDocs td = reader.termDocs(new Term("PARENT", nodeRef));
            while (td.next())
            {
                int doc = td.doc();
                Document document = reader.document(doc);
                String[] ids = document.getValues("ID");
                refs.add(ids[ids.length - 1]);
                if (delete)
                {
                    reader.deleteDocument(doc);
                }
            }
            td.close();
        }
        catch (IOException e)
        {
            throw new LuceneIndexException("Failed to delete node by parent for " + nodeRef, e);
        }
    }

    return refs;

}
 
protected static Set<String> deleteContainerAndBelow(String nodeRef, IndexReader reader, boolean delete,
        boolean cascade) throws LuceneIndexException
{
    Set<String> refs = new LinkedHashSet<String>();

    try
    {
        if (delete)
        {
            reader.deleteDocuments(new Term("ID", nodeRef));
        }
        refs.add(nodeRef);
        if (cascade)
        {
            TermDocs td = reader.termDocs(new Term("ANCESTOR", nodeRef));
            while (td.next())
            {
                int doc = td.doc();
                Document document = reader.document(doc);
                String[] ids = document.getValues("ID");
                refs.add(ids[ids.length - 1]);
                if (delete)
                {
                    reader.deleteDocument(doc);
                }
            }
            td.close();
        }
    }
    catch (IOException e)
    {
        throw new LuceneIndexException("Failed to delete container and below for " + nodeRef, e);
    }
    return refs;
}
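
Taken together, deleteContainerAndBelow, deletePrimary and deleteReference all return the IDs of the documents they touched, so they can be chained. The following is only a hedged sketch of one possible composition inside the same indexer class; the method name deleteContainerCascade and the calling convention are assumptions, not code from the original project:

protected static Set<String> deleteContainerCascade(String nodeRef, IndexReader reader)
        throws LuceneIndexException
{
    // Illustrative sketch only: remove the container and everything below it
    // (ANCESTOR matches), collecting the IDs of every document that was touched.
    Set<String> affected = new LinkedHashSet<String>();
    Set<String> containerRefs = deleteContainerAndBelow(nodeRef, reader, true, true);
    affected.addAll(containerRefs);

    // Then remove documents whose PRIMARYPARENT or PARENT points at a removed node.
    affected.addAll(deletePrimary(containerRefs, reader, true));
    affected.addAll(deleteReference(containerRefs, reader, true));

    return affected;
}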
 
Example 8   Project: alfresco-repository   File: TermScorer.java
/** Construct a <code>TermScorer</code>.
 * @param weight The weight of the <code>Term</code> in the query.
 * @param td An iterator over the documents matching the <code>Term</code>.
 * @param similarity The <code>Similarity</code> implementation to be used for score computations.
 * @param norms The field norms of the document fields for the <code>Term</code>.
 */
TermScorer(Weight weight, TermDocs td, Similarity similarity,
           byte[] norms) {
  super(similarity);
  this.weight = weight;
  this.termDocs = td;
  this.norms = norms;
  this.weightValue = weight.getValue();

  for (int i = 0; i < SCORE_CACHE_SIZE; i++)
    scoreCache[i] = getSimilarity().tf(i) * weightValue;
}
 
Example 9   Project: alfresco-repository   File: TermQuery.java
public Scorer scorer(IndexReader reader) throws IOException {
  TermDocs termDocs = reader.termDocs(term);

  if (termDocs == null)
    return null;

  String field = term.field();
  return new TermScorer(this, termDocs, similarity,
          reader.hasNorms(field) ? reader.norms(field) : null);
}
 
Example 10   Project: imhotep   File: LuceneUnsortedIntTermDocIterator.java
static LuceneUnsortedIntTermDocIterator create(final IndexReader r, final String field) throws IOException {
    final TermEnum terms = r.terms(new Term(field, ""));
    final TermDocs termDocs;
    try {
        termDocs = r.termDocs();
    } catch (IOException e) {
        try {
            terms.close();
        } catch (IOException e1) {
            log.error("error closing TermEnum", e1);
        }
        throw e;
    }
    return new LuceneUnsortedIntTermDocIterator(field, terms, termDocs);
}
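
The factory above pairs a TermEnum positioned at the start of a field with an unpositioned TermDocs. For context, here is a generic sketch of how such a pair is typically walked in pre-4.0 Lucene; it illustrates the standard TermEnum/TermDocs idiom the iterator builds on and is not the imhotep implementation itself (the method name and field handling are illustrative):

// Walk every (term, doc) pair of one field using the classic TermEnum/TermDocs idiom.
static void walkField(IndexReader reader, String fieldName) throws IOException {
    final String field = fieldName.intern();      // enables the identity check below
    final TermEnum terms = reader.terms(new Term(field, ""));
    final TermDocs termDocs = reader.termDocs();
    try {
        do {
            final Term term = terms.term();
            if (term == null || term.field() != field) {
                break;                            // ran past the requested field
            }
            termDocs.seek(terms);                 // reposition the postings on this term
            while (termDocs.next()) {
                final int doc = termDocs.doc();   // consume (term, doc) here
            }
        } while (terms.next());
    } finally {
        termDocs.close();
        terms.close();
    }
}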
 
/**
 * @param id String
 * @param in TermDocs
 */
public FilterTermDocs(String id, TermDocs in)
{
    this.in = in;
}
 
@Override
public TermDocs termDocs() throws IOException
{
    return new FilterTermDocs(id, super.termDocs());
}
 
Example 13   Project: imhotep   File: LuceneUnsortedIntTermDocIterator.java
LuceneUnsortedIntTermDocIterator(String field, TermEnum terms, TermDocs termDocs) {
    this.field = field.intern();
    this.terms = terms;
    this.termDocs = termDocs;
}
 
Example 14   Project: imhotep   File: LuceneDocIdStream.java
public LuceneDocIdStream(final TermDocs termDocs) {
    this.termDocs = termDocs;
}
 