The following examples demonstrate how to use the org.apache.lucene.search.BulkScorer API; follow the links to view the full source code on GitHub.
/**
 * Lower-level suggest API.
 *
 * <p>Runs {@code query} against every leaf of the index and feeds each matching
 * completion hit to {@code collector} via
 * {@link TopSuggestDocsCollector#collect(int, CharSequence, CharSequence, float)}.
 *
 * @param query the completion query to execute; rewritten before scoring
 * @param collector receives one callback per matching completion
 * @throws IOException if reading the index fails
 */
public void suggest(CompletionQuery query, TopSuggestDocsCollector collector) throws IOException {
  // TODO use IndexSearcher.rewrite instead
  // have to implement equals() and hashCode() in CompletionQuerys and co
  query = (CompletionQuery) query.rewrite(getIndexReader());
  final Weight weight = query.createWeight(this, collector.scoreMode(), 1f);
  for (LeafReaderContext leaf : getIndexReader().leaves()) {
    final BulkScorer bulkScorer = weight.bulkScorer(leaf);
    if (bulkScorer == null) {
      // no matches in this segment
      continue;
    }
    try {
      bulkScorer.score(collector.getLeafCollector(leaf), leaf.reader().getLiveDocs());
    } catch (CollectionTerminatedException ignored) {
      // the collector signalled early termination for this leaf;
      // proceed with the remaining leaves
    }
  }
}
/**
 * Returns the default (scorer-iterating) bulk scorer rather than any
 * specialized one, so that profiling can attribute time to each phase.
 */
@Override
public BulkScorer bulkScorer(LeafReaderContext context) throws IOException {
// We use the default bulk scorer instead of the specialized one. The reason
// is that Lucene's BulkScorers do everything at once: finding matches,
// scoring them and calling the collector, so they make it impossible to
// see where time is spent, which is the purpose of query profiling.
// The default bulk scorer will pull a scorer and iterate over matches,
// this might be a significantly different execution path for some queries
// like disjunctions, but in general this is what is done anyway
return super.bulkScorer(context);
}
/**
 * Builds a {@link CompletionScorer} over the suggest field for this leaf.
 *
 * @return a scorer for the leaf, or {@code null} when the field is absent,
 *     no suggester (FST) exists for the segment, or the filter matches no docs
 * @throws IllegalArgumentException if the field exists but is not a SuggestField
 * @throws IOException if reading the segment fails
 */
@Override
public BulkScorer bulkScorer(final LeafReaderContext context) throws IOException {
  final LeafReader reader = context.reader();
  final Terms terms = reader.terms(completionQuery.getField());
  if (terms == null) {
    // field not present in this segment
    return null;
  }
  if (!(terms instanceof CompletionTerms)) {
    throw new IllegalArgumentException(completionQuery.getField() + " is not a SuggestField");
  }
  final NRTSuggester suggester = ((CompletionTerms) terms).suggester();
  if (suggester == null) {
    // a segment can have a null suggester, i.e. no FST was built
    return null;
  }
  final BitsProducer filter = completionQuery.getFilter();
  Bits filteredDocs = null;
  if (filter != null) {
    filteredDocs = filter.getBits(context);
    if (filteredDocs.getClass() == Bits.MatchNoBits.class) {
      // the filter excludes every document in this segment
      return null;
    }
  }
  return new CompletionScorer(this, suggester, reader, filteredDocs, filter != null, automaton);
}
/**
 * Delegates to the rewritten per-segment weight when one exists; otherwise
 * wraps a bit-set-backed scorer in a {@link DefaultBulkScorer}.
 *
 * @return a bulk scorer for the leaf, or {@code null} when nothing matches
 * @throws IOException if reading the segment fails
 */
@Override
public BulkScorer bulkScorer(LeafReaderContext context) throws IOException {
  final SegState segState = getSegState(context);
  // A non-null weight means the query was rewritten for this segment;
  // let it produce its own bulk scorer.
  if (segState.weight != null) {
    return segState.weight.bulkScorer(context);
  }
  // Otherwise score from the pre-collected doc-id set.
  final Scorer docScorer = scorer(segState.set);
  return docScorer == null ? null : new DefaultBulkScorer(docScorer);
}
/**
 * Returns the default (scorer-iterating) bulk scorer rather than any
 * specialized one, so that profiling can attribute time to each phase.
 */
@Override
public BulkScorer bulkScorer(LeafReaderContext context) throws IOException {
// We use the default bulk scorer instead of the specialized one. The reason
// is that Lucene's BulkScorers do everything at once: finding matches,
// scoring them and calling the collector, so they make it impossible to
// see where time is spent, which is the purpose of query profiling.
// The default bulk scorer will pull a scorer and iterate over matches,
// this might be a significantly different execution path for some queries
// like disjunctions, but in general this is what is done anyway
return super.bulkScorer(context);
}
/**
 * Registers this leaf's reader with {@code shardKeyMap}, then delegates
 * bulk scoring to the wrapped weight {@code in}.
 */
@Override
public BulkScorer bulkScorer(LeafReaderContext context) throws IOException {
// NOTE(review): presumably shardKeyMap tracks readers for per-shard cache
// bookkeeping/eviction — confirm against the enclosing class.
shardKeyMap.add(context.reader());
return in.bulkScorer(context);
}
/**
 * Registers this leaf's reader with {@code shardKeyMap}, then delegates
 * bulk scoring to the wrapped weight {@code in}.
 */
@Override
public BulkScorer bulkScorer(LeafReaderContext context) throws IOException {
// NOTE(review): presumably shardKeyMap tracks readers for per-shard cache
// bookkeeping/eviction — confirm against the enclosing class.
shardKeyMap.add(context.reader());
return in.bulkScorer(context);
}