Listed below are example usages of org.apache.lucene.search.ConstantScoreWeight#org.apache.lucene.search.Scorer, collected from open-source projects.
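Before the individual examples, here is a minimal, self-contained sketch of the pattern most of them share: a Query whose ConstantScoreWeight wraps a DocIdSetIterator in a ConstantScoreScorer inside scorer(LeafReaderContext). This is only an illustration assuming the Lucene 8.x/9.x API; the class name MatchAllConstantQuery is invented for the sketch and does not appear in any of the projects quoted below.

// A minimal sketch, assuming the Lucene 8.x/9.x API; MatchAllConstantQuery is a
// hypothetical name used only for this illustration.
import java.io.IOException;

import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.ConstantScoreScorer;
import org.apache.lucene.search.ConstantScoreWeight;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.QueryVisitor;
import org.apache.lucene.search.ScoreMode;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;

public final class MatchAllConstantQuery extends Query {

  @Override
  public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
    return new ConstantScoreWeight(this, boost) {
      @Override
      public Scorer scorer(LeafReaderContext context) throws IOException {
        // Match every document of the segment; ConstantScoreScorer gives each
        // hit the constant score() derived from the boost.
        DocIdSetIterator all = DocIdSetIterator.all(context.reader().maxDoc());
        return new ConstantScoreScorer(this, score(), scoreMode, all);
      }

      @Override
      public boolean isCacheable(LeafReaderContext ctx) {
        return true; // no per-segment state that could change
      }
    };
  }

  @Override
  public void visit(QueryVisitor visitor) {
    visitor.visitLeaf(this);
  }

  @Override
  public String toString(String field) {
    return "MatchAllConstantQuery";
  }

  @Override
  public boolean equals(Object other) {
    return sameClassAs(other);
  }

  @Override
  public int hashCode() {
    return classHash();
  }
}

A searcher consumes such a query like any other, e.g. searcher.search(new MatchAllConstantQuery(), 10); the real examples that follow differ mainly in how they obtain the DocIdSetIterator (bit sets, doc values, BKD trees, and so on).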
/** Scorer used for WITHIN and DISJOINT **/
private Scorer getDenseScorer(LeafReader reader, Weight weight, final float boost, ScoreMode scoreMode) throws IOException {
  final FixedBitSet result = new FixedBitSet(reader.maxDoc());
  final long[] cost;
  if (values.getDocCount() == reader.maxDoc()) {
    cost = new long[]{values.size()};
    // In this case we can spare one visit to the tree, all documents
    // are potential matches
    result.set(0, reader.maxDoc());
    // Remove false positives
    values.intersect(getInverseDenseVisitor(query, result, cost));
  } else {
    cost = new long[]{0};
    // Get potential documents.
    final FixedBitSet excluded = new FixedBitSet(reader.maxDoc());
    values.intersect(getDenseVisitor(query, result, excluded, cost));
    result.andNot(excluded);
    // Remove false positives, we only care about the inner nodes as intersecting
    // leaf nodes have been already taken into account. Unfortunately this
    // process still reads the leaf nodes.
    values.intersect(getShallowInverseDenseVisitor(query, result));
  }
  assert cost[0] > 0;
  final DocIdSetIterator iterator = new BitSetIterator(result, cost[0]);
  return new ConstantScoreScorer(weight, boost, scoreMode, iterator);
}

@Override
@SuppressWarnings({"rawtypes"})
public DocIdSet getDocIdSet(final Map context, final LeafReaderContext readerContext, Bits acceptDocs) throws IOException {
  // NB the IndexSearcher parameter here can be null because Filter Weights don't
  // actually use it.
  Weight weight = createWeight(null, ScoreMode.COMPLETE, 1);
  return BitsFilteredDocIdSet.wrap(new DocIdSet() {
    @Override
    public DocIdSetIterator iterator() throws IOException {
      @SuppressWarnings({"unchecked"})
      Scorer scorer = valueSource.getValues(context, readerContext).getRangeScorer(weight, readerContext, lowerVal, upperVal, includeLower, includeUpper);
      return scorer == null ? null : scorer.iterator();
    }
    @Override
    public Bits bits() {
      return null; // don't use random access
    }
    @Override
    public long ramBytesUsed() {
      return 0L;
    }
  }, acceptDocs);
}

@Override
public Weight createWeight(IndexSearcher searcher, org.apache.lucene.search.ScoreMode scoreMode, float boost) throws IOException {
  return new ConstantScoreWeight(BitSetProducerQuery.this, boost) {
    @Override
    public Scorer scorer(LeafReaderContext context) throws IOException {
      BitSet bitSet = bitSetProducer.getBitSet(context);
      if (bitSet == null) {
        return null;
      }
      DocIdSetIterator disi = new BitSetIterator(bitSet, bitSet.approximateCardinality());
      return new ConstantScoreScorer(this, boost, scoreMode, disi);
    }
    @Override
    public boolean isCacheable(LeafReaderContext ctx) {
      return getCache();
    }
  };
}

@Override
public Scorer scorer(LeafReaderContext context) throws IOException {
  SortedDocValues values = DocValues.getSorted(context.reader(), joinField);
  if (values == null) {
    return null;
  }
  Scorer approximationScorer = approximationWeight.scorer(context);
  if (approximationScorer == null) {
    return null;
  }
  if (globalOrds != null) {
    return new OrdinalMapScorer(this, score(), foundOrds, values, approximationScorer.iterator(), globalOrds.getGlobalOrds(context.ord));
  } else {
    return new SegmentOrdinalScorer(this, score(), foundOrds, values, approximationScorer.iterator());
  }
}

private void addMatchedQueries(HitContext hitContext, ImmutableMap<String, Query> namedQueries, List<String> matchedQueries) throws IOException {
  for (Map.Entry<String, Query> entry : namedQueries.entrySet()) {
    String name = entry.getKey();
    Query filter = entry.getValue();
    final Weight weight = hitContext.topLevelSearcher().createNormalizedWeight(filter, false);
    final Scorer scorer = weight.scorer(hitContext.readerContext());
    if (scorer == null) {
      continue;
    }
    final TwoPhaseIterator twoPhase = scorer.twoPhaseIterator();
    if (twoPhase == null) {
      if (scorer.iterator().advance(hitContext.docId()) == hitContext.docId()) {
        matchedQueries.add(name);
      }
    } else {
      if (twoPhase.approximation().advance(hitContext.docId()) == hitContext.docId() && twoPhase.matches()) {
        matchedQueries.add(name);
      }
    }
  }
}

@Override
public Scorer scorer(LeafReaderContext context) throws IOException {
  final Scorer parentScorer = parentWeight.scorer(context);
  // no matches
  if (parentScorer == null) {
    return null;
  }
  BitSet parents = parentsFilter.getBitSet(context);
  if (parents == null) {
    // No matches
    return null;
  }
  int firstParentDoc = parentScorer.iterator().nextDoc();
  if (firstParentDoc == DocIdSetIterator.NO_MORE_DOCS) {
    // No matches
    return null;
  }
  return new IncludeNestedDocsScorer(this, parentScorer, parents, firstParentDoc);
}

IncludeNestedDocsScorer(Weight weight, Scorer parentScorer, BitSet parentBits, int currentParentPointer) {
  super(weight);
  this.parentScorer = parentScorer;
  this.parentBits = parentBits;
  this.currentParentPointer = currentParentPointer;
  if (currentParentPointer == 0) {
    currentChildPointer = 0;
  } else {
    this.currentChildPointer = this.parentBits.prevSetBit(currentParentPointer - 1);
    if (currentChildPointer == -1) {
      // no previous set parent, we delete from doc 0
      currentChildPointer = 0;
    } else {
      currentChildPointer++; // we only care about children
    }
  }
  currentDoc = currentChildPointer;
}

@Override
public Scorer scorer(LeafReaderContext context) throws IOException {
  DocIdSet childrenDocSet = childrenFilter.getDocIdSet(context, null);
  // we forcefully apply live docs here so that deleted children don't give matching parents
  childrenDocSet = BitsFilteredDocIdSet.wrap(childrenDocSet, context.reader().getLiveDocs());
  if (Lucene.isEmpty(childrenDocSet)) {
    return null;
  }
  final DocIdSetIterator childIterator = childrenDocSet.iterator();
  if (childIterator == null) {
    return null;
  }
  SortedDocValues bytesValues = globalIfd.load(context).getOrdinalsValues(parentType);
  if (bytesValues == null) {
    return null;
  }
  return new ChildScorer(this, parentIdxs, scores, childIterator, bytesValues);
}

@Override
public Scorer scorer(LeafReaderContext context) throws IOException {
  DocIdSet childrenDocIdSet = childrenFilter.getDocIdSet(context, null);
  if (Lucene.isEmpty(childrenDocIdSet)) {
    return null;
  }
  SortedDocValues globalValues = globalIfd.load(context).getOrdinalsValues(parentType);
  if (globalValues != null) {
    // we forcefully apply live docs here so that deleted children don't give matching parents
    childrenDocIdSet = BitsFilteredDocIdSet.wrap(childrenDocIdSet, context.reader().getLiveDocs());
    DocIdSetIterator innerIterator = childrenDocIdSet.iterator();
    if (innerIterator != null) {
      ChildrenDocIdIterator childrenDocIdIterator = new ChildrenDocIdIterator(
          innerIterator, parentOrds, globalValues
      );
      return ConstantScorer.create(childrenDocIdIterator, this, queryWeight);
    }
  }
  return null;
}

@Override
public float score() throws IOException {
  reset();
  freq = 0;
  if (targetDoc == activeDoc) {
    for (final Scorer scorer : featureScorers) {
      if (scorer.docID() == activeDoc) {
        freq++;
        Feature.FeatureWeight scFW = (Feature.FeatureWeight) scorer.getWeight();
        final int featureId = scFW.getIndex();
        featuresInfo[featureId].setValue(scorer.score());
        featuresInfo[featureId].setUsed(true);
      }
    }
  }
  return makeNormalizedFeaturesAndScore();
}

@Override
public Matches matches(LeafReaderContext context, int doc) throws IOException {
  // The default implementation would delegate to the joinQuery's Weight, which
  // matches on children. We need to match on the parent instead
  Scorer scorer = scorer(context);
  if (scorer == null) {
    return null;
  }
  final TwoPhaseIterator twoPhase = scorer.twoPhaseIterator();
  if (twoPhase == null) {
    if (scorer.iterator().advance(doc) != doc) {
      return null;
    }
  } else {
    if (twoPhase.approximation().advance(doc) != doc || twoPhase.matches() == false) {
      return null;
    }
  }
  return MatchesUtils.MATCH_WITH_NO_TERMS;
}

private LTRScoringQuery.ModelWeight performQuery(TopDocs hits,
    IndexSearcher searcher, int docid, LTRScoringQuery model) throws IOException,
    ModelException {
  final List<LeafReaderContext> leafContexts = searcher.getTopReaderContext()
      .leaves();
  final int n = ReaderUtil.subIndex(hits.scoreDocs[0].doc, leafContexts);
  final LeafReaderContext context = leafContexts.get(n);
  final int deBasedDoc = hits.scoreDocs[0].doc - context.docBase;
  final Weight weight = searcher.createWeight(searcher.rewrite(model), ScoreMode.COMPLETE, 1);
  final Scorer scorer = weight.scorer(context);
  // rerank using the field final-score
  scorer.iterator().advance(deBasedDoc);
  scorer.score();
  // assertEquals(42.0f, score, 0.0001);
  // assertTrue(weight instanceof AssertingWeight);
  // (AssertingIndexSearcher)
  assertTrue(weight instanceof LTRScoringQuery.ModelWeight);
  final LTRScoringQuery.ModelWeight modelWeight = (LTRScoringQuery.ModelWeight) weight;
  return modelWeight;
}

/**
 * @param doc SolrDocument to check
 * @param idField field where the id is stored
 * @param fieldType type of id field
 * @param filterQuery Query to filter by
 * @param searcher SolrIndexSearcher on which to apply the filter query
 * @return the internal docid, or -1 if doc is not found or doesn't match filter
 */
private static int getFilteredInternalDocId(SolrDocument doc, SchemaField idField, FieldType fieldType,
    Query filterQuery, SolrIndexSearcher searcher) throws IOException {
  int docid = -1;
  Field f = (Field)doc.getFieldValue(idField.getName());
  String idStr = f.stringValue();
  BytesRef idBytes = new BytesRef();
  fieldType.readableToIndexed(idStr, idBytes);
  // get the internal document id
  long segAndId = searcher.lookupId(idBytes);
  // if docid is valid, run it through the filter
  if (segAndId >= 0) {
    int segid = (int) segAndId;
    AtomicReaderContext ctx = searcher.getTopReaderContext().leaves().get((int) (segAndId >> 32));
    docid = segid + ctx.docBase;
    Weight weight = filterQuery.createWeight(searcher);
    Scorer scorer = weight.scorer(ctx, null);
    if (scorer == null || segid != scorer.advance(segid)) {
      // filter doesn't match.
      docid = -1;
    }
  }
  return docid;
}

@Override
public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
  return new ConstantScoreWeight(this, boost) {
    @Override
    public Scorer scorer(LeafReaderContext context) throws IOException {
      DocIdSetIterator approximation = DocIdSetIterator.all(context.reader().maxDoc());
      TwoPhaseIterator it = predicateValueSource.iterator(context, approximation);
      return new ConstantScoreScorer(this, score(), scoreMode, it);
    }
    @Override
    public boolean isCacheable(LeafReaderContext ctx) {
      return predicateValueSource.isCacheable(ctx);
    }
  };
}

@Override
public Scorer scorer(LeafReaderContext context) throws IOException {
  Bindings bindings = new Bindings() {
    @Override
    public DoubleValuesSource getDoubleValuesSource(String name) {
      Double queryParamValue = queryParamValues.get(name);
      if (queryParamValue != null) {
        return DoubleValuesSource.constant(queryParamValue);
      }
      return new FVDoubleValuesSource(vectorSupplier, features.featureOrdinal(name));
    }
  };
  DocIdSetIterator iterator = DocIdSetIterator.all(context.reader().maxDoc());
  DoubleValuesSource src = expression.getDoubleValuesSource(bindings);
  DoubleValues values = src.getValues(context, null);
  return new DValScorer(this, iterator, values);
}

@Override
public Scorer scorer(LeafReaderContext context) throws IOException {
  Scorer in = inner.scorer(context);
  if (in == null)
    return null;
  DoubleValues scores = valueSource.getValues(context, DoubleValuesSource.fromScorer(in));
  return new FilterScorer(in) {
    @Override
    public float score() throws IOException {
      if (scores.advanceExact(docID())) {
        double factor = scores.doubleValue();
        if (factor >= 0) {
          return (float) (factor * boost);
        }
      }
      // default: missing value, negative value or NaN
      return 0;
    }
    @Override
    public float getMaxScore(int upTo) throws IOException {
      return Float.POSITIVE_INFINITY;
    }
  };
}

@Benchmark
public LongObjectHashMap<Long> measureGroupingOnNumericDocValues() throws Exception {
  Weight weight = searcher.createWeight(new MatchAllDocsQuery(), ScoreMode.COMPLETE_NO_SCORES, 1.0f);
  LeafReaderContext leaf = searcher.getTopReaderContext().leaves().get(0);
  Scorer scorer = weight.scorer(leaf);
  NumericDocValues docValues = DocValues.getNumeric(leaf.reader(), "x");
  DocIdSetIterator docIt = scorer.iterator();
  LongObjectHashMap<Long> sumByKey = new LongObjectHashMap<>();
  for (int docId = docIt.nextDoc(); docId != DocIdSetIterator.NO_MORE_DOCS; docId = docIt.nextDoc()) {
    if (docValues.advanceExact(docId)) {
      long number = docValues.longValue();
      sumByKey.compute(number, (key, oldValue) -> {
        if (oldValue == null) {
          return number;
        } else {
          return oldValue + number;
        }
      });
    }
  }
  return sumByKey;
}

@Override
public Scorer scorer(LeafReaderContext context) throws IOException {
  ScorerSupplier scorerSupplier = scorerSupplier(context);
  if (scorerSupplier == null) {
    return null;
  }
  return scorerSupplier.get(Long.MAX_VALUE);
}

/**
 * not a direct test of NearSpans, but a demonstration of how/when
 * this causes problems
 */
public void testSpanNearScorerSkipTo1() throws Exception {
  SpanNearQuery q = makeQuery();
  Weight w = searcher.createWeight(searcher.rewrite(q), ScoreMode.COMPLETE, 1);
  IndexReaderContext topReaderContext = searcher.getTopReaderContext();
  LeafReaderContext leave = topReaderContext.leaves().get(0);
  Scorer s = w.scorer(leave);
  assertEquals(1, s.iterator().advance(1));
}

@Override
public ScorerSupplier scorerSupplier(LeafReaderContext context) throws IOException {
  final ScorerSupplier childScorerSupplier = in.scorerSupplier(context);
  if (childScorerSupplier == null) {
    return null;
  }
  // NOTE: this does not take accept docs into account, the responsibility
  // to not match deleted docs is on the scorer
  final BitSet parents = parentsFilter.getBitSet(context);
  if (parents == null) {
    // No matches
    return null;
  }
  return new ScorerSupplier() {
    @Override
    public Scorer get(long leadCost) throws IOException {
      return new BlockJoinScorer(BlockJoinWeight.this, childScorerSupplier.get(leadCost), parents, scoreMode);
    }
    @Override
    public long cost() {
      return childScorerSupplier.cost();
    }
  };
}

@Override
public boolean keepFullyDeletedSegment(IOSupplier<CodecReader> readerIOSupplier) throws IOException {
  CodecReader reader = readerIOSupplier.get();
  /* we only need a single hit to keep it; no need for soft deletes to be checked */
  Scorer scorer = getScorer(retentionQuerySupplier.get(), FilterCodecReader.wrapLiveDocs(reader, null, reader.maxDoc()));
  if (scorer != null) {
    DocIdSetIterator iterator = scorer.iterator();
    boolean atLeastOneHit = iterator.nextDoc() != DocIdSetIterator.NO_MORE_DOCS;
    return atLeastOneHit;
  }
  return super.keepFullyDeletedSegment(readerIOSupplier);
}

protected SuperScorer(Scorer scorer, OpenBitSet bitSet, String originalQueryStr, ScoreType scoreType) {
  super(scorer.getWeight());
  this.scorer = scorer;
  this.bitSet = bitSet;
  this.originalQueryStr = originalQueryStr;
  this.scoreType = scoreType;
}

@Override
public Scorer scorer(final LeafReaderContext context) throws IOException {
  assert termStates != null && termStates.wasBuiltFor(ReaderUtil.getTopLevelContext(context))
      : "The top-reader used to create Weight is not the same as the current reader's top-reader: " + ReaderUtil.getTopLevelContext(context);
  final TermsEnum termsEnum = getTermsEnum(context);
  if (termsEnum == null) {
    return null;
  }
  PostingsEnum docs = termsEnum.postings(null, PostingsEnum.NONE);
  assert docs != null;
  return new TermBoostScorer(this, docs, score);
}

@Override
protected void doPostCollection() throws IOException {
  IndexReader indexReader = context().searchContext().searcher().getIndexReader();
  for (LeafReaderContext ctx : indexReader.leaves()) {
    Scorer childDocsScorer = childFilter.scorer(ctx);
    if (childDocsScorer == null) {
      continue;
    }
    DocIdSetIterator childDocsIter = childDocsScorer.iterator();
    final LeafBucketCollector sub = collectableSubAggregators.getLeafCollector(ctx);
    final SortedDocValues globalOrdinals = valuesSource.globalOrdinalsValues(parentType, ctx);
    // Set the scorer, since we now replay only the child docIds
    sub.setScorer(ConstantScorer.create(childDocsIter, null, 1f));
    final Bits liveDocs = ctx.reader().getLiveDocs();
    for (int docId = childDocsIter.nextDoc(); docId != DocIdSetIterator.NO_MORE_DOCS; docId = childDocsIter.nextDoc()) {
      if (liveDocs != null && liveDocs.get(docId) == false) {
        continue;
      }
      long globalOrdinal = globalOrdinals.getOrd(docId);
      if (globalOrdinal != -1) {
        long bucketOrd = parentOrdToBuckets.get(globalOrdinal);
        if (bucketOrd != -1) {
          collectBucket(sub, docId, bucketOrd);
          if (multipleBucketsPerParentOrd) {
            long[] otherBucketOrds = parentOrdToOtherBuckets.get(globalOrdinal);
            if (otherBucketOrds != null) {
              for (long otherBucketOrd : otherBucketOrds) {
                collectBucket(sub, docId, otherBucketOrd);
              }
            }
          }
        }
      }
    }
  }
}

protected Scorer getScorer(final LeafReader reader, final Weight weight, final float boost, final ScoreMode scoreMode) throws IOException {
  switch (query.getQueryRelation()) {
    case INTERSECTS: return getSparseScorer(reader, weight, boost, scoreMode);
    case WITHIN:
    case DISJOINT: return getDenseScorer(reader, weight, boost, scoreMode);
    case CONTAINS: return getContainsDenseScorer(reader, weight, boost, scoreMode);
    default: throw new IllegalArgumentException("Unsupported query type :[" + query.getQueryRelation() + "]");
  }
}

public void setScorer(Scorer scorer) throws IOException {
  this.currentScorer = scorer;
  for (int i = 0; i < perBucketSamples.size(); i++) {
    PerParentBucketSamples perBucketSample = perBucketSamples.get(i);
    if (perBucketSample == null) {
      continue;
    }
    perBucketSample.setScorer(scorer);
  }
}

@Override
public Scorer scorer(LeafReaderContext context) throws IOException {
  profile.startTime(ProfileBreakdown.TimingType.BUILD_SCORER);
  final Scorer subQueryScorer;
  try {
    subQueryScorer = subQueryWeight.scorer(context);
  } finally {
    profile.stopAndRecordTime();
  }
  if (subQueryScorer == null) {
    return null;
  }
  return new ProfileScorer(this, subQueryScorer, profile);
}

@Override
public ScorerSupplier scorerSupplier(LeafReaderContext context) throws IOException {
  Timer timer = profile.getTimer(QueryTimingType.BUILD_SCORER);
  timer.start();
  final ScorerSupplier subQueryScorerSupplier;
  try {
    subQueryScorerSupplier = subQueryWeight.scorerSupplier(context);
  } finally {
    timer.stop();
  }
  if (subQueryScorerSupplier == null) {
    return null;
  }
  final ProfileWeight weight = this;
  return new ScorerSupplier() {
    @Override
    public Scorer get(long loadCost) throws IOException {
      timer.start();
      try {
        return new ProfileScorer(weight, subQueryScorerSupplier.get(loadCost), profile);
      } finally {
        timer.stop();
      }
    }
    @Override
    public long cost() {
      timer.start();
      try {
        return subQueryScorerSupplier.cost();
      } finally {
        timer.stop();
      }
    }
  };
}

private SparseModelScorer(Weight weight,
    List<Feature.FeatureWeight.FeatureScorer> featureScorers) {
  super(weight);
  if (featureScorers.size() <= 1) {
    throw new IllegalArgumentException(
        "There must be at least 2 subScorers");
  }
  subScorers = new DisiPriorityQueue(featureScorers.size());
  for (final Scorer scorer : featureScorers) {
    final DisiWrapper w = new DisiWrapper(scorer);
    subScorers.add(w);
  }
  itr = new ScoringQuerySparseIterator(subScorers);
}

@Override
public Explanation explain(LeafReaderContext context, int doc) throws IOException {
  Scorer scorer = scorer(context);
  if (scorer != null) {
    int newDoc = scorer.iterator().advance(doc);
    if (newDoc == doc) {
      return Explanation.match(
          scorer.score(),
          "Stat Score: " + type);
    }
  }
  return Explanation.noMatch("no matching term");
}