Class org.apache.lucene.search.Scorer: Source Code Examples

The following examples show how to use the org.apache.lucene.search.Scorer API in real projects; the full source of each file can be viewed on GitHub.
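
Most of the snippets below share one basic pattern: build a Weight from an IndexSearcher, ask the Weight for a per-segment Scorer, and drive that Scorer through its DocIdSetIterator. Here is a minimal sketch of that pattern; the helper name iterateMatches is ours, and it assumes an already-open IndexSearcher and an arbitrary Query rather than code from any of the projects below.

import java.io.IOException;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreMode;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;

static void iterateMatches(IndexSearcher searcher, Query query) throws IOException {
  // Rewrite the query and build a Weight; ScoreMode.COMPLETE requests scores.
  Weight weight = searcher.createWeight(searcher.rewrite(query), ScoreMode.COMPLETE, 1f);
  for (LeafReaderContext context : searcher.getTopReaderContext().leaves()) {
    Scorer scorer = weight.scorer(context); // null means no matches in this segment
    if (scorer == null) {
      continue;
    }
    DocIdSetIterator it = scorer.iterator();
    for (int doc = it.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = it.nextDoc()) {
      // score() is only valid while the iterator is positioned on a match
      System.out.println("doc=" + (context.docBase + doc) + " score=" + scorer.score());
    }
  }
}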

Example 1 Project: lucene-solr   File: ShapeQuery.java
/** Scorer used for WITHIN and DISJOINT **/
private Scorer getDenseScorer(LeafReader reader, Weight weight, final float boost, ScoreMode scoreMode) throws IOException {
  final FixedBitSet result = new FixedBitSet(reader.maxDoc());
  final long[] cost;
  if (values.getDocCount() == reader.maxDoc()) {
    cost = new long[]{values.size()};
    // In this case we can spare one visit to the tree, all documents
    // are potential matches
    result.set(0, reader.maxDoc());
    // Remove false positives
    values.intersect(getInverseDenseVisitor(query, result, cost));
  } else {
    cost = new long[]{0};
    // Get potential documents.
    final FixedBitSet excluded = new FixedBitSet(reader.maxDoc());
    values.intersect(getDenseVisitor(query, result, excluded, cost));
    result.andNot(excluded);
    // Remove false positives, we only care about the inner nodes as intersecting
    // leaf nodes have been already taken into account. Unfortunately this
    // process still reads the leaf nodes.
    values.intersect(getShallowInverseDenseVisitor(query, result));
  }
  assert cost[0] > 0;
  final DocIdSetIterator iterator = new BitSetIterator(result, cost[0]);
  return new ConstantScoreScorer(weight, boost, scoreMode, iterator);
}
 
Example 2 Project: lucene-solr   File: ValueSourceRangeFilter.java
@Override
@SuppressWarnings({"rawtypes"})
public DocIdSet getDocIdSet(final Map context, final LeafReaderContext readerContext, Bits acceptDocs) throws IOException {
  // NB the IndexSearcher parameter here can be null because Filter Weights don't
  // actually use it.
  Weight weight = createWeight(null, ScoreMode.COMPLETE, 1);
  return BitsFilteredDocIdSet.wrap(new DocIdSet() {
     @Override
     public DocIdSetIterator iterator() throws IOException {
       @SuppressWarnings({"unchecked"})
       Scorer scorer = valueSource.getValues(context, readerContext).getRangeScorer(weight, readerContext, lowerVal, upperVal, includeLower, includeUpper);
       return scorer == null ? null : scorer.iterator();
     }
     @Override
     public Bits bits() {
       return null;  // don't use random access
     }

     @Override
     public long ramBytesUsed() {
       return 0L;
     }
   }, acceptDocs);
}
 
Example 3 Project: lucene-solr   File: BlockJoinParentQParser.java
@Override
public Weight createWeight(IndexSearcher searcher, org.apache.lucene.search.ScoreMode scoreMode, float boost) throws IOException {
  return new ConstantScoreWeight(BitSetProducerQuery.this, boost) {
    @Override
    public Scorer scorer(LeafReaderContext context) throws IOException {
      BitSet bitSet = bitSetProducer.getBitSet(context);
      if (bitSet == null) {
        return null;
      }
      DocIdSetIterator disi = new BitSetIterator(bitSet, bitSet.approximateCardinality());
      return new ConstantScoreScorer(this, boost, scoreMode, disi);
    }

    @Override
    public boolean isCacheable(LeafReaderContext ctx) {
      return getCache();
    }
  };
}
 
Example 4 Project: lucene-solr   File: GlobalOrdinalsQuery.java
@Override
public Scorer scorer(LeafReaderContext context) throws IOException {
  SortedDocValues values = DocValues.getSorted(context.reader(), joinField);
  if (values == null) {
    return null;
  }

  Scorer approximationScorer = approximationWeight.scorer(context);
  if (approximationScorer == null) {
    return null;
  }
  if (globalOrds != null) {
    return new OrdinalMapScorer(this, score(), foundOrds, values, approximationScorer.iterator(), globalOrds.getGlobalOrds(context.ord));
  } else {
    return new SegmentOrdinalScorer(this, score(), foundOrds, values, approximationScorer.iterator());
  }
}
 
Example 5 Project: Elasticsearch   File: MatchedQueriesFetchSubPhase.java
private void addMatchedQueries(HitContext hitContext, ImmutableMap<String, Query> namedQueries, List<String> matchedQueries) throws IOException {
    for (Map.Entry<String, Query> entry : namedQueries.entrySet()) {
        String name = entry.getKey();
        Query filter = entry.getValue();

        final Weight weight = hitContext.topLevelSearcher().createNormalizedWeight(filter, false);
        final Scorer scorer = weight.scorer(hitContext.readerContext());
        if (scorer == null) {
            continue;
        }
        final TwoPhaseIterator twoPhase = scorer.twoPhaseIterator();
        if (twoPhase == null) {
            if (scorer.iterator().advance(hitContext.docId()) == hitContext.docId()) {
                matchedQueries.add(name);
            }
        } else {
            if (twoPhase.approximation().advance(hitContext.docId()) == hitContext.docId() && twoPhase.matches()) {
                matchedQueries.add(name);
            }
        }
    }
}
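
Example 5 above also shows the standard idiom for testing whether a single document matches at the Scorer level: if twoPhaseIterator() returns null the iterator is exact, otherwise advance the cheaper approximation and confirm with matches(). The same idiom appears again in Example 11 below. A small stand-alone sketch of that check follows; the helper name matchesDoc is ours, and it assumes a freshly obtained Scorer that has not yet been advanced past the target document.

import java.io.IOException;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.TwoPhaseIterator;

static boolean matchesDoc(Scorer scorer, int doc) throws IOException {
  TwoPhaseIterator twoPhase = scorer.twoPhaseIterator();
  if (twoPhase == null) {
    // No approximation: the scorer's own iterator is exact.
    return scorer.iterator().advance(doc) == doc;
  }
  // Advance the cheap approximation first, then run the exact per-document check.
  return twoPhase.approximation().advance(doc) == doc && twoPhase.matches();
}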
 
Example 6 Project: Elasticsearch   File: IncludeNestedDocsQuery.java
@Override
public Scorer scorer(LeafReaderContext context) throws IOException {
    final Scorer parentScorer = parentWeight.scorer(context);

    // no matches
    if (parentScorer == null) {
        return null;
    }

    BitSet parents = parentsFilter.getBitSet(context);
    if (parents == null) {
        // No matches
        return null;
    }

    int firstParentDoc = parentScorer.iterator().nextDoc();
    if (firstParentDoc == DocIdSetIterator.NO_MORE_DOCS) {
        // No matches
        return null;
    }
    return new IncludeNestedDocsScorer(this, parentScorer, parents, firstParentDoc);
}
 
Example 7 Project: Elasticsearch   File: IncludeNestedDocsQuery.java
IncludeNestedDocsScorer(Weight weight, Scorer parentScorer, BitSet parentBits, int currentParentPointer) {
    super(weight);
    this.parentScorer = parentScorer;
    this.parentBits = parentBits;
    this.currentParentPointer = currentParentPointer;
    if (currentParentPointer == 0) {
        currentChildPointer = 0;
    } else {
        this.currentChildPointer = this.parentBits.prevSetBit(currentParentPointer - 1);
        if (currentChildPointer == -1) {
            // no previous set parent, we delete from doc 0
            currentChildPointer = 0;
        } else {
            currentChildPointer++; // we only care about children
        }
    }

    currentDoc = currentChildPointer;
}
 
Example 8 Project: Elasticsearch   File: ParentQuery.java
@Override
public Scorer scorer(LeafReaderContext context) throws IOException {
    DocIdSet childrenDocSet = childrenFilter.getDocIdSet(context, null);
    // we forcefully apply live docs here so that deleted children don't give matching parents
    childrenDocSet = BitsFilteredDocIdSet.wrap(childrenDocSet, context.reader().getLiveDocs());
    if (Lucene.isEmpty(childrenDocSet)) {
        return null;
    }
    final DocIdSetIterator childIterator = childrenDocSet.iterator();
    if (childIterator == null) {
        return null;
    }
    SortedDocValues bytesValues = globalIfd.load(context).getOrdinalsValues(parentType);
    if (bytesValues == null) {
        return null;
    }

    return new ChildScorer(this, parentIdxs, scores, childIterator, bytesValues);
}
 
Example 9 Project: Elasticsearch   File: ParentConstantScoreQuery.java
@Override
public Scorer scorer(LeafReaderContext context) throws IOException {
    DocIdSet childrenDocIdSet = childrenFilter.getDocIdSet(context, null);
    if (Lucene.isEmpty(childrenDocIdSet)) {
        return null;
    }

    SortedDocValues globalValues = globalIfd.load(context).getOrdinalsValues(parentType);
    if (globalValues != null) {
        // we forcefully apply live docs here so that deleted children don't give matching parents
        childrenDocIdSet = BitsFilteredDocIdSet.wrap(childrenDocIdSet, context.reader().getLiveDocs());
        DocIdSetIterator innerIterator = childrenDocIdSet.iterator();
        if (innerIterator != null) {
            ChildrenDocIdIterator childrenDocIdIterator = new ChildrenDocIdIterator(
                    innerIterator, parentOrds, globalValues
            );
            return ConstantScorer.create(childrenDocIdIterator, this, queryWeight);
        }
    }
    return null;
}
 
Example 10 Project: lucene-solr   File: LTRScoringQuery.java
@Override
public float score() throws IOException {
  reset();
  freq = 0;
  if (targetDoc == activeDoc) {
    for (final Scorer scorer : featureScorers) {
      if (scorer.docID() == activeDoc) {
        freq++;
        Feature.FeatureWeight scFW = (Feature.FeatureWeight) scorer.getWeight();
        final int featureId = scFW.getIndex();
        featuresInfo[featureId].setValue(scorer.score());
        featuresInfo[featureId].setUsed(true);
      }
    }
  }
  return makeNormalizedFeaturesAndScore();
}
 
Example 11 Project: lucene-solr   File: ToParentBlockJoinQuery.java
@Override
public Matches matches(LeafReaderContext context, int doc) throws IOException {
  // The default implementation would delegate to the joinQuery's Weight, which
  // matches on children.  We need to match on the parent instead
  Scorer scorer = scorer(context);
  if (scorer == null) {
    return null;
  }
  final TwoPhaseIterator twoPhase = scorer.twoPhaseIterator();
  if (twoPhase == null) {
    if (scorer.iterator().advance(doc) != doc) {
      return null;
    }
  }
  else {
    if (twoPhase.approximation().advance(doc) != doc || twoPhase.matches() == false) {
      return null;
    }
  }
  return MatchesUtils.MATCH_WITH_NO_TERMS;
}
 
Example 12 Project: lucene-solr   File: TestLTRScoringQuery.java
private LTRScoringQuery.ModelWeight performQuery(TopDocs hits,
    IndexSearcher searcher, int docid, LTRScoringQuery model) throws IOException,
    ModelException {
  final List<LeafReaderContext> leafContexts = searcher.getTopReaderContext()
      .leaves();
  final int n = ReaderUtil.subIndex(hits.scoreDocs[0].doc, leafContexts);
  final LeafReaderContext context = leafContexts.get(n);
  final int deBasedDoc = hits.scoreDocs[0].doc - context.docBase;

  final Weight weight = searcher.createWeight(searcher.rewrite(model), ScoreMode.COMPLETE, 1);
  final Scorer scorer = weight.scorer(context);

  // rerank using the field final-score
  scorer.iterator().advance(deBasedDoc);
  scorer.score();

  // assertEquals(42.0f, score, 0.0001);
  // assertTrue(weight instanceof AssertingWeight);
  // (AssertingIndexSearcher)
  assertTrue(weight instanceof LTRScoringQuery.ModelWeight);
  final LTRScoringQuery.ModelWeight modelWeight = (LTRScoringQuery.ModelWeight) weight;
  return modelWeight;

}
 
Example 13
/**
 * @param doc SolrDocument to check
 * @param idField field where the id is stored
 * @param fieldType type of id field
 * @param filterQuery Query to filter by
 * @param searcher SolrIndexSearcher on which to apply the filter query
 * @return the internal docid, or -1 if doc is not found or doesn't match filter
 */
private static int getFilteredInternalDocId(SolrDocument doc, SchemaField idField, FieldType fieldType,
      Query filterQuery, SolrIndexSearcher searcher) throws IOException {
  int docid = -1;
  Field f = (Field)doc.getFieldValue(idField.getName());
  String idStr = f.stringValue();
  BytesRef idBytes = new BytesRef();
  fieldType.readableToIndexed(idStr, idBytes);
  // get the internal document id
  long segAndId = searcher.lookupId(idBytes);

  // if docid is valid, run it through the filter
  if (segAndId >= 0) {
    int segid = (int) segAndId;
    AtomicReaderContext ctx = searcher.getTopReaderContext().leaves().get((int) (segAndId >> 32));
    docid = segid + ctx.docBase;
    Weight weight = filterQuery.createWeight(searcher);
    Scorer scorer = weight.scorer(ctx, null);
    if (scorer == null || segid != scorer.advance(segid)) {
      // filter doesn't match.
      docid = -1;
    }
  }
  return docid;
}
 
Example 14 Project: lucene-solr   File: SerializedDVStrategy.java
@Override
public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
  return new ConstantScoreWeight(this, boost) {
    @Override
    public Scorer scorer(LeafReaderContext context) throws IOException {
      DocIdSetIterator approximation = DocIdSetIterator.all(context.reader().maxDoc());
      TwoPhaseIterator it = predicateValueSource.iterator(context, approximation);
      return new ConstantScoreScorer(this, score(), scoreMode, it);
    }

    @Override
    public boolean isCacheable(LeafReaderContext ctx) {
      return predicateValueSource.isCacheable(ctx);
    }

  };
}
 
Example 15
@Override
public Scorer scorer(LeafReaderContext context) throws IOException {
    Bindings bindings = new Bindings(){
        @Override
        public DoubleValuesSource getDoubleValuesSource(String name) {
            Double queryParamValue = queryParamValues.get(name);
            if (queryParamValue != null) {
                return DoubleValuesSource.constant(queryParamValue);
            }
            return new FVDoubleValuesSource(vectorSupplier, features.featureOrdinal(name));
        }
    };

    DocIdSetIterator iterator = DocIdSetIterator.all(context.reader().maxDoc());
    DoubleValuesSource src = expression.getDoubleValuesSource(bindings);
    DoubleValues values = src.getValues(context, null);

    return new DValScorer(this, iterator, values);
}
 
Example 16 Project: lucene-solr   File: FunctionScoreQuery.java
@Override
public Scorer scorer(LeafReaderContext context) throws IOException {
  Scorer in = inner.scorer(context);
  if (in == null)
    return null;
  DoubleValues scores = valueSource.getValues(context, DoubleValuesSource.fromScorer(in));
  return new FilterScorer(in) {
    @Override
    public float score() throws IOException {
      if (scores.advanceExact(docID())) {
        double factor = scores.doubleValue();
        if (factor >= 0) {
          return (float) (factor * boost);
        }
      }
      // default: missing value, negative value or NaN
      return 0;
    }
    @Override
    public float getMaxScore(int upTo) throws IOException {
      return Float.POSITIVE_INFINITY;
    }
  };
}
 
Example 17 Project: crate   File: GroupingLongCollectorBenchmark.java
@Benchmark
public LongObjectHashMap<Long> measureGroupingOnNumericDocValues() throws Exception {
    Weight weight = searcher.createWeight(new MatchAllDocsQuery(), ScoreMode.COMPLETE_NO_SCORES, 1.0f);
    LeafReaderContext leaf = searcher.getTopReaderContext().leaves().get(0);
    Scorer scorer = weight.scorer(leaf);
    NumericDocValues docValues = DocValues.getNumeric(leaf.reader(), "x");
    DocIdSetIterator docIt = scorer.iterator();
    LongObjectHashMap<Long> sumByKey = new LongObjectHashMap<>();
    for (int docId = docIt.nextDoc(); docId != DocIdSetIterator.NO_MORE_DOCS; docId = docIt.nextDoc()) {
        if (docValues.advanceExact(docId)) {
            long number = docValues.longValue();
            sumByKey.compute(number, (key, oldValue) -> {
                if (oldValue == null) {
                    return number;
                } else {
                    return oldValue + number;
                }
            });
        }
    }
    return sumByKey;
}
 
Example 18 Project: lucene-solr   File: RangeFieldQuery.java
@Override
public Scorer scorer(LeafReaderContext context) throws IOException {
  ScorerSupplier scorerSupplier = scorerSupplier(context);
  if (scorerSupplier == null) {
    return null;
  }
  return scorerSupplier.get(Long.MAX_VALUE);
}
 
Example 19 Project: lucene-solr   File: TestNearSpansOrdered.java
/**
 * not a direct test of NearSpans, but a demonstration of how/when
 * this causes problems
 */
public void testSpanNearScorerSkipTo1() throws Exception {
  SpanNearQuery q = makeQuery();
  Weight w = searcher.createWeight(searcher.rewrite(q), ScoreMode.COMPLETE, 1);
  IndexReaderContext topReaderContext = searcher.getTopReaderContext();
  LeafReaderContext leave = topReaderContext.leaves().get(0);
  Scorer s = w.scorer(leave);
  assertEquals(1, s.iterator().advance(1));
}
 
Example 20 Project: lucene-solr   File: ToParentBlockJoinQuery.java
@Override
public ScorerSupplier scorerSupplier(LeafReaderContext context) throws IOException {
  final ScorerSupplier childScorerSupplier = in.scorerSupplier(context);
  if (childScorerSupplier == null) {
    return null;
  }

  // NOTE: this does not take accept docs into account, the responsibility
  // to not match deleted docs is on the scorer
  final BitSet parents = parentsFilter.getBitSet(context);
  if (parents == null) {
    // No matches
    return null;
  }

  return new ScorerSupplier() {

    @Override
    public Scorer get(long leadCost) throws IOException {
      return new BlockJoinScorer(BlockJoinWeight.this, childScorerSupplier.get(leadCost), parents, scoreMode);
    }

    @Override
    public long cost() {
      return childScorerSupplier.cost();
    }
  };
}
 
Example 21 Project: lucene-solr   File: SoftDeletesRetentionMergePolicy.java
@Override
public boolean keepFullyDeletedSegment(IOSupplier<CodecReader> readerIOSupplier) throws IOException {
  CodecReader reader = readerIOSupplier.get();
  /* we only need a single hit to keep it; no need for soft deletes to be checked */
  Scorer scorer = getScorer(retentionQuerySupplier.get(), FilterCodecReader.wrapLiveDocs(reader, null, reader.maxDoc()));
  if (scorer != null) {
    DocIdSetIterator iterator = scorer.iterator();
    boolean atLeastOneHit = iterator.nextDoc() != DocIdSetIterator.NO_MORE_DOCS;
    return atLeastOneHit;
  }
  return super.keepFullyDeletedSegment(readerIOSupplier);
}
 
Example 22 Project: incubator-retired-blur   File: SuperQuery.java
protected SuperScorer(Scorer scorer, OpenBitSet bitSet, String originalQueryStr, ScoreType scoreType) {
  super(scorer.getWeight());
  this.scorer = scorer;
  this.bitSet = bitSet;
  this.originalQueryStr = originalQueryStr;
  this.scoreType = scoreType;
}
 
Example 23 Project: querqy   File: FieldBoostTermQueryBuilder.java
@Override
public Scorer scorer(final LeafReaderContext context) throws IOException {
    assert termStates != null && termStates.wasBuiltFor(ReaderUtil.getTopLevelContext(context))
            : "The top-reader used to create Weight is not the same as the current reader's top-reader: " + ReaderUtil.getTopLevelContext(context);
    final TermsEnum termsEnum = getTermsEnum(context);
    if (termsEnum == null) {
        return null;
    }
    PostingsEnum docs = termsEnum.postings(null, PostingsEnum.NONE);
    assert docs != null;
    return new TermBoostScorer(this, docs, score);
}
 
Example 24 Project: Elasticsearch   File: ParentToChildrenAggregator.java
@Override
protected void doPostCollection() throws IOException {
    IndexReader indexReader = context().searchContext().searcher().getIndexReader();
    for (LeafReaderContext ctx : indexReader.leaves()) {
        Scorer childDocsScorer = childFilter.scorer(ctx);
        if (childDocsScorer == null) {
            continue;
        }
        DocIdSetIterator childDocsIter = childDocsScorer.iterator();

        final LeafBucketCollector sub = collectableSubAggregators.getLeafCollector(ctx);
        final SortedDocValues globalOrdinals = valuesSource.globalOrdinalsValues(parentType, ctx);

        // Set the scorer, since we now replay only the child docIds
        sub.setScorer(ConstantScorer.create(childDocsIter, null, 1f));

        final Bits liveDocs = ctx.reader().getLiveDocs();
        for (int docId = childDocsIter.nextDoc(); docId != DocIdSetIterator.NO_MORE_DOCS; docId = childDocsIter.nextDoc()) {
            if (liveDocs != null && liveDocs.get(docId) == false) {
                continue;
            }
            long globalOrdinal = globalOrdinals.getOrd(docId);
            if (globalOrdinal != -1) {
                long bucketOrd = parentOrdToBuckets.get(globalOrdinal);
                if (bucketOrd != -1) {
                    collectBucket(sub, docId, bucketOrd);
                    if (multipleBucketsPerParentOrd) {
                        long[] otherBucketOrds = parentOrdToOtherBuckets.get(globalOrdinal);
                        if (otherBucketOrds != null) {
                            for (long otherBucketOrd : otherBucketOrds) {
                                collectBucket(sub, docId, otherBucketOrd);
                            }
                        }
                    }
                }
            }
        }
    }
}
 
Example 25 Project: lucene-solr   File: ShapeQuery.java
protected Scorer getScorer(final LeafReader reader, final Weight weight, final float boost, final ScoreMode scoreMode) throws IOException {
  switch (query.getQueryRelation()) {
    case INTERSECTS: return getSparseScorer(reader, weight, boost, scoreMode);
    case WITHIN:
    case DISJOINT: return getDenseScorer(reader, weight, boost, scoreMode);
    case CONTAINS: return getContainsDenseScorer(reader, weight, boost, scoreMode);
    default: throw new IllegalArgumentException("Unsupported query type :[" + query.getQueryRelation() + "]");
  }
}
 
Example 26 Project: Elasticsearch   File: BestDocsDeferringCollector.java
public void setScorer(Scorer scorer) throws IOException {
    this.currentScorer = scorer;
    for (int i = 0; i < perBucketSamples.size(); i++) {
        PerParentBucketSamples perBucketSample = perBucketSamples.get(i);
        if (perBucketSample == null) {
            continue;
        }
        perBucketSample.setScorer(scorer);
    }
}
 
Example 27 Project: Elasticsearch   File: ProfileWeight.java
@Override
public Scorer scorer(LeafReaderContext context) throws IOException {
    profile.startTime(ProfileBreakdown.TimingType.BUILD_SCORER);
    final Scorer subQueryScorer;
    try {
        subQueryScorer = subQueryWeight.scorer(context);
    } finally {
        profile.stopAndRecordTime();
    }
    if (subQueryScorer == null) {
        return null;
    }

    return new ProfileScorer(this, subQueryScorer, profile);
}
 
Example 28 Project: crate   File: ProfileWeight.java
@Override
public ScorerSupplier scorerSupplier(LeafReaderContext context) throws IOException {
    Timer timer = profile.getTimer(QueryTimingType.BUILD_SCORER);
    timer.start();
    final ScorerSupplier subQueryScorerSupplier;
    try {
        subQueryScorerSupplier = subQueryWeight.scorerSupplier(context);
    } finally {
        timer.stop();
    }
    if (subQueryScorerSupplier == null) {
        return null;
    }

    final ProfileWeight weight = this;
    return new ScorerSupplier() {

        @Override
        public Scorer get(long loadCost) throws IOException {
            timer.start();
            try {
                return new ProfileScorer(weight, subQueryScorerSupplier.get(loadCost), profile);
            } finally {
                timer.stop();
            }
        }

        @Override
        public long cost() {
            timer.start();
            try {
                return subQueryScorerSupplier.cost();
            } finally {
                timer.stop();
            }
        }
    };
}
 
Example 29 Project: lucene-solr   File: LTRScoringQuery.java
private SparseModelScorer(Weight weight,
    List<Feature.FeatureWeight.FeatureScorer> featureScorers) {
  super(weight);
  if (featureScorers.size() <= 1) {
    throw new IllegalArgumentException(
        "There must be at least 2 subScorers");
  }
  subScorers = new DisiPriorityQueue(featureScorers.size());
  for (final Scorer scorer : featureScorers) {
    final DisiWrapper w = new DisiWrapper(scorer);
    subScorers.add(w);
  }

  itr = new ScoringQuerySparseIterator(subScorers);
}
 
Example 30
@Override
public Explanation explain(LeafReaderContext context, int doc) throws IOException {
    Scorer scorer = scorer(context);

    if (scorer != null) {
        int newDoc = scorer.iterator().advance(doc);
        if (newDoc == doc) {
            return Explanation.match(
                    scorer.score(),
                    "Stat Score: " + type);
        }
    }
    return Explanation.noMatch("no matching term");
}
 