Code examples for org.apache.lucene.search.Weight#scorer()

Listed below are example usages of org.apache.lucene.search.Weight#scorer() drawn from open-source projects; the full source for each can be viewed in the linked project on GitHub.
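All of the examples share the same basic shape: build a Weight from the (rewritten) query, ask it for a per-segment Scorer, and walk the scorer's DocIdSetIterator. A minimal sketch of that pattern follows; it assumes an IndexSearcher `searcher` and a Query `query` are already in scope, and the names are illustrative rather than taken from any one example below.

Weight weight = searcher.createWeight(searcher.rewrite(query), ScoreMode.COMPLETE_NO_SCORES, 1f);
for (LeafReaderContext leaf : searcher.getIndexReader().leaves()) {
    Scorer scorer = weight.scorer(leaf);           // one scorer per segment
    if (scorer == null) {
        continue;                                  // nothing in this segment can match
    }
    Bits liveDocs = leaf.reader().getLiveDocs();   // scorers also visit deleted docs
    DocIdSetIterator it = scorer.iterator();
    for (int doc = it.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = it.nextDoc()) {
        if (liveDocs == null || liveDocs.get(doc)) {
            // process the segment-local doc (global id: leaf.docBase + doc)
        }
    }
}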

Example 1  Project: crate  File: Lucene.java
/**
 * Check whether there is one or more documents matching the provided query.
 */
public static boolean exists(IndexSearcher searcher, Query query) throws IOException {
    final Weight weight = searcher.createWeight(searcher.rewrite(query), ScoreMode.COMPLETE_NO_SCORES, 1f);
    // the scorer API should be more efficient at stopping after the first
    // match than the bulk scorer API
    for (LeafReaderContext context : searcher.getIndexReader().leaves()) {
        final Scorer scorer = weight.scorer(context);
        if (scorer == null) {
            continue;
        }
        final Bits liveDocs = context.reader().getLiveDocs();
        final DocIdSetIterator iterator = scorer.iterator();
        for (int doc = iterator.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = iterator.nextDoc()) {
            if (liveDocs == null || liveDocs.get(doc)) {
                return true;
            }
        }
    }
    return false;
}
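Two details in this example recur throughout the listing: a null return from Weight#scorer means the segment cannot contain a match and is simply skipped, and the liveDocs check is required because a scorer's iterator also visits deleted documents.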
 
Example 2

private void addMatchedQueries(HitContext hitContext, ImmutableMap<String, Query> namedQueries, List<String> matchedQueries) throws IOException {
    for (Map.Entry<String, Query> entry : namedQueries.entrySet()) {
        String name = entry.getKey();
        Query filter = entry.getValue();

        final Weight weight = hitContext.topLevelSearcher().createNormalizedWeight(filter, false);
        final Scorer scorer = weight.scorer(hitContext.readerContext());
        if (scorer == null) {
            continue;
        }
        final TwoPhaseIterator twoPhase = scorer.twoPhaseIterator();
        if (twoPhase == null) {
            if (scorer.iterator().advance(hitContext.docId()) == hitContext.docId()) {
                matchedQueries.add(name);
            }
        } else {
            if (twoPhase.approximation().advance(hitContext.docId()) == hitContext.docId() && twoPhase.matches()) {
                matchedQueries.add(name);
            }
        }
    }
}
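The advance(docId) == docId test above is the standard way to ask whether a single known document matches a query, and the two-phase branch advances the cheap approximation first so the potentially expensive matches() check only runs when the approximation lands on the target. Distilled into a hedged helper sketch (the name `matchesDoc` is illustrative, not from the example):

static boolean matchesDoc(Weight weight, LeafReaderContext leaf, int docId) throws IOException {
    Scorer scorer = weight.scorer(leaf);
    if (scorer == null) {
        return false;                 // segment has no matches at all
    }
    TwoPhaseIterator twoPhase = scorer.twoPhaseIterator();
    if (twoPhase == null) {
        return scorer.iterator().advance(docId) == docId;
    }
    // advance the cheap approximation, then confirm with the exact check
    return twoPhase.approximation().advance(docId) == docId && twoPhase.matches();
}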
 
Example 3  Project: lucene-solr  File: CompositeVerifyQuery.java
@Override
public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
  final Weight indexQueryWeight = indexQuery.createWeight(searcher, ScoreMode.COMPLETE_NO_SCORES, boost); // scores aren't needed

  return new ConstantScoreWeight(this, boost) {

    @Override
    public Scorer scorer(LeafReaderContext context) throws IOException {

      final Scorer indexQueryScorer = indexQueryWeight.scorer(context);
      if (indexQueryScorer == null) {
        return null;
      }

      final TwoPhaseIterator predFuncValues = predicateValueSource.iterator(context, indexQueryScorer.iterator());
      return new ConstantScoreScorer(this, score(), scoreMode, predFuncValues);
    }

    @Override
    public boolean isCacheable(LeafReaderContext ctx) {
      return predicateValueSource.isCacheable(ctx);
    }

  };
}
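Here the wrapped index query supplies the first, approximate phase, and predicateValueSource turns its iterator into a TwoPhaseIterator whose matches() performs the exact (and presumably more expensive) verification; ConstantScoreScorer then exposes the pair as an ordinary scorer.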
 
Example 4  Project: lucene-solr  File: TestBlockJoinValidation.java
public void testAdvanceValidationForToChildBjq() throws Exception {
  Query parentQuery = new MatchAllDocsQuery();
  ToChildBlockJoinQuery blockJoinQuery = new ToChildBlockJoinQuery(parentQuery, parentsFilter);

  final LeafReaderContext context = indexSearcher.getIndexReader().leaves().get(0);
  Weight weight = indexSearcher.createWeight(indexSearcher.rewrite(blockJoinQuery), org.apache.lucene.search.ScoreMode.COMPLETE, 1);
  Scorer scorer = weight.scorer(context);
  final Bits parentDocs = parentsFilter.getBitSet(context);

  int target;
  do {
    // make the parent scorer advance to a doc ID which is not a parent
    target = TestUtil.nextInt(random(), 0, context.reader().maxDoc() - 2);
  } while (parentDocs.get(target + 1));

  final int illegalTarget = target;
  IllegalStateException expected = expectThrows(IllegalStateException.class, () -> {
    scorer.iterator().advance(illegalTarget);
  });
  assertTrue(expected.getMessage() != null && expected.getMessage().contains(ToChildBlockJoinQuery.INVALID_QUERY_MESSAGE));
}
 
Example 5  Project: lucene-solr  File: TestSelectiveWeightCreation.java
private LTRScoringQuery.ModelWeight performQuery(TopDocs hits,
    IndexSearcher searcher, int docid, LTRScoringQuery model) throws IOException,
    ModelException {
  final List<LeafReaderContext> leafContexts = searcher.getTopReaderContext()
      .leaves();
  final int n = ReaderUtil.subIndex(hits.scoreDocs[0].doc, leafContexts);
  final LeafReaderContext context = leafContexts.get(n);
  final int deBasedDoc = hits.scoreDocs[0].doc - context.docBase;

  final Weight weight = searcher.createWeight(searcher.rewrite(model), ScoreMode.COMPLETE, 1);
  final Scorer scorer = weight.scorer(context);

  // rerank using the field final-score
  scorer.iterator().advance(deBasedDoc);
  scorer.score();
  assertTrue(weight instanceof LTRScoringQuery.ModelWeight);
  final LTRScoringQuery.ModelWeight modelWeight = (LTRScoringQuery.ModelWeight) weight;
  return modelWeight;

}
 
Example 6  Project: lucene-solr  File: TestLTRScoringQuery.java
private LTRScoringQuery.ModelWeight performQuery(TopDocs hits,
    IndexSearcher searcher, int docid, LTRScoringQuery model) throws IOException,
    ModelException {
  final List<LeafReaderContext> leafContexts = searcher.getTopReaderContext()
      .leaves();
  final int n = ReaderUtil.subIndex(hits.scoreDocs[0].doc, leafContexts);
  final LeafReaderContext context = leafContexts.get(n);
  final int deBasedDoc = hits.scoreDocs[0].doc - context.docBase;

  final Weight weight = searcher.createWeight(searcher.rewrite(model), ScoreMode.COMPLETE, 1);
  final Scorer scorer = weight.scorer(context);

  // rerank using the field final-score
  scorer.iterator().advance(deBasedDoc);
  scorer.score();

  // assertEquals(42.0f, score, 0.0001);
  // assertTrue(weight instanceof AssertingWeight);
  // (AssertingIndexSearcher)
  assertTrue(weight instanceof LTRScoringQuery.ModelWeight);
  final LTRScoringQuery.ModelWeight modelWeight = (LTRScoringQuery.ModelWeight) weight;
  return modelWeight;

}
 
Example 7

/**
 * @param doc SolrDocument to check
 * @param idField field where the id is stored
 * @param fieldType type of id field
 * @param filterQuery Query to filter by
 * @param searcher SolrIndexSearcher on which to apply the filter query
 * @return the internal docid, or -1 if doc is not found or doesn't match filter
 */
private static int getFilteredInternalDocId(SolrDocument doc, SchemaField idField, FieldType fieldType,
      Query filterQuery, SolrIndexSearcher searcher) throws IOException {
  int docid = -1;
  Field f = (Field)doc.getFieldValue(idField.getName());
  String idStr = f.stringValue();
  BytesRef idBytes = new BytesRef();
  fieldType.readableToIndexed(idStr, idBytes);
  // get the internal document id
  long segAndId = searcher.lookupId(idBytes);

    // if docid is valid, run it through the filter
  if (segAndId >= 0) {
    int segid = (int) segAndId;
    AtomicReaderContext ctx = searcher.getTopReaderContext().leaves().get((int) (segAndId >> 32));
    docid = segid + ctx.docBase;
    Weight weight = filterQuery.createWeight(searcher);
    Scorer scorer = weight.scorer(ctx, null);
    if (scorer == null || segid != scorer.advance(segid)) {
      // filter doesn't match.
      docid = -1;
    }
  }
  return docid;
}
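Note that this snippet targets the older Lucene 4.x API: AtomicReaderContext instead of LeafReaderContext, a one-argument createWeight, a scorer(ctx, acceptDocs) overload, and advance called directly on the Scorer. The lookupId return value packs the segment ordinal into the high 32 bits and the segment-local doc id into the low 32 bits, which is why the code shifts and casts it apart.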
 
Example 8  Project: Elasticsearch  File: DocumentMapper.java
/**
 * Returns the best nested {@link ObjectMapper} instance that is in the scope of the specified nested docId.
 */
public ObjectMapper findNestedObjectMapper(int nestedDocId, SearchContext sc, LeafReaderContext context) throws IOException {
    ObjectMapper nestedObjectMapper = null;
    for (ObjectMapper objectMapper : objectMappers().values()) {
        if (!objectMapper.nested().isNested()) {
            continue;
        }

        Query filter = objectMapper.nestedTypeFilter();
        if (filter == null) {
            continue;
        }
        // We can pass down 'null' as acceptedDocs, because nestedDocId is a doc to be fetched and
        // therefore is guaranteed to be a live doc.
        final Weight nestedWeight = filter.createWeight(sc.searcher(), false);
        Scorer scorer = nestedWeight.scorer(context);
        if (scorer == null) {
            continue;
        }

        if (scorer.iterator().advance(nestedDocId) == nestedDocId) {
            if (nestedObjectMapper == null) {
                nestedObjectMapper = objectMapper;
            } else {
                if (nestedObjectMapper.fullPath().length() < objectMapper.fullPath().length()) {
                    nestedObjectMapper = objectMapper;
                }
            }
        }
    }
    return nestedObjectMapper;
}
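When several nested mappers match the doc, the one with the longest fullPath, i.e. the most deeply nested scope, wins.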
 
Example 9  Project: crate  File: DocValuesAggregates.java
@SuppressWarnings({"unchecked", "rawtypes"})
private static Iterable<Row> getRow(AtomicReference<Throwable> killed,
                                    Searcher searcher,
                                    Query query,
                                    List<DocValueAggregator> aggregators) throws IOException {
    IndexSearcher indexSearcher = searcher.searcher();
    Weight weight = indexSearcher.createWeight(indexSearcher.rewrite(query), ScoreMode.COMPLETE_NO_SCORES, 1f);
    List<LeafReaderContext> leaves = indexSearcher.getTopReaderContext().leaves();
    Object[] cells = new Object[aggregators.size()];
    for (int i = 0; i < aggregators.size(); i++) {
        cells[i] = aggregators.get(i).initialState();
    }
    for (var leaf : leaves) {
        Scorer scorer = weight.scorer(leaf);
        if (scorer == null) {
            continue;
        }
        for (int i = 0; i < aggregators.size(); i++) {
            aggregators.get(i).loadDocValues(leaf.reader());
        }
        DocIdSetIterator docs = scorer.iterator();
        Bits liveDocs = leaf.reader().getLiveDocs();
        for (int doc = docs.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = docs.nextDoc()) {
            if (liveDocs != null && !liveDocs.get(doc)) {
                continue;
            }
            Throwable killCause = killed.get();
            if (killCause != null) {
                Exceptions.rethrowUnchecked(killCause);
            }
            for (int i = 0; i < aggregators.size(); i++) {
                aggregators.get(i).apply(cells[i], doc);
            }
        }
    }
    for (int i = 0; i < aggregators.size(); i++) {
        cells[i] = aggregators.get(i).partialResult(cells[i]);
    }
    return List.of(new RowN(cells));
}
 
Example 10  Project: lucene-solr  File: TestRangeFacetCounts.java
@Override
public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
  final Weight in = this.in.createWeight(searcher, scoreMode, boost);
  return new FilterWeight(in) {
    @Override
    public Scorer scorer(LeafReaderContext context) throws IOException {
      used.set(true);
      return in.scorer(context);
    }
  };
}
 
Example 11  Project: lucene-solr  File: TestNearSpansOrdered.java
/**
 * not a direct test of NearSpans, but a demonstration of how/when
 * this causes problems
 */
public void testSpanNearScorerSkipTo1() throws Exception {
  SpanNearQuery q = makeQuery();
  Weight w = searcher.createWeight(searcher.rewrite(q), ScoreMode.COMPLETE, 1);
  IndexReaderContext topReaderContext = searcher.getTopReaderContext();
  LeafReaderContext leave = topReaderContext.leaves().get(0);
  Scorer s = w.scorer(leave);
  assertEquals(1, s.iterator().advance(1));
}
 
Example 12  Project: Elasticsearch  File: FetchPhase.java
private InternalSearchHit.InternalNestedIdentity getInternalNestedIdentity(SearchContext context, int nestedSubDocId, LeafReaderContext subReaderContext, DocumentMapper documentMapper, ObjectMapper nestedObjectMapper) throws IOException {
    int currentParent = nestedSubDocId;
    ObjectMapper nestedParentObjectMapper;
    ObjectMapper current = nestedObjectMapper;
    String originalName = nestedObjectMapper.name();
    InternalSearchHit.InternalNestedIdentity nestedIdentity = null;
    do {
        Query parentFilter;
        nestedParentObjectMapper = documentMapper.findParentObjectMapper(current);
        if (nestedParentObjectMapper != null) {
            if (nestedParentObjectMapper.nested().isNested() == false) {
                current = nestedParentObjectMapper;
                continue;
            }
            parentFilter = nestedParentObjectMapper.nestedTypeFilter();
        } else {
            parentFilter = Queries.newNonNestedFilter();
        }

        Query childFilter = nestedObjectMapper.nestedTypeFilter();
        if (childFilter == null) {
            current = nestedParentObjectMapper;
            continue;
        }
        final Weight childWeight = context.searcher().createNormalizedWeight(childFilter, false);
        Scorer childScorer = childWeight.scorer(subReaderContext);
        if (childScorer == null) {
            current = nestedParentObjectMapper;
            continue;
        }
        DocIdSetIterator childIter = childScorer.iterator();

        BitSet parentBits = context.bitsetFilterCache().getBitSetProducer(parentFilter).getBitSet(subReaderContext);

        int offset = 0;
        int nextParent = parentBits.nextSetBit(currentParent);
        for (int docId = childIter.advance(currentParent + 1); docId < nextParent && docId != DocIdSetIterator.NO_MORE_DOCS; docId = childIter.nextDoc()) {
            offset++;
        }
        currentParent = nextParent;
        current = nestedObjectMapper = nestedParentObjectMapper;
        int currentPrefix = current == null ? 0 : current.name().length() + 1;
        nestedIdentity = new InternalSearchHit.InternalNestedIdentity(originalName.substring(currentPrefix), offset, nestedIdentity);
        if (current != null) {
            originalName = current.name();
        }
    } while (current != null);
    return nestedIdentity;
}
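The nested identity is built bottom-up: at each level the child iterator is advanced past currentParent and docs are counted until the next set bit in the parent BitSet, which yields the child's offset within its parent block, and the loop then climbs to the enclosing object mapper until none remains.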
 
Example 13  Project: Elasticsearch  File: GeoDistanceRangeQuery.java
@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException {
    final Weight boundingBoxWeight;
    if (boundingBoxFilter != null) {
        boundingBoxWeight = searcher.createNormalizedWeight(boundingBoxFilter, false);
    } else {
        boundingBoxWeight = null;
    }
    return new ConstantScoreWeight(this) {
        @Override
        public Scorer scorer(LeafReaderContext context) throws IOException {
            final DocIdSetIterator approximation;
            if (boundingBoxWeight != null) {
                Scorer s = boundingBoxWeight.scorer(context);
                if (s == null) {
                    // if the approximation does not match anything, we're done
                    return null;
                }
                approximation = s.iterator();
            } else {
                approximation = DocIdSetIterator.all(context.reader().maxDoc());
            }
            final MultiGeoPointValues values = indexFieldData.load(context).getGeoPointValues();
            final TwoPhaseIterator twoPhaseIterator = new TwoPhaseIterator(approximation) {
                @Override
                public boolean matches() throws IOException {
                    final int doc = approximation.docID();
                    values.setDocument(doc);
                    final int length = values.count();
                    for (int i = 0; i < length; i++) {
                        GeoPoint point = values.valueAt(i);
                        if (distanceBoundingCheck.isWithin(point.lat(), point.lon())) {
                            double d = fixedSourceDistance.calculate(point.lat(), point.lon());
                            if (d >= inclusiveLowerPoint && d <= inclusiveUpperPoint) {
                                return true;
                            }
                        }
                    }
                    return false;
                }

                @Override
                public float matchCost() {
                    if (distanceBoundingCheck == GeoDistance.ALWAYS_INSTANCE) {
                        return 0.0f;
                    } else {
                        // TODO: is this right (up to 4 comparisons from GeoDistance.SimpleDistanceBoundingCheck)?
                        return 4.0f;
                    }
                }
            };
            return new ConstantScoreScorer(this, score(), twoPhaseIterator);
        }
    };
}
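matchCost() is the two-phase iterator's estimate of how expensive a single matches() call is; conjunctions use it to run cheaper verification checks before costlier ones, so returning 0 when no bounding check is needed lets this filter be verified first.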
 
Example 14  Project: Elasticsearch  File: FilterableTermsEnum.java
public FilterableTermsEnum(IndexReader reader, String field, int docsEnumFlag, @Nullable Query filter) throws IOException {
    if ((docsEnumFlag != PostingsEnum.FREQS) && (docsEnumFlag != PostingsEnum.NONE)) {
        throw new IllegalArgumentException("invalid docsEnumFlag of " + docsEnumFlag);
    }
    this.docsEnumFlag = docsEnumFlag;
    if (filter == null) {
        // Important - need to use the doc count that includes deleted docs
        // or we have this issue: https://github.com/elasticsearch/elasticsearch/issues/7951
        numDocs = reader.maxDoc();
    }
    List<LeafReaderContext> leaves = reader.leaves();
    List<Holder> enums = new ArrayList<>(leaves.size());
    final Weight weight;
    if (filter == null) {
        weight = null;
    } else {
        final IndexSearcher searcher = new IndexSearcher(reader);
        searcher.setQueryCache(null);
        weight = searcher.createNormalizedWeight(filter, false);
    }
    for (LeafReaderContext context : leaves) {
        Terms terms = context.reader().terms(field);
        if (terms == null) {
            continue;
        }
        TermsEnum termsEnum = terms.iterator();
        if (termsEnum == null) {
            continue;
        }
        BitSet bits = null;
        if (weight != null) {
            Scorer scorer = weight.scorer(context);
            if (scorer == null) {
                // fully filtered, none matching, no need to iterate on this
                continue;
            }
            DocIdSetIterator docs = scorer.iterator();

            // we want to force apply deleted docs
            final Bits liveDocs = context.reader().getLiveDocs();
            if (liveDocs != null) {
                docs = new FilteredDocIdSetIterator(docs) {
                    @Override
                    protected boolean match(int doc) {
                        return liveDocs.get(doc);
                    }
                };
            }

            BitDocIdSet.Builder builder = new BitDocIdSet.Builder(context.reader().maxDoc());
            builder.or(docs);
            bits = builder.build().bits();

            // Count how many docs are in our filtered set
            // TODO make this lazy-loaded only for those that need it?
            numDocs += bits.cardinality();
        }
        enums.add(new Holder(termsEnum, bits));
    }
    this.enums = enums.toArray(new Holder[enums.size()]);
}
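Two quirks are worth noting: numDocs is seeded from maxDoc() so that, absent a filter, it counts deleted documents too (see the linked issue), while the filtered path deliberately re-applies liveDocs via FilteredDocIdSetIterator before materializing the matches into a per-segment BitSet, whose cardinality is then added to numDocs.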
 
Example 15  Project: lucene-solr  File: DoubleRangeFacetCounts.java
private void count(DoubleValuesSource valueSource, List<MatchingDocs> matchingDocs) throws IOException {

    DoubleRange[] ranges = (DoubleRange[]) this.ranges;

    LongRange[] longRanges = new LongRange[ranges.length];
    for (int i = 0; i < ranges.length; i++) {
      DoubleRange range = ranges[i];
      longRanges[i] = new LongRange(range.label,
                                    NumericUtils.doubleToSortableLong(range.min), true,
                                    NumericUtils.doubleToSortableLong(range.max), true);
    }

    LongRangeCounter counter = new LongRangeCounter(longRanges);

    int missingCount = 0;
    for (MatchingDocs hits : matchingDocs) {
      DoubleValues fv = valueSource.getValues(hits.context, null);
      
      totCount += hits.totalHits;
      final DocIdSetIterator fastMatchDocs;
      if (fastMatchQuery != null) {
        final IndexReaderContext topLevelContext = ReaderUtil.getTopLevelContext(hits.context);
        final IndexSearcher searcher = new IndexSearcher(topLevelContext);
        searcher.setQueryCache(null);
        final Weight fastMatchWeight = searcher.createWeight(searcher.rewrite(fastMatchQuery), ScoreMode.COMPLETE_NO_SCORES, 1);
        Scorer s = fastMatchWeight.scorer(hits.context);
        if (s == null) {
          continue;
        }
        fastMatchDocs = s.iterator();
      } else {
        fastMatchDocs = null;
      }

      DocIdSetIterator docs = hits.bits.iterator();

      for (int doc = docs.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; ) {
        if (fastMatchDocs != null) {
          int fastMatchDoc = fastMatchDocs.docID();
          if (fastMatchDoc < doc) {
            fastMatchDoc = fastMatchDocs.advance(doc);
          }

          if (doc != fastMatchDoc) {
            doc = docs.advance(fastMatchDoc);
            continue;
          }
        }
        // Skip missing docs:
        if (fv.advanceExact(doc)) {
          counter.add(NumericUtils.doubleToSortableLong(fv.doubleValue()));
        } else {
          missingCount++;
        }

        doc = docs.nextDoc();
      }
    }

    missingCount += counter.fillCounts(counts);
    totCount -= missingCount;
  }
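The inner loop is a leapfrog intersection: whichever iterator is behind advances to the other's position, and a hit is counted only when both land on the same doc; docs with no value are detected via advanceExact and tallied as missing.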
 
Example 16  Project: lucene-solr  File: LongRange.java
@Override
public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
  final Weight fastMatchWeight = fastMatchQuery == null
      ? null
      : searcher.createWeight(fastMatchQuery, ScoreMode.COMPLETE_NO_SCORES, 1f);

  return new ConstantScoreWeight(this, boost) {
    @Override
    public Scorer scorer(LeafReaderContext context) throws IOException {
      final int maxDoc = context.reader().maxDoc();

      final DocIdSetIterator approximation;
      if (fastMatchWeight == null) {
        approximation = DocIdSetIterator.all(maxDoc);
      } else {
        Scorer s = fastMatchWeight.scorer(context);
        if (s == null) {
          return null;
        }
        approximation = s.iterator();
      }

      final LongValues values = valueSource.getValues(context, null);
      final TwoPhaseIterator twoPhase = new TwoPhaseIterator(approximation) {
        @Override
        public boolean matches() throws IOException {
          return values.advanceExact(approximation.docID()) && range.accept(values.longValue());
        }

        @Override
        public float matchCost() {
          return 100; // TODO: use cost of range.accept()
        }
      };
      return new ConstantScoreScorer(this, score(), scoreMode, twoPhase);
    }

    @Override
    public boolean isCacheable(LeafReaderContext ctx) {
      return valueSource.isCacheable(ctx);
    }

  };
}
 
Example 17  Project: lucene-solr  File: DoubleRange.java
@Override
public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
  final Weight fastMatchWeight = fastMatchQuery == null
      ? null
      : searcher.createWeight(fastMatchQuery, ScoreMode.COMPLETE_NO_SCORES, 1f);

  return new ConstantScoreWeight(this, boost) {
    @Override
    public Scorer scorer(LeafReaderContext context) throws IOException {
      final int maxDoc = context.reader().maxDoc();

      final DocIdSetIterator approximation;
      if (fastMatchWeight == null) {
        approximation = DocIdSetIterator.all(maxDoc);
      } else {
        Scorer s = fastMatchWeight.scorer(context);
        if (s == null) {
          return null;
        }
        approximation = s.iterator();
      }

      final DoubleValues values = valueSource.getValues(context, null);
      final TwoPhaseIterator twoPhase = new TwoPhaseIterator(approximation) {
        @Override
        public boolean matches() throws IOException {
          return values.advanceExact(approximation.docID()) && range.accept(values.doubleValue());
        }

        @Override
        public float matchCost() {
          return 100; // TODO: use cost of range.accept()
        }
      };
      return new ConstantScoreScorer(this, score(), scoreMode, twoPhase);
    }

    @Override
    public boolean isCacheable(LeafReaderContext ctx) {
      return valueSource.isCacheable(ctx);
    }

  };
}
 
Example 18  Project: lucene-solr  File: LongRangeFacetCounts.java
private void count(LongValuesSource valueSource, List<MatchingDocs> matchingDocs) throws IOException {

    LongRange[] ranges = (LongRange[]) this.ranges;

    LongRangeCounter counter = new LongRangeCounter(ranges);

    int missingCount = 0;
    for (MatchingDocs hits : matchingDocs) {
      LongValues fv = valueSource.getValues(hits.context, null);
      
      totCount += hits.totalHits;
      final DocIdSetIterator fastMatchDocs;
      if (fastMatchQuery != null) {
        final IndexReaderContext topLevelContext = ReaderUtil.getTopLevelContext(hits.context);
        final IndexSearcher searcher = new IndexSearcher(topLevelContext);
        searcher.setQueryCache(null);
        final Weight fastMatchWeight = searcher.createWeight(searcher.rewrite(fastMatchQuery), ScoreMode.COMPLETE_NO_SCORES, 1);
        Scorer s = fastMatchWeight.scorer(hits.context);
        if (s == null) {
          continue;
        }
        fastMatchDocs = s.iterator();
      } else {
        fastMatchDocs = null;
      }

      DocIdSetIterator docs = hits.bits.iterator();      
      for (int doc = docs.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; ) {
        if (fastMatchDocs != null) {
          int fastMatchDoc = fastMatchDocs.docID();
          if (fastMatchDoc < doc) {
            fastMatchDoc = fastMatchDocs.advance(doc);
          }

          if (doc != fastMatchDoc) {
            doc = docs.advance(fastMatchDoc);
            continue;
          }
        }
        // Skip missing docs:
        if (fv.advanceExact(doc)) {
          counter.add(fv.longValue());
        } else {
          missingCount++;
        }

        doc = docs.nextDoc();
      }
    }
    
    int x = counter.fillCounts(counts);

    missingCount += x;

    //System.out.println("totCount " + totCount + " x " + x + " missingCount " + missingCount);
    totCount -= missingCount;
  }
 
Example 19

private static Scorer getScorer(Query query, CodecReader reader) throws IOException {
  IndexSearcher s = new IndexSearcher(reader);
  s.setQueryCache(null);
  Weight weight = s.createWeight(s.rewrite(query), ScoreMode.COMPLETE_NO_SCORES, 1.0f);
  return weight.scorer(reader.getContext());
}
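Disabling the query cache with setQueryCache(null) matters in one-off helpers like this: it guarantees the returned scorer is built directly from the query rather than from a cached bitset, and keeps a throwaway searcher from populating the cache.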
 
Example 20

void doLog(Query query, List<HitLogConsumer> loggers, IndexSearcher searcher, SearchHit[] hits) throws IOException {
    // Reorder hits by id so we can scan all the docs belonging to the same
    // segment by reusing the same scorer.
    SearchHit[] reordered = new SearchHit[hits.length];
    System.arraycopy(hits, 0, reordered, 0, hits.length);
    Arrays.sort(reordered, Comparator.comparingInt(SearchHit::docId));

    int hitUpto = 0;
    int readerUpto = -1;
    int endDoc = 0;
    int docBase = 0;
    Scorer scorer = null;
    Weight weight = searcher.createWeight(searcher.rewrite(query), ScoreMode.COMPLETE, 1F);
    // Loop logic borrowed from lucene QueryRescorer
    while (hitUpto < reordered.length) {
        SearchHit hit = reordered[hitUpto];
        int docID = hit.docId();
        loggers.forEach((l) -> l.nextDoc(hit));
        LeafReaderContext readerContext = null;
        while (docID >= endDoc) {
            readerUpto++;
            readerContext = searcher.getTopReaderContext().leaves().get(readerUpto);
            endDoc = readerContext.docBase + readerContext.reader().maxDoc();
        }

        if (readerContext != null) {
            // We advanced to another segment:
            docBase = readerContext.docBase;
            scorer = weight.scorer(readerContext);
        }

        if (scorer != null) {
            int targetDoc = docID - docBase;
            int actualDoc = scorer.docID();
            if (actualDoc < targetDoc) {
                actualDoc = scorer.iterator().advance(targetDoc);
            }
            if (actualDoc == targetDoc) {
                // Scoring will trigger log collection
                scorer.score();
            }
        }

        hitUpto++;
    }
}
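Sorting the hits by doc id lets the loop reuse a single scorer per segment: the scorer only ever advances forward, a fresh scorer is created each time the walk crosses a segment boundary, and calling score() on the advanced scorer is what triggers the attached loggers.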