org.apache.lucene.search.DocIdSetIterator#nextDoc() Source Code Examples

Listed below are example usages of org.apache.lucene.search.DocIdSetIterator#nextDoc(), collected from open-source projects on GitHub; each example names its project and source file so you can look up the full source.
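Before the examples, here is a minimal sketch of the canonical nextDoc() consumption loop that nearly all of the snippets below follow. The field name and the use of NumericDocValues are illustrative assumptions, not taken from any example below:

import java.io.IOException;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.search.DocIdSetIterator;

// Canonical pattern: nextDoc() advances to the next matching document and
// returns DocIdSetIterator.NO_MORE_DOCS once the iterator is exhausted.
static long sumNumericField(LeafReader reader, String field) throws IOException {
  // NumericDocValues extends DocIdSetIterator and starts positioned before the first doc.
  NumericDocValues values = reader.getNumericDocValues(field);
  if (values == null) { // field has no doc values in this segment
    return 0L;
  }
  long sum = 0;
  for (int doc = values.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = values.nextDoc()) {
    sum += values.longValue();
  }
  return sum;
}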

Example 1  Project: lucene-solr  File: TaggerRequestHandler.java
private DocList getDocList(int rows, FixedBitSet matchDocIdsBS) throws IOException {
  //Now we must supply a Solr DocList and add it to the response.
  //  Typically this is gotten via a SolrIndexSearcher.search(), but in this case we
  //  know exactly what documents to return, the order doesn't matter nor does
  //  scoring.
  //  Ideally an implementation of DocList could be directly implemented off
  //  of a BitSet, but there are way too many methods to implement for a minor
  //  payoff.
  int matchDocs = matchDocIdsBS.cardinality();
  int[] docIds = new int[ Math.min(rows, matchDocs) ];
  DocIdSetIterator docIdIter = new BitSetIterator(matchDocIdsBS, 1);
  for (int i = 0; i < docIds.length; i++) {
    docIds[i] = docIdIter.nextDoc();
  }
  return new DocSlice(0, docIds.length, docIds, null, matchDocs, 1f, TotalHits.Relation.EQUAL_TO);
}
 
Example 2  Project: lucene-solr  File: IntervalFacets.java
private void accumIntervalsSingle(SortedDocValues sdv, DocIdSetIterator disi, Bits bits) throws IOException {
  // First update the ordinals in the intervals to this segment
  for (FacetInterval interval : intervals) {
    interval.updateContext(sdv);
  }
  int doc;
  while ((doc = disi.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
    if (bits != null && bits.get(doc) == false) {
      continue;
    }
    if (doc > sdv.docID()) {
      sdv.advance(doc);
    }
    if (doc == sdv.docID()) {
      accumInterval(sdv.ordValue());
    }
  }
}
 
Example 3  Project: lucene-solr  File: ExportWriter.java
protected void identifyLowestSortingUnexportedDocs(List<LeafReaderContext> leaves, SortDoc sortDoc, SortQueue queue) throws IOException {
  queue.reset();
  SortDoc top = queue.top();
  for (int i = 0; i < leaves.size(); i++) {
    sortDoc.setNextReader(leaves.get(i));
    DocIdSetIterator it = new BitSetIterator(sets[i], 0); // cost is not useful here
    int docId;
    while ((docId = it.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
      sortDoc.setValues(docId);
      if (top.lessThan(sortDoc)) {
        top.setValues(sortDoc);
        top = queue.updateTop();
      }
    }
  }
}
 
Example 4  Project: lucene-solr  File: DocIdSetBuilder.java
/**
 * Add the content of the provided {@link DocIdSetIterator} to this builder.
 * NOTE: if you need to build a {@link DocIdSet} out of a single
 * {@link DocIdSetIterator}, you should rather use {@link RoaringDocIdSet.Builder}.
 */
public void add(DocIdSetIterator iter) throws IOException {
  if (bitSet != null) {
    bitSet.or(iter);
    return;
  }
  int cost = (int) Math.min(Integer.MAX_VALUE, iter.cost());
  BulkAdder adder = grow(cost);
  for (int i = 0; i < cost; ++i) {
    int doc = iter.nextDoc();
    if (doc == DocIdSetIterator.NO_MORE_DOCS) {
      return;
    }
    adder.add(doc);
  }
  for (int doc = iter.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = iter.nextDoc()) {
    grow(1).add(doc);
  }
}
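A hedged usage sketch for the builder method above; maxDoc and the iterator list are assumed inputs, not taken from the original file:

import java.io.IOException;
import java.util.List;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.util.DocIdSetBuilder;

// Union several iterators into one DocIdSet. The add() shown above first fills
// a cost-bounded buffer, then falls back to one-at-a-time growth; once the
// builder upgrades to a bitset internally, add() delegates to bitSet.or(iter).
static DocIdSet union(int maxDoc, List<DocIdSetIterator> iterators) throws IOException {
  DocIdSetBuilder builder = new DocIdSetBuilder(maxDoc);
  for (DocIdSetIterator it : iterators) {
    builder.add(it);
  }
  return builder.build();
}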
 
Example 5  Project: crate  File: GroupingLongCollectorBenchmark.java
@Benchmark
public LongObjectHashMap<Long> measureGroupingOnNumericDocValues() throws Exception {
    Weight weight = searcher.createWeight(new MatchAllDocsQuery(), ScoreMode.COMPLETE_NO_SCORES, 1.0f);
    LeafReaderContext leaf = searcher.getTopReaderContext().leaves().get(0);
    Scorer scorer = weight.scorer(leaf);
    NumericDocValues docValues = DocValues.getNumeric(leaf.reader(), "x");
    DocIdSetIterator docIt = scorer.iterator();
    LongObjectHashMap<Long> sumByKey = new LongObjectHashMap<>();
    for (int docId = docIt.nextDoc(); docId != DocIdSetIterator.NO_MORE_DOCS; docId = docIt.nextDoc()) {
        if (docValues.advanceExact(docId)) {
            long number = docValues.longValue();
            sumByKey.compute(number, (key, oldValue) -> {
                if (oldValue == null) {
                    return number;
                } else {
                    return oldValue + number;
                }
            });
        }
    }
    return sumByKey;
}
 
Example 6  Project: lucene-solr  File: RoaringDocIdSet.java
/** Add the content of the provided {@link DocIdSetIterator}. */
public Builder add(DocIdSetIterator disi) throws IOException {
  for (int doc = disi.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = disi.nextDoc()) {
    add(doc);
  }
  return this;
}
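This is the single-iterator path that Example 4's javadoc recommends. A minimal sketch, assuming maxDoc and disi are supplied by the caller:

import java.io.IOException;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.util.RoaringDocIdSet;

// Drain an iterator into a compact RoaringDocIdSet. The add() above consumes
// the iterator to exhaustion, so it must be unpositioned (nextDoc() not yet called).
static DocIdSet toDocIdSet(int maxDoc, DocIdSetIterator disi) throws IOException {
  return new RoaringDocIdSet.Builder(maxDoc).add(disi).build();
}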
 
Example 7  Project: lucene-solr  File: DocValuesFacets.java
/** "typical" multi-valued faceting: not too many unique values, no prefixing. maps to global ordinals as a separate step */
static void accumMultiSeg(int counts[], SortedSetDocValues si, DocIdSetIterator disi, int subIndex, OrdinalMap map) throws IOException {
  // First count in seg-ord space:
  final int segCounts[];
  if (map == null) {
    segCounts = counts;
  } else {
    segCounts = new int[1+(int)si.getValueCount()];
  }
  
  int doc;
  while ((doc = disi.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
    if (si.advanceExact(doc)) {
      int term = (int) si.nextOrd();
      do {
        segCounts[1+term]++;
      } while ((term = (int)si.nextOrd()) >= 0);
    } else {
      counts[0]++; // missing
    }
  }
  
  // migrate to global ords (if necessary)
  if (map != null) {
    migrateGlobal(counts, segCounts, subIndex, map);
  }
}
 
Example 8  Project: lucene-solr  File: TestFixedBitSet.java
void doIterate2(java.util.BitSet a, FixedBitSet b) throws IOException {
  assertEquals(a.cardinality(), b.cardinality());
  int aa=-1,bb=-1;
  DocIdSetIterator iterator = new BitSetIterator(b, 0);
  do {
    aa = a.nextSetBit(aa+1);
    bb = random().nextBoolean() ? iterator.nextDoc() : iterator.advance(bb + 1);
    assertEquals(aa == -1 ? DocIdSetIterator.NO_MORE_DOCS : aa, bb);
  } while (aa>=0);
}
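The test above leans on the DocIdSetIterator contract that advance(target) lands on the first document >= target, so advancing one past the current document is interchangeable with nextDoc(). A standalone sketch of that equivalence; the bit positions are arbitrary:

import java.io.IOException;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.util.BitSetIterator;
import org.apache.lucene.util.FixedBitSet;

// Two iterators over the same bitset: one driven by nextDoc(), one by advance().
static void demoAdvanceEquivalence() throws IOException {
  FixedBitSet bits = new FixedBitSet(100);
  bits.set(3);
  bits.set(40);
  bits.set(97);
  DocIdSetIterator byNext = new BitSetIterator(bits, 3);    // cost = cardinality
  DocIdSetIterator byAdvance = new BitSetIterator(bits, 3);
  int a = byNext.nextDoc();      // 3
  int b = byAdvance.advance(0);  // also 3: the first set bit >= 0
  while (a != DocIdSetIterator.NO_MORE_DOCS) {
    assert a == b;
    a = byNext.nextDoc();
    b = byAdvance.advance(b + 1);
  }
}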
 
Example 9  Project: lucene-solr  File: FacetFieldProcessorByArrayDV.java
private void collectDocs(SortedSetDocValues multiDv, DocIdSetIterator disi, LongValues toGlobal) throws IOException {
  int doc;
  while ((doc = disi.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
    if (multiDv.advanceExact(doc)) {
      for(;;) {
        int segOrd = (int)multiDv.nextOrd();
        if (segOrd < 0) break;
        collect(doc, segOrd, toGlobal);
      }
    }
  }
}
 
Example 10  Project: lucene-solr  File: LongValueFacetCounts.java
private void countOneSegment(NumericDocValues values, MatchingDocs hits) throws IOException {
  DocIdSetIterator it = ConjunctionDISI.intersectIterators(
                           Arrays.asList(hits.bits.iterator(), values));

  for (int doc = it.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = it.nextDoc()) {
    increment(values.longValue());
    totCount++;
  }
}
 
Example 11  Project: lucene-solr  File: FacetFieldProcessorByArrayDV.java
private void collectDocs(SortedDocValues singleDv, DocIdSetIterator disi, LongValues toGlobal) throws IOException {
  int doc;
  while ((doc = disi.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
    if (singleDv.advanceExact(doc)) {
      int segOrd = singleDv.ordValue();
      collect(doc, segOrd, toGlobal);
    }
  }
}
 
Example 12  Project: lucene-solr  File: AnalyticsDriver.java
/**
 * Drive the collection of reduction data. This includes overall data as well as faceted data.
 *
 * @param manager the request manager to drive
 * @param searcher the searcher to collect results from
 * @param filter the filter that represents the overall query
 * @param queryRequest the Solr query request
 * @throws IOException if an error occurs while reading from Solr
 */
public static void drive(AnalyticsRequestManager manager, SolrIndexSearcher searcher, Filter filter, SolrQueryRequest queryRequest) throws IOException {
  StreamingInfo streamingInfo = manager.getStreamingFacetInfo();
  Iterable<StreamingFacet> streamingFacets = streamingInfo.streamingFacets;
  ReductionCollectionManager collectionManager = streamingInfo.streamingCollectionManager;

  Iterable<FacetValueQueryExecuter> facetExecuters = manager.getFacetExecuters(filter, queryRequest);

  // Streaming phase (Overall results & Value/Pivot Facets)
  // Loop through all documents and collect reduction data for streaming facets and overall results
  if (collectionManager.needsCollection()) {
    List<LeafReaderContext> contexts = searcher.getTopReaderContext().leaves();
    for (int leafNum = 0; leafNum < contexts.size(); leafNum++) {
      LeafReaderContext context = contexts.get(leafNum);
      DocIdSet dis = filter.getDocIdSet(context, null); // solr docsets already exclude any deleted docs
      if (dis == null) {
        continue;
      }
      DocIdSetIterator disi = dis.iterator();
      if (disi != null) {
        collectionManager.doSetNextReader(context);
        int doc = disi.nextDoc();
        while( doc != DocIdSetIterator.NO_MORE_DOCS){
          // Add a document to the statistics being generated
          collectionManager.collect(doc);
          streamingFacets.forEach( facet -> facet.addFacetValueCollectionTargets() );
          collectionManager.apply();
          doc = disi.nextDoc();
        }
      }
    }
  }

  // Executing phase (Query/Range Facets)
  // Send additional Solr Queries to compute facet values
  for (FacetValueQueryExecuter executer : facetExecuters) {
    executer.execute(searcher);
  }
}
 
Example 13  Project: lucene-solr  File: PointValues.java
/** Similar to {@link IntersectVisitor#visit(int, byte[])}, but in this case the packedValue
 * can have more than one docID associated with it. The provided iterator should not escape the
 * scope of this method so that implementations of PointValues are free to reuse it. */
default void visit(DocIdSetIterator iterator, byte[] packedValue) throws IOException {
  int docID;
  while ((docID = iterator.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
    visit(docID, packedValue);
  }
}
 
Example 14  Project: lucene-solr  File: FieldCacheImpl.java
private BitsEntry createValueDocValues(LeafReader reader, String field) throws IOException {
  FieldInfo fieldInfo = reader.getFieldInfos().fieldInfo(field);
  
  DocValuesType dvType = fieldInfo.getDocValuesType();
  DocIdSetIterator iterator;
  switch(dvType) {
  case NUMERIC:
    iterator = reader.getNumericDocValues(field);
    break;
  case BINARY:
    iterator = reader.getBinaryDocValues(field);
    break;
  case SORTED:
    iterator = reader.getSortedDocValues(field);
    break;
  case SORTED_NUMERIC:
    iterator = reader.getSortedNumericDocValues(field);
    break;
  case SORTED_SET:
    iterator = reader.getSortedSetDocValues(field);
    break;
  default:
    throw new AssertionError();
  }

  FixedBitSet bits = new FixedBitSet(reader.maxDoc());
  while (true) {
    int docID = iterator.nextDoc();
    if (docID == DocIdSetIterator.NO_MORE_DOCS) {
      break;
    }
    bits.set(docID);
  }

  return new BitsEntry(bits);
}
 
Example 15  Project: lucene-solr  File: CollapsingQParserPlugin.java
public void finish() throws IOException {
  if(contexts.length == 0) {
    return;
  }

  int currentContext = 0;
  int currentDocBase = 0;
  this.collapseValues = DocValues.getNumeric(contexts[currentContext].reader(), this.collapseField);
  int nextDocBase = currentContext+1 < contexts.length ? contexts[currentContext+1].docBase : maxDoc;
  leafDelegate = delegate.getLeafCollector(contexts[currentContext]);
  ScoreAndDoc dummy = new ScoreAndDoc();
  leafDelegate.setScorer(dummy);
  DocIdSetIterator it = new BitSetIterator(collapseStrategy.getCollapsedSet(), 0); // cost is not useful here
  int globalDoc = -1;
  int nullScoreIndex = 0;
  IntIntHashMap cmap = collapseStrategy.getCollapseMap();
  IntFloatDynamicMap scores = collapseStrategy.getScores();
  FloatArrayList nullScores = collapseStrategy.getNullScores();
  MergeBoost mergeBoost = collapseStrategy.getMergeBoost();
  float nullScore = collapseStrategy.getNullScore();

  while((globalDoc = it.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {

    while(globalDoc >= nextDocBase) {
      currentContext++;
      currentDocBase = contexts[currentContext].docBase;
      nextDocBase = currentContext+1 < contexts.length ? contexts[currentContext+1].docBase : maxDoc;
      leafDelegate = delegate.getLeafCollector(contexts[currentContext]);
      leafDelegate.setScorer(dummy);
      this.collapseValues = DocValues.getNumeric(contexts[currentContext].reader(), this.collapseField);
    }

    int contextDoc = globalDoc-currentDocBase;

    if(this.needsScores){
      int collapseValue;
      if (collapseValues.advanceExact(contextDoc)) {
        collapseValue = (int) collapseValues.longValue();
      } else {
        collapseValue = 0;
      }
      
      if(collapseValue != nullValue) {
        int pointer = cmap.get(collapseValue);
        dummy.score = scores.get(pointer);
      } else if (mergeBoost != null && mergeBoost.boost(globalDoc)) {
//It's an elevated doc so no score is needed
        dummy.score = 0F;
      } else if (nullPolicy == CollapsingPostFilter.NULL_POLICY_COLLAPSE) {
        dummy.score = nullScore;
      } else if(nullPolicy == CollapsingPostFilter.NULL_POLICY_EXPAND) {
        dummy.score = nullScores.get(nullScoreIndex++);
      }
    }

    dummy.docId = contextDoc;
    leafDelegate.collect(contextDoc);
  }

  if(delegate instanceof DelegatingCollector) {
    ((DelegatingCollector) delegate).finish();
  }
}
 
Example 16  Project: Elasticsearch  File: FetchPhase.java
private InternalSearchHit.InternalNestedIdentity getInternalNestedIdentity(SearchContext context, int nestedSubDocId, LeafReaderContext subReaderContext, DocumentMapper documentMapper, ObjectMapper nestedObjectMapper) throws IOException {
    int currentParent = nestedSubDocId;
    ObjectMapper nestedParentObjectMapper;
    ObjectMapper current = nestedObjectMapper;
    String originalName = nestedObjectMapper.name();
    InternalSearchHit.InternalNestedIdentity nestedIdentity = null;
    do {
        Query parentFilter;
        nestedParentObjectMapper = documentMapper.findParentObjectMapper(current);
        if (nestedParentObjectMapper != null) {
            if (nestedParentObjectMapper.nested().isNested() == false) {
                current = nestedParentObjectMapper;
                continue;
            }
            parentFilter = nestedParentObjectMapper.nestedTypeFilter();
        } else {
            parentFilter = Queries.newNonNestedFilter();
        }

        Query childFilter = nestedObjectMapper.nestedTypeFilter();
        if (childFilter == null) {
            current = nestedParentObjectMapper;
            continue;
        }
        final Weight childWeight = context.searcher().createNormalizedWeight(childFilter, false);
        Scorer childScorer = childWeight.scorer(subReaderContext);
        if (childScorer == null) {
            current = nestedParentObjectMapper;
            continue;
        }
        DocIdSetIterator childIter = childScorer.iterator();

        BitSet parentBits = context.bitsetFilterCache().getBitSetProducer(parentFilter).getBitSet(subReaderContext);

        int offset = 0;
        int nextParent = parentBits.nextSetBit(currentParent);
        for (int docId = childIter.advance(currentParent + 1); docId < nextParent && docId != DocIdSetIterator.NO_MORE_DOCS; docId = childIter.nextDoc()) {
            offset++;
        }
        currentParent = nextParent;
        current = nestedObjectMapper = nestedParentObjectMapper;
        int currentPrefix = current == null ? 0 : current.name().length() + 1;
        nestedIdentity = new InternalSearchHit.InternalNestedIdentity(originalName.substring(currentPrefix), offset, nestedIdentity);
        if (current != null) {
            originalName = current.name();
        }
    } while (current != null);
    return nestedIdentity;
}
 
Example 17  Project: lucene-solr  File: SparseFixedBitSet.java
/**
 * {@link #or(DocIdSetIterator)} impl that works best when <code>it</code> is dense
 */
private void orDense(DocIdSetIterator it) throws IOException {
  checkUnpositioned(it);
  // The goal here is to try to take advantage of the ordering of documents
  // to build the data-structure more efficiently
  // NOTE: this heavily relies on the fact that shifts are mod 64
  final int firstDoc = it.nextDoc();
  if (firstDoc == DocIdSetIterator.NO_MORE_DOCS) {
    return;
  }
  int i4096 = firstDoc >>> 12;
  int i64 = firstDoc >>> 6;
  long index = 1L << i64;
  long currentLong = 1L << firstDoc;
  // we store at most 64 longs per block so preallocate in order never to have to resize
  long[] longs = new long[64];
  int numLongs = 0;

  for (int doc = it.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = it.nextDoc()) {
    final int doc64 = doc >>> 6;
    if (doc64 == i64) {
      // still in the same long, just set the bit
      currentLong |= 1L << doc;
    } else {
      longs[numLongs++] = currentLong;

      final int doc4096 = doc >>> 12;
      if (doc4096 == i4096) {
        index |= 1L << doc64;
      } else {
        // we are on a new block, flush what we buffered
        or(i4096, index, longs, numLongs);
        // and reset state for the new block
        i4096 = doc4096;
        index = 1L << doc64;
        numLongs = 0;
      }

      // we are on a new long, reset state
      i64 = doc64;
      currentLong = 1L << doc;
    }
  }

  // flush
  longs[numLongs++] = currentLong;
  or(i4096, index, longs, numLongs);
}
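orDense is a private fast path; external callers reach it through the public BitSet.or(DocIdSetIterator), which SparseFixedBitSet routes to a sparse or dense implementation based on the iterator's cost. A minimal sketch, assuming maxDoc and an unpositioned iterator as inputs:

import java.io.IOException;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.util.SparseFixedBitSet;

// Collect an iterator's documents into a SparseFixedBitSet. or() requires an
// unpositioned iterator, as checkUnpositioned() in orDense above enforces.
static SparseFixedBitSet collect(int maxDoc, DocIdSetIterator it) throws IOException {
  SparseFixedBitSet bits = new SparseFixedBitSet(maxDoc);
  bits.or(it);
  return bits;
}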
 
Example 18  Project: lucene-solr  File: CollapsingQParserPlugin.java
@Override
public void finish() throws IOException {
  if(contexts.length == 0) {
    return;
  }

  if(nullScore > -1) {
    collapsedSet.set(nullDoc);
  }

  //Handle the boosted docs.
  if(this.boostKeys != null) {
    int s = boostKeys.size();
    for(int i=0; i<s; i++) {
      int key = this.boostKeys.get(i);
      if(key != nullValue) {
        cmap.remove(key);
      }
      //Add the boosted docs to the collapsedSet
      this.collapsedSet.set(boostDocs.get(i));
    }
  }

  Iterator<IntLongCursor> it1 = cmap.iterator();

  while(it1.hasNext()) {
    IntLongCursor cursor = it1.next();
    int doc = (int)cursor.value;
    collapsedSet.set(doc);
  }

  int currentContext = 0;
  int currentDocBase = 0;

  collapseValues = DocValues.getNumeric(contexts[currentContext].reader(), this.field);
  int nextDocBase = currentContext+1 < contexts.length ? contexts[currentContext+1].docBase : maxDoc;
  leafDelegate = delegate.getLeafCollector(contexts[currentContext]);
  ScoreAndDoc dummy = new ScoreAndDoc();
  leafDelegate.setScorer(dummy);
  DocIdSetIterator it = new BitSetIterator(collapsedSet, 0L); // cost is not useful here
  int globalDoc = -1;
  int nullScoreIndex = 0;
  while((globalDoc = it.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {

    while(globalDoc >= nextDocBase) {
      currentContext++;
      currentDocBase = contexts[currentContext].docBase;
      nextDocBase = currentContext+1 < contexts.length ? contexts[currentContext+1].docBase : maxDoc;
      leafDelegate = delegate.getLeafCollector(contexts[currentContext]);
      leafDelegate.setScorer(dummy);
      collapseValues = DocValues.getNumeric(contexts[currentContext].reader(), this.field);
    }

    int contextDoc = globalDoc-currentDocBase;
    int collapseValue;
    if (collapseValues.advanceExact(contextDoc)) {
      collapseValue = (int) collapseValues.longValue();
    } else {
      collapseValue = 0;
    }

    if(collapseValue != nullValue) {
      long scoreDoc = cmap.get(collapseValue);
      dummy.score = Float.intBitsToFloat((int)(scoreDoc>>32));
    } else if(boosts && mergeBoost.boost(globalDoc)) {
      //Ignore so boosted documents don't mess up the null scoring policies.
    } else if (nullPolicy == CollapsingPostFilter.NULL_POLICY_COLLAPSE) {
      dummy.score = nullScore;
    } else if(nullPolicy == CollapsingPostFilter.NULL_POLICY_EXPAND) {
      dummy.score = nullScores.get(nullScoreIndex++);
    }

    dummy.docId = contextDoc;
    leafDelegate.collect(contextDoc);
  }

  if(delegate instanceof DelegatingCollector) {
    ((DelegatingCollector) delegate).finish();
  }
}
 
Example 19  Project: lucene-solr  File: DocSetBuilder.java
public static void add(FixedBitSet bitSet, DocIdSetIterator iter, int base) throws IOException {
  for (int doc = iter.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = iter.nextDoc()) {
    bitSet.set(doc + base);
  }
}
 
Example 20  Project: lucene-solr  File: CollapsingQParserPlugin.java
public void finish() throws IOException {
  if(contexts.length == 0) {
    return;
  }

  int currentContext = 0;
  int currentDocBase = 0;

  this.collapseValues = collapseValuesProducer.getSorted(null);
  if(collapseValues instanceof MultiDocValues.MultiSortedDocValues) {
    this.multiSortedDocValues = (MultiDocValues.MultiSortedDocValues)collapseValues;
    this.ordinalMap = multiSortedDocValues.mapping;
  }
  if(ordinalMap != null) {
    this.segmentValues = this.multiSortedDocValues.values[currentContext];
    this.segmentOrdinalMap = this.ordinalMap.getGlobalOrds(currentContext);
  } else {
    this.segmentValues = collapseValues;
  }

  int nextDocBase = currentContext+1 < contexts.length ? contexts[currentContext+1].docBase : maxDoc;
  leafDelegate = delegate.getLeafCollector(contexts[currentContext]);
  ScoreAndDoc dummy = new ScoreAndDoc();
  leafDelegate.setScorer(dummy);
  DocIdSetIterator it = new BitSetIterator(collapseStrategy.getCollapsedSet(), 0); // cost is not useful here
  int globalDoc = -1;
  int nullScoreIndex = 0;
  IntFloatDynamicMap scores = collapseStrategy.getScores();
  FloatArrayList nullScores = collapseStrategy.getNullScores();
  float nullScore = collapseStrategy.getNullScore();

  MergeBoost mergeBoost = collapseStrategy.getMergeBoost();
  while((globalDoc = it.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {


    while(globalDoc >= nextDocBase) {
      currentContext++;
      currentDocBase = contexts[currentContext].docBase;
      nextDocBase = currentContext+1 < contexts.length ? contexts[currentContext+1].docBase : maxDoc;
      leafDelegate = delegate.getLeafCollector(contexts[currentContext]);
      leafDelegate.setScorer(dummy);
      if(ordinalMap != null) {
        this.segmentValues = this.multiSortedDocValues.values[currentContext];
        this.segmentOrdinalMap = this.ordinalMap.getGlobalOrds(currentContext);
      }
    }

    int contextDoc = globalDoc-currentDocBase;

    if(this.needsScores){
      int ord = -1;
      if(this.ordinalMap != null) {
        //Handle ordinalMapping case
        if (segmentValues.advanceExact(contextDoc)) {
          ord = (int) segmentOrdinalMap.get(segmentValues.ordValue());
        }
      } else {
        //Handle top Level FieldCache or Single Segment Case
        if (segmentValues.advanceExact(globalDoc)) {
          ord = segmentValues.ordValue();
        }
      }

      if(ord > -1) {
        dummy.score = scores.get(ord);
      } else if (mergeBoost != null && mergeBoost.boost(globalDoc)) {
        //It's an elevated doc so no score is needed
        dummy.score = 0F;
      } else if (nullPolicy == CollapsingPostFilter.NULL_POLICY_COLLAPSE) {
        dummy.score = nullScore;
      } else if(nullPolicy == CollapsingPostFilter.NULL_POLICY_EXPAND) {
        dummy.score = nullScores.get(nullScoreIndex++);
      }
    }

    dummy.docId = contextDoc;
    leafDelegate.collect(contextDoc);
  }

  if(delegate instanceof DelegatingCollector) {
    ((DelegatingCollector) delegate).finish();
  }
}