Class org.apache.lucene.search.DocIdSet: source code examples

The following examples show how to use the org.apache.lucene.search.DocIdSet API and its common idioms. Each snippet is taken from an open-source project, and the full source can be viewed on GitHub.
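
Before the project snippets, a minimal self-contained sketch of the core contract may help: a DocIdSet hands out a DocIdSetIterator over matching document IDs, iterator() may legally return null for an empty set, and iteration ends at DocIdSetIterator.NO_MORE_DOCS. The class names below are real Lucene classes that also appear in the examples; the doc IDs and the maxDoc of 100 are arbitrary illustration values.

import java.io.IOException;

import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.util.BitDocIdSet;
import org.apache.lucene.util.FixedBitSet;

public class DocIdSetDemo {
  public static void main(String[] args) throws IOException {
    FixedBitSet bits = new FixedBitSet(100); // capacity = maxDoc
    bits.set(3);
    bits.set(42);
    bits.set(97);
    DocIdSet set = new BitDocIdSet(bits);

    // The contract allows iterator() to return null for an empty set,
    // so callers must check before iterating.
    DocIdSetIterator it = set.iterator();
    if (it != null) {
      for (int doc = it.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = it.nextDoc()) {
        System.out.println("matched doc " + doc); // prints 3, 42, 97
      }
    }
  }
}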

Example 1 Project: Elasticsearch   File: ParentQuery.java
@Override
public Scorer scorer(LeafReaderContext context) throws IOException {
    DocIdSet childrenDocSet = childrenFilter.getDocIdSet(context, null);
    // we forcefully apply live docs here so that deleted children don't give matching parents
    childrenDocSet = BitsFilteredDocIdSet.wrap(childrenDocSet, context.reader().getLiveDocs());
    if (Lucene.isEmpty(childrenDocSet)) {
        return null;
    }
    final DocIdSetIterator childIterator = childrenDocSet.iterator();
    if (childIterator == null) {
        return null;
    }
    SortedDocValues bytesValues = globalIfd.load(context).getOrdinalsValues(parentType);
    if (bytesValues == null) {
        return null;
    }

    return new ChildScorer(this, parentIdxs, scores, childIterator, bytesValues);
}
 
Example 2 Project: Elasticsearch   File: ParentConstantScoreQuery.java
@Override
public Scorer scorer(LeafReaderContext context) throws IOException {
    DocIdSet childrenDocIdSet = childrenFilter.getDocIdSet(context, null);
    if (Lucene.isEmpty(childrenDocIdSet)) {
        return null;
    }

    SortedDocValues globalValues = globalIfd.load(context).getOrdinalsValues(parentType);
    if (globalValues != null) {
        // we forcefully apply live docs here so that deleted children don't give matching parents
        childrenDocIdSet = BitsFilteredDocIdSet.wrap(childrenDocIdSet, context.reader().getLiveDocs());
        DocIdSetIterator innerIterator = childrenDocIdSet.iterator();
        if (innerIterator != null) {
            ChildrenDocIdIterator childrenDocIdIterator = new ChildrenDocIdIterator(
                    innerIterator, parentOrds, globalValues
            );
            return ConstantScorer.create(childrenDocIdIterator, this, queryWeight);
        }
    }
    return null;
}
 
Example 3 Project: lucene-solr   File: AbstractPrefixTreeQuery.java
@Override
public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
  return new ConstantScoreWeight(this, boost) {
    @Override
    public Scorer scorer(LeafReaderContext context) throws IOException {
      DocIdSet docSet = getDocIdSet(context);
      if (docSet == null) {
        return null;
      }
      DocIdSetIterator disi = docSet.iterator();
      if (disi == null) {
        return null;
      }
      return new ConstantScoreScorer(this, score(), scoreMode, disi);
    }

    @Override
    public boolean isCacheable(LeafReaderContext ctx) {
      return true;
    }
  };
}
 
Example 4 Project: lucene-solr   File: IntersectsRPTVerifyQuery.java
@Override
protected DocIdSet finish() throws IOException {
  if (exactIsEmpty) {
    exactDocIdSet = null;
  } else {
    exactDocIdSet = exactBuilder.build();
  }
  if (approxIsEmpty) {
    approxDocIdSet = exactDocIdSet;//optimization
  } else {
    if (exactDocIdSet != null) {
      approxBuilder.add(exactDocIdSet.iterator());
    }
    approxDocIdSet = approxBuilder.build();
  }
  return null; // unused in this re-use of AVPTQ (AbstractVisitingPrefixTreeQuery)
}
 
Example 5 Project: lucene-solr   File: BaseBitSetTestCase.java
private DocIdSet randomCopy(BitSet set, int numBits) throws IOException {
  switch (random().nextInt(5)) {
    case 0:
      return new BitDocIdSet(set, set.cardinality());
    case 1:
      return new BitDocIdSet(copyOf(set, numBits), set.cardinality());
    case 2:
      final RoaringDocIdSet.Builder builder = new RoaringDocIdSet.Builder(numBits);
      for (int i = set.nextSetBit(0); i != DocIdSetIterator.NO_MORE_DOCS; i = i + 1 >= numBits ? DocIdSetIterator.NO_MORE_DOCS : set.nextSetBit(i + 1)) {
        builder.add(i);
      }
      return builder.build();
    case 3:
      FixedBitSet fbs = new FixedBitSet(numBits);
      fbs.or(new BitSetIterator(set, 0));
      return new BitDocIdSet(fbs);
    case 4:
      SparseFixedBitSet sfbs = new SparseFixedBitSet(numBits);
      sfbs.or(new BitSetIterator(set, 0));
      return new BitDocIdSet(sfbs);
    default:
      fail();
      return null;
  }
}
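
Example 5 touches the main concrete DocIdSet implementations: BitDocIdSet over a FixedBitSet or SparseFixedBitSet, and RoaringDocIdSet. As a quick sketch of the RoaringDocIdSet.Builder pattern used above, with arbitrary illustration values: doc IDs must be added in increasing order, and the builder is consumed by build().

RoaringDocIdSet.Builder builder = new RoaringDocIdSet.Builder(1000); // maxDoc = 1000
builder.add(7);   // doc IDs must arrive in increasing order
builder.add(250);
builder.add(999);
DocIdSet roaring = builder.build();       // consumes the builder
DocIdSetIterator it = roaring.iterator(); // iterate as with any DocIdSet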
 
Example 6 Project: lucene-solr   File: BaseBitSetTestCase.java
private void testOr(float load) throws IOException {
  final int numBits = 1 + random().nextInt(100000);
  BitSet set1 = new JavaUtilBitSet(randomSet(numBits, 0), numBits); // empty
  T set2 = copyOf(set1, numBits);
  
  final int iterations = atLeast(10);
  for (int iter = 0; iter < iterations; ++iter) {
    DocIdSet otherSet = randomCopy(new JavaUtilBitSet(randomSet(numBits, load), numBits), numBits);
    DocIdSetIterator otherIterator = otherSet.iterator();
    if (otherIterator != null) {
      set1.or(otherIterator);
      set2.or(otherSet.iterator());
      assertEquals(set1, set2, numBits);
    }
  }
}
 
Example 7 Project: lucene-solr   File: TestDocIdSetBuilder.java
private void assertEquals(DocIdSet d1, DocIdSet d2) throws IOException {
  if (d1 == null) {
    if (d2 != null) {
      assertEquals(DocIdSetIterator.NO_MORE_DOCS, d2.iterator().nextDoc());
    }
  } else if (d2 == null) {
    assertEquals(DocIdSetIterator.NO_MORE_DOCS, d1.iterator().nextDoc());
  } else {
    DocIdSetIterator i1 = d1.iterator();
    DocIdSetIterator i2 = d2.iterator();
    for (int doc = i1.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = i1.nextDoc()) {
      assertEquals(doc, i2.nextDoc());
    }
    assertEquals(DocIdSetIterator.NO_MORE_DOCS, i2.nextDoc());
  }
}
 
Example 8 Project: lucene-solr   File: TestDocIdSetBuilder.java
public void testSparse() throws IOException {
  final int maxDoc = 1000000 + random().nextInt(1000000);
  DocIdSetBuilder builder = new DocIdSetBuilder(maxDoc);
  final int numIterators = 1 + random().nextInt(10);
  final FixedBitSet ref = new FixedBitSet(maxDoc);
  for (int i = 0; i < numIterators; ++i) {
    final int baseInc = 200000 + random().nextInt(10000);
    RoaringDocIdSet.Builder b = new RoaringDocIdSet.Builder(maxDoc);
    for (int doc = random().nextInt(100); doc < maxDoc; doc += baseInc + random().nextInt(10000)) {
      b.add(doc);
      ref.set(doc);
    }
    builder.add(b.build().iterator());
  }
  DocIdSet result = builder.build();
  assertTrue(result instanceof IntArrayDocIdSet);
  assertEquals(new BitDocIdSet(ref), result);
}
 
Example 9 Project: lucene-solr   File: TestDocIdSetBuilder.java
public void testDense() throws IOException {
  final int maxDoc = 1000000 + random().nextInt(1000000);
  DocIdSetBuilder builder = new DocIdSetBuilder(maxDoc);
  final int numIterators = 1 + random().nextInt(10);
  final FixedBitSet ref = new FixedBitSet(maxDoc);
  for (int i = 0; i < numIterators; ++i) {
    RoaringDocIdSet.Builder b = new RoaringDocIdSet.Builder(maxDoc);
    for (int doc = random().nextInt(1000); doc < maxDoc; doc += 1 + random().nextInt(100)) {
      b.add(doc);
      ref.set(doc);
    }
    builder.add(b.build().iterator());
  }
  DocIdSet result = builder.build();
  assertTrue(result instanceof BitDocIdSet);
  assertEquals(new BitDocIdSet(ref), result);
}
 
Example 10 Project: lucene-solr   File: ValueSourceRangeFilter.java
@Override
@SuppressWarnings({"rawtypes"})
public DocIdSet getDocIdSet(final Map context, final LeafReaderContext readerContext, Bits acceptDocs) throws IOException {
  // NB the IndexSearcher parameter here can be null because Filter Weights don't
  // actually use it.
  Weight weight = createWeight(null, ScoreMode.COMPLETE, 1);
  return BitsFilteredDocIdSet.wrap(new DocIdSet() {
     @Override
     public DocIdSetIterator iterator() throws IOException {
       @SuppressWarnings({"unchecked"})
       Scorer scorer = valueSource.getValues(context, readerContext).getRangeScorer(weight, readerContext, lowerVal, upperVal, includeLower, includeUpper);
       return scorer == null ? null : scorer.iterator();
     }
     @Override
     public Bits bits() {
       return null;  // don't use random access
     }

     @Override
     public long ramBytesUsed() {
       return 0L;
     }
   }, acceptDocs);
}
 
Example 11 Project: lucene-solr   File: CrossCollectionJoinQuery.java
@Override
public Scorer scorer(LeafReaderContext context) throws IOException {
  if (filter == null) {
    filter = getDocSet().getTopFilter();
  }

  DocIdSet readerSet = filter.getDocIdSet(context, null);
  if (readerSet == null) {
    return null;
  }
  DocIdSetIterator readerSetIterator = readerSet.iterator();
  if (readerSetIterator == null) {
    return null;
  }
  return new ConstantScoreScorer(this, score(), scoreMode, readerSetIterator);
}
 
Example 12

public static DocIdSet getFullyEmptyDocIdSet(int maxDoc) {
  Bits bits = getFullyEmptyBits(maxDoc);
  return new DocIdSet() {
    @Override
    public DocIdSetIterator iterator() throws IOException {
      return getFullyEmptyDocIdSetIterator(maxDoc);
    }

    @Override
    public Bits bits() throws IOException {
      return bits;
    }

    @Override
    public boolean isCacheable() {
      return true;
    }
  };
}
 
Example 13

public static DocIdSet getFullySetDocIdSet(int maxDoc) {
  Bits bits = getFullySetBits(maxDoc);
  return new DocIdSet() {
    @Override
    public DocIdSetIterator iterator() throws IOException {
      return getFullySetDocIdSetIterator(maxDoc);
    }

    @Override
    public Bits bits() throws IOException {
      return bits;
    }

    @Override
    public boolean isCacheable() {
      return true;
    }
  };
}
 
Example 14 Project: incubator-retired-blur   File: FilterCache.java
private DocIdSet docIdSetToCache(DocIdSet docIdSet, AtomicReader reader, String segmentName, Directory directory)
    throws IOException {
  if (docIdSet == null) {
    // this is better than returning null, as the nonnull result can be cached
    return DocIdSet.EMPTY_DOCIDSET;
  } else if (docIdSet.isCacheable()) {
    return docIdSet;
  } else {
    final DocIdSetIterator it = docIdSet.iterator();
    // null is allowed to be returned by iterator(),
    // in this case we wrap with the empty set,
    // which is cacheable.
    if (it == null) {
      return DocIdSet.EMPTY_DOCIDSET;
    } else {
      final IndexFileBitSet bits = new IndexFileBitSet(reader.maxDoc(), _id, segmentName, directory);
      if (!bits.exists()) {
        bits.create(it);
      }
      bits.load();
      return bits;
    }
  }
}
 
Example 15 Project: Elasticsearch   File: ChildrenQuery.java
@Override
public Scorer scorer(LeafReaderContext context) throws IOException {
    DocIdSet parentsSet = parentFilter.getDocIdSet(context, null);
    if (Lucene.isEmpty(parentsSet) || remaining == 0) {
        return null;
    }

    // We can't be sure of the fact that liveDocs have been applied, so we apply it here. The "remaining"
    // count down (short circuit) logic will then work as expected.
    DocIdSetIterator parents = BitsFilteredDocIdSet.wrap(parentsSet, context.reader().getLiveDocs()).iterator();

    if (parents != null) {
        SortedDocValues bytesValues = collector.globalIfd.load(context).getOrdinalsValues(parentType);
        if (bytesValues == null) {
            return null;
        }

        if (minChildren > 0 || maxChildren != 0 || scoreType == ScoreType.NONE) {
            switch (scoreType) {
            case NONE:
                DocIdSetIterator parentIdIterator = new CountParentOrdIterator(this, parents, collector, bytesValues,
                        minChildren, maxChildren);
                return ConstantScorer.create(parentIdIterator, this, queryWeight);
            case AVG:
                return new AvgParentCountScorer(this, parents, collector, bytesValues, minChildren, maxChildren);
            default:
                return new ParentCountScorer(this, parents, collector, bytesValues, minChildren, maxChildren);
            }
        }
        switch (scoreType) {
        case AVG:
            return new AvgParentScorer(this, parents, collector, bytesValues);
        default:
            return new ParentScorer(this, parents, collector, bytesValues);
        }
    }
    return null;
}
 
Example 16 Project: Elasticsearch   File: ChildrenConstantScoreQuery.java
@Override
public Scorer scorer(LeafReaderContext context) throws IOException {
    if (remaining == 0) {
        return null;
    }

    if (shortCircuitFilter != null) {
        DocIdSet docIdSet = shortCircuitFilter.getDocIdSet(context, null);
        if (!Lucene.isEmpty(docIdSet)) {
            DocIdSetIterator iterator = docIdSet.iterator();
            if (iterator != null) {
                return ConstantScorer.create(iterator, this, queryWeight);
            }
        }
        return null;
    }

    DocIdSet parentDocIdSet = this.parentFilter.getDocIdSet(context, null);
    if (!Lucene.isEmpty(parentDocIdSet)) {
        // We can't be sure of the fact that liveDocs have been applied, so we apply it here. The "remaining"
        // count down (short circuit) logic will then work as expected.
        parentDocIdSet = BitsFilteredDocIdSet.wrap(parentDocIdSet, context.reader().getLiveDocs());
        DocIdSetIterator innerIterator = parentDocIdSet.iterator();
        if (innerIterator != null) {
            LongBitSet parentOrds = collector.parentOrds;
            SortedDocValues globalValues = globalIfd.load(context).getOrdinalsValues(parentType);
            if (globalValues != null) {
                DocIdSetIterator parentIdIterator = new ParentOrdIterator(innerIterator, parentOrds, globalValues, this);
                return ConstantScorer.create(parentIdIterator, this, queryWeight);
            }
        }
    }
    return null;
}
 
Example 17 Project: lucene-solr   File: FacetsCollector.java
/** Sole constructor. */
public MatchingDocs(LeafReaderContext context, DocIdSet bits, int totalHits, float[] scores) {
  this.context = context;
  this.bits = bits;
  this.scores = scores;
  this.totalHits = totalHits;
}
 
Example 18 Project: lucene-solr   File: BaseDocIdSetTestCase.java
/** Test ram usage estimation. */
public void testRamBytesUsed() throws IOException {
  Random random = random();
  final int iters = 100;
  for (int i = 0; i < iters; ++i) {
    final int pow = random.nextInt(20);
    final int maxDoc = TestUtil.nextInt(random, 1, 1 << pow);
    final int numDocs = TestUtil.nextInt(random, 0, Math.min(maxDoc, 1 << TestUtil.nextInt(random, 0, pow)));
    final BitSet set = randomSet(maxDoc, numDocs);
    final DocIdSet copy = copyOf(set, maxDoc);
    final long actualBytes = ramBytesUsed(copy, maxDoc);
    final long expectedBytes = copy.ramBytesUsed();
    assertEquals(expectedBytes, actualBytes);
  }
}
 
Example 19 Project: lucene-solr   File: BaseDocIdSetTestCase.java
private long ramBytesUsed(DocIdSet set, int length) throws IOException {
  Dummy dummy = new Dummy();
  dummy.o1 = copyOf(new BitSet(length), length);
  dummy.o2 = set;
  long bytes1 = RamUsageTester.sizeOf(dummy);
  dummy.o2 = null;
  long bytes2 = RamUsageTester.sizeOf(dummy);
  return bytes1 - bytes2;
}
 
Example 20 Project: lucene-solr   File: DocIdSetBuilder.java
/**
 * Build a {@link DocIdSet} from the accumulated doc IDs.
 */
public DocIdSet build() {
  try {
    if (bitSet != null) {
      assert counter >= 0;
      final long cost = Math.round(counter / numValuesPerDoc);
      return new BitDocIdSet(bitSet, cost);
    } else {
      Buffer concatenated = concat(buffers);
      LSBRadixSorter sorter = new LSBRadixSorter();
      sorter.sort(PackedInts.bitsRequired(maxDoc - 1), concatenated.array, concatenated.length);
      final int l;
      if (multivalued) {
        l = dedup(concatenated.array, concatenated.length);
      } else {
        assert noDups(concatenated.array, concatenated.length);
        l = concatenated.length;
      }
      assert l <= concatenated.length;
      concatenated.array[l] = DocIdSetIterator.NO_MORE_DOCS;
      return new IntArrayDocIdSet(concatenated.array, l);
    }
  } finally {
    this.buffers = null;
    this.bitSet = null;
  }
}
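
For context, a short usage sketch of DocIdSetBuilder following the pattern of Examples 8 and 9; maxDoc, iteratorA and iteratorB are hypothetical placeholders. The builder unions any number of iterators and is consumed by a single build() call, returning an IntArrayDocIdSet for sparse inputs and a BitDocIdSet once the data turns dense, exactly the two outcomes asserted in those tests.

DocIdSetBuilder builder = new DocIdSetBuilder(maxDoc); // maxDoc is hypothetical
builder.add(iteratorA); // any DocIdSetIterator, e.g. from another DocIdSet
builder.add(iteratorB); // overlapping doc IDs are deduplicated by build()
DocIdSet result = builder.build(); // the builder must not be reused afterwards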
 
Example 21 Project: lucene-solr   File: RoaringDocIdSet.java
/** Sole constructor. */
public Builder(int maxDoc) {
  this.maxDoc = maxDoc;
  sets = new DocIdSet[(maxDoc + (1 << 16) - 1) >>> 16];
  lastDocId = -1;
  currentBlock = -1;
  buffer = new short[MAX_ARRAY_LENGTH];
}
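
The sets array length, (maxDoc + (1 << 16) - 1) >>> 16, is a ceiling division by 2^16 = 65536: the doc ID space is carved into 64K-document blocks, each encoded by its own small sub-DocIdSet. For example, maxDoc = 100000 gives ceil(100000 / 65536) = 2 blocks.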
 
Example 22 Project: lucene-solr   File: RoaringDocIdSet.java
private RoaringDocIdSet(DocIdSet[] docIdSets, int cardinality) {
  this.docIdSets = docIdSets;
  long ramBytesUsed = BASE_RAM_BYTES_USED + RamUsageEstimator.shallowSizeOf(docIdSets);
  for (DocIdSet set : this.docIdSets) {
    if (set != null) {
      ramBytesUsed += set.ramBytesUsed();
    }
  }
  this.ramBytesUsed = ramBytesUsed;
  this.cardinality = cardinality;
}
 
Example 23 Project: lucene-solr   File: AnalyticsDriver.java
/**
 * Drive the collection of reduction data. This includes overall data as well as faceted data.
 *
 * @param manager of the request to drive
 * @param searcher the results of the query
 * @param filter that represents the overall query
 * @param queryRequest used for the search request
 * @throws IOException if an error occurs while reading from Solr
 */
public static void drive(AnalyticsRequestManager manager, SolrIndexSearcher searcher, Filter filter, SolrQueryRequest queryRequest) throws IOException {
  StreamingInfo streamingInfo = manager.getStreamingFacetInfo();
  Iterable<StreamingFacet> streamingFacets = streamingInfo.streamingFacets;
  ReductionCollectionManager collectionManager = streamingInfo.streamingCollectionManager;

  Iterable<FacetValueQueryExecuter> facetExecuters = manager.getFacetExecuters(filter, queryRequest);

  // Streaming phase (Overall results & Value/Pivot Facets)
  // Loop through all documents and collect reduction data for streaming facets and overall results
  if (collectionManager.needsCollection()) {
    List<LeafReaderContext> contexts = searcher.getTopReaderContext().leaves();
    for (int leafNum = 0; leafNum < contexts.size(); leafNum++) {
      LeafReaderContext context = contexts.get(leafNum);
      DocIdSet dis = filter.getDocIdSet(context, null); // solr docsets already exclude any deleted docs
      if (dis == null) {
        continue;
      }
      DocIdSetIterator disi = dis.iterator();
      if (disi != null) {
        collectionManager.doSetNextReader(context);
        int doc = disi.nextDoc();
        while (doc != DocIdSetIterator.NO_MORE_DOCS) {
          // Add a document to the statistics being generated
          collectionManager.collect(doc);
          streamingFacets.forEach(facet -> facet.addFacetValueCollectionTargets());
          collectionManager.apply();
          doc = disi.nextDoc();
        }
      }
    }
  }

  // Executing phase (Query/Range Facets)
  // Send additional Solr Queries to compute facet values
  for (FacetValueQueryExecuter executer : facetExecuters) {
    executer.execute(searcher);
  }
}
 
Example 24 Project: lucene-solr   File: SolrRangeQuery.java
private Scorer scorer(DocIdSet set) throws IOException {
  if (set == null) {
    return null;
  }
  final DocIdSetIterator disi = set.iterator();
  if (disi == null) {
    return null;
  }
  return new ConstantScoreScorer(this, score(), scoreMode, disi);
}
 
Example 25 Project: lucene-solr   File: GraphTermsQParserPlugin.java
@Override
public final Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
  return new ConstantScoreWeight(this, boost) {
    Filter filter;

    @Override
    public Scorer scorer(LeafReaderContext context) throws IOException {
      if (filter == null) {
        DocSet set = getDocSet(searcher);
        filter = set.getTopFilter();
      }

      // Although this set only includes live docs, other filters can be pushed down to queries.
      DocIdSet readerSet = filter.getDocIdSet(context, null);
      if (readerSet == null) {
        return null;
      }
      DocIdSetIterator readerSetIterator = readerSet.iterator();
      if (readerSetIterator == null) {
        return null;
      }
      return new ConstantScoreScorer(this, score(), scoreMode, readerSetIterator);
    }

    @Override
    public boolean isCacheable(LeafReaderContext ctx) {
      return true;
    }
  };
}
 
Example 26 Project: lucene-solr   File: SolrConstantScoreQuery.java
@Override
public Scorer scorer(LeafReaderContext context) throws IOException {
  DocIdSet docIdSet = filter instanceof SolrFilter ? ((SolrFilter)filter).getDocIdSet(this.context, context, null) : filter.getDocIdSet(context, null);
  if (docIdSet == null) {
    return null;
  }
  DocIdSetIterator iterator = docIdSet.iterator();
  if (iterator == null) {
    return null;
  }
  return new ConstantScoreScorer(this, score(), scoreMode, iterator);
}
 
Example 27 Project: lucene-solr   File: GraphQuery.java
@Override
public Scorer scorer(LeafReaderContext context) throws IOException {
  if (filter == null) {
    resultSet = getDocSet();
    filter = resultSet.getTopFilter();
  }
  DocIdSet readerSet = filter.getDocIdSet(context,context.reader().getLiveDocs());
  // create a scorer on the result set; if the results from the right-hand query are empty, use an empty iterator.
  return new GraphScorer(this, readerSet == null ? DocIdSetIterator.empty() : readerSet.iterator(), 1);
}
 
Example 28 Project: lucene-solr   File: TestDocSet.java
public void doTestIteratorEqual(DocIdSet a, DocIdSet b) throws IOException {
  DocIdSetIterator ia = a.iterator();
  DocIdSetIterator ib = b.iterator();

  // test for next() equivalence
  for(;;) {
    int da = ia.nextDoc();
    int db = ib.nextDoc();
    assertEquals(da, db);
    assertEquals(ia.docID(), ib.docID());
    if (da==DocIdSetIterator.NO_MORE_DOCS) break;
  }

  for (int i=0; i<10; i++) {
    // test random skipTo() and next()
    ia = a.iterator();
    ib = b.iterator();
    int doc = -1;
    for (;;) {
      int da,db;
      if (rand.nextBoolean()) {
        da = ia.nextDoc();
        db = ib.nextDoc();
      } else {
        int target = doc + rand.nextInt(10) + 1;  // keep in mind future edge cases like probing (increase if necessary)
        da = ia.advance(target);
        db = ib.advance(target);
      }

      assertEquals(da, db);
      assertEquals(ia.docID(), ib.docID());
      if (da==DocIdSetIterator.NO_MORE_DOCS) break;
      doc = da;
    }
  }
}
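
Beyond nextDoc(), the other half of the iterator contract tested here is advance(target), which moves to the first document whose ID is greater than or equal to target (or NO_MORE_DOCS). A common membership test built on it, the same idiom as Example 30 below; set and docId are hypothetical placeholders:

DocIdSetIterator it = set.iterator();
boolean contains = it != null && it.advance(docId) == docId;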
 
Example 29 Project: lucene-solr   File: TestSort.java
public DocIdSet randSet(int sz) {
  FixedBitSet obs = new FixedBitSet(sz);
  int n = r.nextInt(sz);
  for (int i=0; i<n; i++) {
    obs.set(r.nextInt(sz));
  }
  return new BitDocIdSet(obs);
}
 
Example 30 Project: incubator-retired-blur   File: IndexManager.java
@SuppressWarnings("unchecked")
private static boolean isFiltered(int notAdjustedDocId, IndexReader reader, Filter filter) throws IOException {
  if (filter == null) {
    return false;
  }
  if (reader instanceof BaseCompositeReader) {
    BaseCompositeReader<IndexReader> indexReader = (BaseCompositeReader<IndexReader>) reader;
    List<? extends IndexReader> sequentialSubReaders = BaseCompositeReaderUtil.getSequentialSubReaders(indexReader);
    int readerIndex = BaseCompositeReaderUtil.readerIndex(indexReader, notAdjustedDocId);
    int readerBase = BaseCompositeReaderUtil.readerBase(indexReader, readerIndex);
    int docId = notAdjustedDocId - readerBase;
    IndexReader orgReader = sequentialSubReaders.get(readerIndex);
    SegmentReader sReader = AtomicReaderUtil.getSegmentReader(orgReader);
    if (sReader != null) {
      SegmentReader segmentReader = (SegmentReader) sReader;
      DocIdSet docIdSet = filter.getDocIdSet(segmentReader.getContext(), segmentReader.getLiveDocs());
      DocIdSetIterator iterator = docIdSet.iterator();
      if (iterator == null) {
        return true;
      }
      if (iterator.advance(docId) == docId) {
        return false;
      }
      return true;
    }
    throw new RuntimeException("Reader has to be a SegmentReader [" + orgReader + "]");
  } else {
    throw new RuntimeException("Reader has to be a BaseCompositeReader [" + reader + "]");
  }
}
 