Code examples for class org.apache.lucene.index.AtomicReaderContext

The examples below show how to use the org.apache.lucene.index.AtomicReaderContext API; you can also follow the project links to view the full source code on GitHub.
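
All of the examples share one core pattern: open a composite DirectoryReader, walk its per-segment AtomicReaderContext leaves, and work with each leaf's AtomicReader. As a warm-up, here is a minimal, self-contained sketch of that pattern (Lucene 4.x; the index path is a placeholder):

import java.io.File;
import java.util.List;

import org.apache.lucene.index.AtomicReader;
import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class LeavesDemo {
  public static void main(String[] args) throws Exception {
    Directory dir = FSDirectory.open(new File("/path/to/index")); // placeholder path
    DirectoryReader reader = DirectoryReader.open(dir);
    try {
      // A composite reader exposes one AtomicReaderContext per segment.
      List<AtomicReaderContext> leaves = reader.leaves();
      for (AtomicReaderContext context : leaves) {
        AtomicReader atomicReader = context.reader();
        // docBase maps this segment's local doc ids into the composite id space.
        System.out.println("segment docBase=" + context.docBase
            + " maxDoc=" + atomicReader.maxDoc());
      }
    } finally {
      reader.close();
    }
  }
}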

Example 1 Project: linden File: ShardWriter.java
/**
 * Process an intermediate form by carrying out, on the Lucene instance of
 * the shard, the deletes and the inserts (a ram index) in the form.
 * @param form  the intermediate form containing deletes and a ram index
 * @throws IOException
 */
public void process(IntermediateForm form, FacetsConfig facetsConfig) throws IOException {
  if (facetsConfig != null) {
    DirectoryTaxonomyWriter.OrdinalMap map = new DirectoryTaxonomyWriter.MemoryOrdinalMap();
    // merge the taxonomies
    taxoWriter.addTaxonomy(form.getTaxoDirectory(), map);
    int[] ordinalMap = map.getMap();
    DirectoryReader reader = DirectoryReader.open(form.getDirectory());
    try {
      List<AtomicReaderContext> leaves = reader.leaves();
      int numReaders = leaves.size();
      AtomicReader[] wrappedLeaves = new AtomicReader[numReaders];
      for (int i = 0; i < numReaders; i++) {
        wrappedLeaves[i] = new OrdinalMappingAtomicReader(leaves.get(i).reader(), ordinalMap, facetsConfig);
      }
      writer.addIndexes(new MultiReader(wrappedLeaves));
    } finally {
      reader.close();
    }
  } else {
    writer.addIndexes(new Directory[] { form.getDirectory() });
  }
  numForms++;
}
 
Example 2 Project: linden File: LindenScoreModelStrategy.java
public void preProcess(AtomicReaderContext context, LindenSchema schema, LindenScoreModel scoreModel) {
  this.context = context;
  fieldSchemaMap = new HashMap<>();
  for (LindenFieldSchema fieldSchema : schema.getFields()) {
    fieldSchemaMap.put(fieldSchema.getName(), fieldSchema);
  }
  // add id field
  if (!fieldSchemaMap.containsKey(schema.getId())) {
    LindenFieldSchema idFieldSchema = new LindenFieldSchema();
    idFieldSchema.setName(schema.getId());
    idFieldSchema.setType(LindenType.STRING);
    idFieldSchema.setIndexed(true);
    idFieldSchema.setOmitNorms(true);
    idFieldSchema.setOmitFreqs(true);
    idFieldSchema.setStored(true);
    idFieldSchema.setTokenized(false);
    fieldSchemaMap.put(schema.getId(), idFieldSchema);
  }
  this.scoreModel = scoreModel;
}
 
Example 3 Project: elasticsearch-image File: ImageQuery.java
@Override
public Explanation explain(AtomicReaderContext context, int doc) throws IOException {
    Scorer scorer = scorer(context, context.reader().getLiveDocs());
    if (scorer != null) {
        int newDoc = scorer.advance(doc);
        if (newDoc == doc) {
            float score = scorer.score();
            ComplexExplanation result = new ComplexExplanation();
            result.setDescription("ImageQuery, product of:");
            result.setValue(score);
            if (getBoost() != 1.0f) {
                result.addDetail(new Explanation(getBoost(), "boost"));
                score = score / getBoost();
            }
            result.addDetail(new Explanation(score, "image score (1/distance)"));
            result.setMatch(true);
            return result;
        }
    }

    return new ComplexExplanation(false, 0.0f, "no matching term");
}
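
For context, a Weight's explain(AtomicReaderContext, int) like the one above is normally reached through IndexSearcher, which resolves the composite doc id to the right leaf before delegating. A hedged sketch ('reader' and 'query' are assumed to exist; 42 is an arbitrary doc id):

IndexSearcher searcher = new IndexSearcher(reader);
Explanation explanation = searcher.explain(query, 42); // takes a composite doc id
System.out.println(explanation);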
 
Example 4 Project: incubator-retired-blur File: MutatableAction.java
private IterableRow getIterableRow(String rowId, IndexSearcherCloseable searcher) throws IOException {
  IndexReader indexReader = searcher.getIndexReader();
  BytesRef rowIdRef = new BytesRef(rowId);
  List<AtomicReaderTermsEnum> possibleRowIds = new ArrayList<AtomicReaderTermsEnum>();
  for (AtomicReaderContext atomicReaderContext : indexReader.leaves()) {
    AtomicReader atomicReader = atomicReaderContext.reader();
    Fields fields = atomicReader.fields();
    if (fields == null) {
      continue;
    }
    Terms terms = fields.terms(BlurConstants.ROW_ID);
    if (terms == null) {
      continue;
    }
    TermsEnum termsEnum = terms.iterator(null);
    if (!termsEnum.seekExact(rowIdRef, true)) {
      continue;
    }
    // need atomic read as well...
    possibleRowIds.add(new AtomicReaderTermsEnum(atomicReader, termsEnum));
  }
  if (possibleRowIds.isEmpty()) {
    return null;
  }
  return new IterableRow(rowId, getRecords(possibleRowIds));
}
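
The same per-leaf seek pattern works for simpler tasks as well. A hedged sketch that counts how many documents contain one term, summed over all leaves ('reader' is any composite IndexReader; the field and term values are placeholders):

int totalDocFreq = 0;
for (AtomicReaderContext leaf : reader.leaves()) {
  Terms terms = leaf.reader().terms("rowid"); // shortcut for fields().terms("rowid")
  if (terms == null) {
    continue; // this segment has no postings for the field
  }
  TermsEnum termsEnum = terms.iterator(null);
  if (termsEnum.seekExact(new BytesRef("row-1"))) {
    totalDocFreq += termsEnum.docFreq();
  }
}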
 
Example 5 Project: incubator-retired-blur File: Blur024CodecTest.java
@Test
public void testDocValuesFormat() throws IOException {
  RAMDirectory directory = new RAMDirectory();
  IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_43, new WhitespaceAnalyzer(Version.LUCENE_43));
  conf.setCodec(new Blur024Codec());
  IndexWriter writer = new IndexWriter(directory, conf);

  Document doc = new Document();
  doc.add(new StringField("f", "v", Store.YES));
  doc.add(new SortedDocValuesField("f", new BytesRef("v")));
  writer.addDocument(doc);

  writer.close();

  DirectoryReader reader = DirectoryReader.open(directory);
  AtomicReaderContext context = reader.leaves().get(0);
  AtomicReader atomicReader = context.reader();
  SortedDocValues sortedDocValues = atomicReader.getSortedDocValues("f");
  assertTrue(sortedDocValues.getClass().getName().startsWith(DiskDocValuesProducer.class.getName()));

  reader.close();
}
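
A hedged continuation of the test above (before reader.close()): reading the stored values back out of the SortedDocValues, ordinal by ordinal.

BytesRef scratch = new BytesRef();
for (int docId = 0; docId < atomicReader.maxDoc(); docId++) {
  int ord = sortedDocValues.getOrd(docId); // ordinal of this doc's value
  if (ord >= 0) { // guard against docs without a value
    sortedDocValues.lookupOrd(ord, scratch); // resolve the ordinal to its bytes
    System.out.println("doc " + docId + " -> " + scratch.utf8ToString());
  }
}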
 
public SecureIndexSearcher(IndexReaderContext context, ExecutorService executor,
    AccessControlFactory accessControlFactory, Collection<String> readAuthorizations,
    Collection<String> discoverAuthorizations, Set<String> discoverableFields, String defaultReadMaskMessage)
    throws IOException {
  super(context, executor);
  _accessControlFactory = accessControlFactory;
  _readAuthorizations = readAuthorizations;
  _discoverAuthorizations = discoverAuthorizations;
  _discoverableFields = discoverableFields;
  _defaultReadMaskMessage = defaultReadMaskMessage;
  _accessControlReader = _accessControlFactory.getReader(readAuthorizations, discoverAuthorizations,
      discoverableFields, _defaultReadMaskMessage);
  _secureIndexReader = getSecureIndexReader(context);
  List<AtomicReaderContext> leaves = _secureIndexReader.leaves();
  _leaveMap = new HashMap<Object, AtomicReaderContext>();
  for (AtomicReaderContext atomicReaderContext : leaves) {
    AtomicReader atomicReader = atomicReaderContext.reader();
    SecureAtomicReader secureAtomicReader = (SecureAtomicReader) atomicReader;
    AtomicReader originalReader = secureAtomicReader.getOriginalReader();
    Object coreCacheKey = originalReader.getCoreCacheKey();
    _leaveMap.put(coreCacheKey, atomicReaderContext);
  }
}
 
protected Collector getSecureCollector(final Collector collector) {
  return new Collector() {

    @Override
    public void setScorer(Scorer scorer) throws IOException {
      collector.setScorer(scorer);
    }

    @Override
    public void setNextReader(AtomicReaderContext context) throws IOException {
      Object key = context.reader().getCoreCacheKey();
      AtomicReaderContext atomicReaderContext = _leaveMap.get(key);
      collector.setNextReader(atomicReaderContext);
    }

    @Override
    public void collect(int doc) throws IOException {
      collector.collect(doc);
    }

    @Override
    public boolean acceptsDocsOutOfOrder() {
      return collector.acceptsDocsOutOfOrder();
    }
  };
}
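
The anonymous class above is the standard delegation pattern for Lucene 4.x Collectors. For comparison, a minimal stand-alone Collector against the same contract (a sketch; imports from org.apache.lucene.search are assumed):

public class CountingCollector extends Collector {
  private int count;

  @Override
  public void setScorer(Scorer scorer) {
    // scores are not needed for counting
  }

  @Override
  public void setNextReader(AtomicReaderContext context) {
    // called once per segment; collect() then receives leaf-local doc ids
  }

  @Override
  public void collect(int doc) {
    count++;
  }

  @Override
  public boolean acceptsDocsOutOfOrder() {
    return true; // counting is order-independent
  }

  public int getCount() {
    return count;
  }
}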
 
Example 8 Project: incubator-retired-blur File: FacetExecutor.java
public void addScorers(AtomicReaderContext context, Scorer[] scorers) throws IOException {
  LOG.debug(getPrefix("adding scorers context [{0}] [{1}]"), context, Arrays.asList(scorers));
  if (scorers.length != _length) {
    throw new IOException("Scorer length is not correct expecting [" + _length + "] actual [" + scorers.length + "]");
  }
  Object key = getKey(context);
  Info info = _infoMap.get(key);
  if (info == null) {
    info = new Info(context, scorers, _locks, _instance);
    _infoMap.put(key, info);
  } else {
    AtomicReader reader = context.reader();
    LOG.warn(getPrefix("Info about reader context [{0}] already created, existing Info [{1}] current reader [{2}]."),
        context, info, reader);
  }
}
 
/**
 * @param doc SolrDocument to check
 * @param idField field where the id is stored
 * @param fieldType type of id field
 * @param filterQuery Query to filter by
 * @param searcher SolrIndexSearcher on which to apply the filter query
 * @return the internal docid, or -1 if the doc is not found or doesn't match the filter
 */
private static int getFilteredInternalDocId(SolrDocument doc, SchemaField idField, FieldType fieldType,
      Query filterQuery, SolrIndexSearcher searcher) throws IOException {
  int docid = -1;
  Field f = (Field)doc.getFieldValue(idField.getName());
  String idStr = f.stringValue();
  BytesRef idBytes = new BytesRef();
  fieldType.readableToIndexed(idStr, idBytes);
  // get the internal document id
  long segAndId = searcher.lookupId(idBytes);

  // if docid is valid, run it through the filter
  if (segAndId >= 0) {
    int segid = (int) segAndId;
    AtomicReaderContext ctx = searcher.getTopReaderContext().leaves().get((int) (segAndId >> 32));
    docid = segid + ctx.docBase;
    Weight weight = filterQuery.createWeight(searcher);
    Scorer scorer = weight.scorer(ctx, null);
    if (scorer == null || segid != scorer.advance(segid)) {
      // filter doesn't match.
      docid = -1;
    }
  }
  return docid;
}
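
The arithmetic above is the general rule: a composite doc id is the leaf-local id plus the leaf's docBase. A short sketch of the reverse mapping ('reader' is any composite IndexReader; 42 is an arbitrary composite doc id):

int globalDoc = 42;
for (AtomicReaderContext leaf : reader.leaves()) {
  int localDoc = globalDoc - leaf.docBase;
  if (localDoc >= 0 && localDoc < leaf.reader().maxDoc()) {
    // leaf.ord is this leaf's position in reader.leaves()
    System.out.println("doc " + globalDoc + " is local doc " + localDoc + " of leaf " + leaf.ord);
    break;
  }
}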
 
Example 10 Project: linden File: IntermediateForm.java
/**
 * This method is used by the index update combiner and merges an input
 * intermediate form into the current intermediate form. More specifically,
 * the input intermediate forms are a single-document ram index and/or a
 * single delete term.
 * @param form  the input intermediate form
 * @throws IOException
 */
public void process(IntermediateForm form, FacetsConfig facetsConfig) throws IOException {
  if (form.dir.ramBytesUsed() > 0 || form.taxoDir.ramBytesUsed() > 0) {
    if (writer == null) {
      createWriter();
    }

    if (facetsConfig != null) {
      DirectoryTaxonomyWriter.OrdinalMap map = new DirectoryTaxonomyWriter.MemoryOrdinalMap();
      // merge the taxonomies
      taxoWriter.addTaxonomy(form.taxoDir, map);
      int[] ordinalMap = map.getMap();
      DirectoryReader reader = DirectoryReader.open(form.dir);
      try {
        List<AtomicReaderContext> leaves = reader.leaves();
        int numReaders = leaves.size();
        AtomicReader[] wrappedLeaves = new AtomicReader[numReaders];
        for (int i = 0; i < numReaders; i++) {
          wrappedLeaves[i] = new OrdinalMappingAtomicReader(leaves.get(i).reader(), ordinalMap, facetsConfig);
        }
        writer.addIndexes(new MultiReader(wrappedLeaves));
      } finally {
        reader.close();
      }
    } else {
      writer.addIndexes(new Directory[] { form.dir });
    }
    numDocs++;
  }
}
 
Example 11 Project: linden File: FlexibleWeight.java
@Override
public Explanation explain(AtomicReaderContext context, int doc) throws IOException {
  FlexibleScorer scorer = (FlexibleScorer) scorer(context, context.reader().getLiveDocs());
  if (scorer != null) {
    int newDoc = scorer.advance(doc);
    if (newDoc == doc) {
      FlexibleScoreModelStrategy strategy = scorer.getStrategy();
      strategy.prepare(0, 0, true);
      return strategy.explain(similarity, query, doc);
    }
  }
  return new ComplexExplanation(false, 0.0f, "no matching term");
}
 
Example 12 Project: linden File: EarlyTerminationCollector.java
@Override
public void setNextReader(AtomicReaderContext context) throws IOException {
  collector.setNextReader(context);
  if (sort != null) {
    segmentSorted = SortingMergePolicy.isSorted(context.reader(), sort);
    segmentTotalCollect = segmentSorted ? numDocsToCollectPerSortedSegment : Integer.MAX_VALUE;
  } else {
    segmentTotalCollect = numDocsToCollectPerSortedSegment;
  }
  numCollected = 0;
}
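
The per-segment budget set here is typically enforced in collect(). A plausible counterpart, assuming the fields shown above; CollectionTerminatedException is caught by IndexSearcher, which simply moves on to the next segment:

@Override
public void collect(int doc) throws IOException {
  if (++numCollected > segmentTotalCollect) {
    throw new CollectionTerminatedException(); // stop collecting this segment only
  }
  collector.collect(doc);
}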
 
Example 13 Project: linden File: LindenScoreQuery.java
@Override
protected CustomScoreProvider getCustomScoreProvider(AtomicReaderContext context) throws IOException {
  try {
    return new LindenScoreProvider(context);
  } catch (Exception e) {
    throw new IOException(Throwables.getStackTraceAsString(e));
  }
}
 
Example 14 Project: Palmetto File: LuceneCorpusAdapter.java
/**
 * Creates a corpus adapter which uses the Lucene index with the given path
 * and searches on the field with the given field name.
 * 
 * @param indexPath path of the directory containing the Lucene index
 * @param fieldName name of the document field to search on
 * @return a corpus adapter backed by the given index
 * @throws CorruptIndexException if the index is corrupt
 * @throws IOException if the index cannot be opened
 */
public static LuceneCorpusAdapter create(String indexPath, String fieldName)
        throws CorruptIndexException, IOException {
    DirectoryReader dirReader = DirectoryReader.open(new NIOFSDirectory(new File(indexPath)));
    List<AtomicReaderContext> leaves = dirReader.leaves();
    AtomicReader[] reader = new AtomicReader[leaves.size()];
    AtomicReaderContext[] contexts = new AtomicReaderContext[leaves.size()];
    for (int i = 0; i < reader.length; i++) {
        contexts[i] = leaves.get(i);
        reader[i] = contexts[i].reader();
    }
    return new LuceneCorpusAdapter(dirReader, reader, contexts, fieldName);
}
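
A hedged usage example (the index path and field name are placeholders):

LuceneCorpusAdapter adapter = LuceneCorpusAdapter.create("/data/wiki-index", "text");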
 
Example 15 Project: Palmetto File: LuceneCorpusAdapter.java
protected LuceneCorpusAdapter(DirectoryReader dirReader, AtomicReader[] reader, AtomicReaderContext[] contexts,
        String fieldName) {
    this.dirReader = dirReader;
    this.reader = reader;
    this.contexts = contexts;
    this.fieldName = fieldName;
}
 
private void createCacheFile(Path file, SegmentKey segmentKey) throws IOException {
  LOG.info("Building cache for segment [{0}] to [{1}]", segmentKey, file);
  Path tmpPath = getTmpWriterPath(file.getParent());
  try (Writer writer = createWriter(_configuration, tmpPath)) {
    DirectoryReader reader = getReader();
    for (AtomicReaderContext context : reader.leaves()) {
      SegmentReader segmentReader = AtomicReaderUtil.getSegmentReader(context.reader());
      if (segmentReader.getSegmentName().equals(segmentKey.getSegmentName())) {
        writeRowIds(writer, segmentReader);
        break;
      }
    }
  }
  commitWriter(_configuration, file, tmpPath);
}
 
private long getTotalNumberOfRowIds(DirectoryReader reader) throws IOException {
  long total = 0;
  List<AtomicReaderContext> leaves = reader.leaves();
  for (AtomicReaderContext context : leaves) {
    AtomicReader atomicReader = context.reader();
    Terms terms = atomicReader.terms(BlurConstants.ROW_ID);
    long expectedInsertions = terms.size();
    if (expectedInsertions < 0) {
      return -1;
    }
    total += expectedInsertions;
  }
  return total;
}
 
Example 18 Project: incubator-retired-blur File: IndexImporter.java
private void applyDeletes(Directory directory, IndexWriter indexWriter, IndexSearcherCloseable searcher,
    String shard, boolean emitDeletes, Configuration configuration) throws IOException {
  DirectoryReader newReader = DirectoryReader.open(directory);
  try {
    List<AtomicReaderContext> newLeaves = newReader.getContext().leaves();
    BlurPartitioner blurPartitioner = new BlurPartitioner();
    Text key = new Text();
    int numberOfShards = _shardContext.getTableContext().getDescriptor().getShardCount();
    int shardId = ShardUtil.getShardIndex(shard);

    Action action = new Action() {
      @Override
      public void found(AtomicReader reader, Bits liveDocs, TermsEnum termsEnum) throws IOException {
        DocsEnum docsEnum = termsEnum.docs(liveDocs, null);
        if (docsEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
          indexWriter.deleteDocuments(new Term(BlurConstants.ROW_ID, BytesRef.deepCopyOf(termsEnum.term())));
        }
      }
    };

    LOG.info("Applying deletes for table [{0}] shard [{1}] new reader [{2}]", _table, shard, newReader);
    boolean skipCheckRowIds = isInternal(newReader);
    LOG.info("Skip rowid check [{0}] for table [{1}] shard [{2}] new reader [{3}]", skipCheckRowIds, _table, shard,
        newReader);
    for (AtomicReaderContext context : newLeaves) {
      AtomicReader newAtomicReader = context.reader();
      if (isFastRowIdDeleteSupported(newAtomicReader)) {
        runNewRowIdCheckAndDelete(indexWriter, emitDeletes, blurPartitioner, key, numberOfShards, shardId,
            newAtomicReader, skipCheckRowIds);
      } else {
        runOldMergeSortRowIdCheckAndDelete(emitDeletes, searcher.getIndexReader(), blurPartitioner, key,
            numberOfShards, shardId, action, newAtomicReader);
      }
    }
  } finally {
    newReader.close();
  }
}
 
public MergeSortRowIdLookup(IndexReader indexReader) throws IOException {
  if (indexReader instanceof AtomicReader) {
    addAtomicReader((AtomicReader) indexReader);
  } else {
    for (AtomicReaderContext context : indexReader.leaves()) {
      addAtomicReader(context.reader());
    }
  }
}
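
The instanceof branch handles callers that already hold a single atomic reader. Lucene 4.x also supports the opposite adaptation, sketched below: SlowCompositeReaderWrapper presents a composite reader as one AtomicReader, at a potentially large performance cost.

AtomicReader atomicView = SlowCompositeReaderWrapper.wrap(directoryReader);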
 
private Callable<Void> newSearchCallable(final Weight weight, final Collector collector, final AtomicReaderContext ctx) {
  return new Callable<Void>() {
    @Override
    public Void call() throws Exception {
      runSearch(weight, collector, ctx);
      return null;
    }
  };
}
 
private void runSearch(Weight weight, Collector collector, AtomicReaderContext ctx) throws IOException {
  Tracer trace = Trace.trace("search - internal", Trace.param("AtomicReader", ctx.reader()));
  try {
    super.search(makeList(ctx), weight, collector);
  } finally {
    trace.done();
  }
}
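
Callables like the one above are typically fanned out over an ExecutorService, one task per leaf. A hedged sketch ('weight', 'collector' and 'executor' are assumed to exist; a collector shared across threads must itself be thread-safe):

List<Callable<Void>> tasks = new ArrayList<Callable<Void>>();
for (AtomicReaderContext ctx : reader.leaves()) {
  tasks.add(newSearchCallable(weight, collector, ctx));
}
// invokeAll blocks until every per-leaf search has finished
for (Future<Void> future : executor.invokeAll(tasks)) {
  future.get(); // propagates any failure as an ExecutionException
}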
 
@Test
public void testQueryFilterWrap1() throws IOException {
  IndexReader r = getIndexReader();
  AccessControlFactory accessControlFactory = new FilterAccessControlFactory();
  Collection<String> readAuthorizations = new ArrayList<String>();
  Collection<String> discoverAuthorizations = new ArrayList<String>();
  Set<String> discoverableFields = new HashSet<String>(Arrays.asList("rowid"));
  BlurSecureIndexSearcher blurSecureIndexSearcher = new BlurSecureIndexSearcher(r, null, accessControlFactory,
      readAuthorizations, discoverAuthorizations, discoverableFields, null);
  Query wrapFilter;
  Query query = new TermQuery(new Term("a", "b"));
  Filter filter = new Filter() {
    @Override
    public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
      throw new RuntimeException("Not implemented.");
    }
  };
  {
    Term primeDocTerm = new Term(BlurConstants.PRIME_DOC, BlurConstants.PRIME_DOC_VALUE);
    ScoreType scoreType = ScoreType.SUPER;
    SuperQuery superQuery = new SuperQuery(query, scoreType, primeDocTerm);
    wrapFilter = blurSecureIndexSearcher.wrapFilter(superQuery, filter);
    System.out.println(wrapFilter);
  }
  {
    assertTrue(wrapFilter instanceof SuperQuery);
    SuperQuery sq = (SuperQuery) wrapFilter;
    Query inner = sq.getQuery();
    assertTrue(inner instanceof FilteredQuery);
    FilteredQuery filteredQuery = (FilteredQuery) inner;
    Query innerFilteredQuery = filteredQuery.getQuery();
    assertEquals(innerFilteredQuery, query);
    assertTrue(filteredQuery.getFilter() == filter);
  }
}
 
@Override
public Boolean execute(IndexContext context) throws IOException, InterruptedException {
  try {
    IndexReader indexReader = context.getIndexReader();
    while (true) {
      long hash = 0;
      for (AtomicReaderContext atomicReaderContext : indexReader.leaves()) {
        AtomicReader reader = atomicReaderContext.reader();
        for (String field : reader.fields()) {
          Terms terms = reader.terms(field);
          BytesRef bytesRef;
          TermsEnum iterator = terms.iterator(null);
          while ((bytesRef = iterator.next()) != null) {
            hash += bytesRef.hashCode();
          }
        }
      }
      System.out.println("hashcode = " + hash);
    }
  } catch (IOException e) {
    e.printStackTrace();
    throw e;
  } catch (Throwable t) {
    t.printStackTrace();
    if (t instanceof InterruptedException) {
      throw t;
    } else if (t instanceof RuntimeException) {
      throw (RuntimeException) t;
    }
    throw new RuntimeException(t);
  }
}
 
private AtomicReader getAtomicReader(DirectoryReader reader) throws IOException {
  List<AtomicReaderContext> leaves = reader.leaves();
  if (leaves.size() == 1) {
    return leaves.get(0).reader();
  }
  throw new IOException("Reader [" + reader + "] has more than one segment after optimize.");
}
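
The single-segment precondition checked here is normally produced by a force-merge. A hedged sketch ('writer' is an open IndexWriter on the same directory):

writer.forceMerge(1); // merge down to a single segment ("optimize")
writer.commit();
DirectoryReader reader = DirectoryReader.open(directory);
assert reader.leaves().size() == 1;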
 
@Override
public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
  AtomicReader reader = context.reader();
  List<DocIdSet> list = new ArrayList<DocIdSet>();

  Fields fields = reader.fields();
  Terms terms = fields.terms(_fieldName);
  if (terms == null) {
    // if field is not present then show nothing.
    return DocIdSet.EMPTY_DOCIDSET;
  }
  TermsEnum iterator = terms.iterator(null);
  BytesRef bytesRef;
  DocumentVisibilityEvaluator visibilityEvaluator = new DocumentVisibilityEvaluator(_authorizations);
  while ((bytesRef = iterator.next()) != null) {
    if (isVisible(visibilityEvaluator, bytesRef)) {
      DocIdSet docIdSet = _filterCacheStrategy.getDocIdSet(_fieldName, bytesRef, reader);
      if (docIdSet != null) {
        list.add(docIdSet);
      } else {
        // Do not use acceptDocs because we want the acl cache to be version
        // agnostic.
        DocsEnum docsEnum = iterator.docs(null, null);
        list.add(buildCache(reader, docsEnum, bytesRef));
      }
    }
  }
  return getLogicalOr(list);
}
 
Example 26 Project: incubator-retired-blur File: FacetExecutor.java
Info(AtomicReaderContext context, Scorer[] scorers, Lock[] locks, String instance) {
  AtomicReader reader = context.reader();
  _instance = instance;
  _bitSet = new OpenBitSet(reader.maxDoc());
  _scorers = scorers;
  _reader = reader;
  _readerStr = _reader.toString();
  _maxDoc = _reader.maxDoc();
  _locks = locks;
}
 
Example 27 Project: incubator-retired-blur File: FacetExecutor.java
public OpenBitSet getBitSet(AtomicReaderContext context) throws IOException {
  Info info = _infoMap.get(getKey(context));
  if (info == null) {
    throw new IOException("Info object missing.");
  }
  return info._bitSet;
}
 
Example 28 Project: incubator-retired-blur File: FacetQuery.java
private Scorer[] getScorers(AtomicReaderContext context, boolean scoreDocsInOrder, boolean topScorer,
    Bits acceptDocs) throws IOException {
  Scorer[] scorers = new Scorer[_facets.length];
  for (int i = 0; i < scorers.length; i++) {
    scorers[i] = _facets[i].scorer(context, scoreDocsInOrder, topScorer, acceptDocs);
  }
  return scorers;
}
 
Example 30 Project: incubator-retired-blur File: FilterCache.java
@Override
public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
  AtomicReader reader = context.reader();
  Object key = reader.getCoreCacheKey();
  DocIdSet docIdSet = _cache.get(key);
  if (docIdSet != null) {
    _hits.incrementAndGet();
    return BitsFilteredDocIdSet.wrap(docIdSet, acceptDocs);
  }
  // This will only allow a single instance be created per reader per filter
  Object lock = getLock(key);
  synchronized (lock) {
    SegmentReader segmentReader = getSegmentReader(reader);
    if (segmentReader == null) {
      LOG.warn("Could not find SegmentReader from [{0}]", reader);
      return _filter.getDocIdSet(context, acceptDocs);
    }
    Directory directory = getDirectory(segmentReader);
    if (directory == null) {
      LOG.warn("Could not find Directory from [{0}]", segmentReader);
      return _filter.getDocIdSet(context, acceptDocs);
    }
    _misses.incrementAndGet();
    String segmentName = segmentReader.getSegmentName();
    docIdSet = docIdSetToCache(_filter.getDocIdSet(context, null), reader, segmentName, directory);
    _cache.put(key, docIdSet);
    return BitsFilteredDocIdSet.wrap(docIdSet, acceptDocs);
  }
}
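
Lucene 4.x ships a simpler built-in equivalent of this pattern, CachingWrapperFilter, which likewise caches per getCoreCacheKey(). A hedged usage sketch (field and term are placeholders):

Filter base = new QueryWrapperFilter(new TermQuery(new Term("f", "v")));
Filter cached = new CachingWrapperFilter(base); // cached per segment core key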
 