org.apache.lucene.index.LeafReaderContext#reader() source code examples

Listed below are example usages of org.apache.lucene.index.LeafReaderContext#reader(), drawn from a number of open-source projects.
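Before the individual examples, a minimal self-contained sketch of the pattern nearly all of them share may be useful: iterate the index's leaf (per-segment) contexts and call reader() on each to obtain the per-segment LeafReader, remembering that leaf-level document IDs are segment-relative. This sketch is not taken from any of the projects below; the class name and the field name "myField" are placeholders, and it assumes a numeric doc-values field.

import java.io.IOException;

import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.NumericDocValues;

public class LeafReaderContextDemo {

  static void visitAllSegments(IndexReader reader) throws IOException {
    for (LeafReaderContext context : reader.leaves()) {
      // reader() returns the LeafReader backing this segment.
      LeafReader leaf = context.reader();
      // Doc-values accessors (like all leaf-level structures) are per-segment.
      NumericDocValues values = DocValues.getNumeric(leaf, "myField");
      for (int doc = 0; doc < leaf.maxDoc(); doc++) {
        if (values.advanceExact(doc)) {
          // Leaf doc IDs run from 0 to maxDoc() - 1; adding context.docBase
          // converts them back to top-level (index-wide) doc IDs.
          System.out.println((doc + context.docBase) + " -> " + values.longValue());
        }
      }
    }
  }
}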

Example 1

@Override
public AtomicGeoPointFieldData loadDirect(LeafReaderContext context) throws Exception {
    LeafReader reader = context.reader();

    Terms terms = reader.terms(getFieldNames().indexName());
    AtomicGeoPointFieldData data = null;
    // TODO: Use an actual estimator to estimate before loading.
    NonEstimatingEstimator estimator = new NonEstimatingEstimator(breakerService.getBreaker(CircuitBreaker.FIELDDATA));
    if (terms == null) {
        data = AbstractAtomicGeoPointFieldData.empty(reader.maxDoc());
        estimator.afterLoad(null, data.ramBytesUsed());
        return data;
    }
    return (Version.indexCreated(indexSettings).before(Version.V_2_2_0)) ?
        loadLegacyFieldData(reader, estimator, terms, data) : loadFieldData22(reader, estimator, terms, data);
}
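A detail worth noting: when a segment has no terms for the field, terms is null and the method returns an empty field-data instance sized to reader.maxDoc() instead of dereferencing the null Terms; the circuit-breaker estimator is still notified of the (empty) allocation.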
 
Example 2  Project: mtas  File: CodecCollector.java
/**
 * Collect collection.
 *
 * @param reader
 *          the reader
 * @param docSet
 *          the doc set
 * @param collectionInfo
 *          the collection info
 * @throws IOException
 *           Signals that an I/O exception has occurred.
 */
public static void collectCollection(IndexReader reader, List<Integer> docSet,
    ComponentCollection collectionInfo) throws IOException {
  if (collectionInfo.action().equals(ComponentCollection.ACTION_CHECK)) {
    // can't do anything in lucene for check
  } else if (collectionInfo.action()
      .equals(ComponentCollection.ACTION_LIST)) {
    // can't do anything in lucene for list
  } else if (collectionInfo.action()
      .equals(ComponentCollection.ACTION_CREATE)) {
    BytesRef term = null;
    PostingsEnum postingsEnum = null;
    Integer docId;
    Integer termDocId = -1;
    Terms terms;
    LeafReaderContext lrc;
    LeafReader r;
    ListIterator<LeafReaderContext> iterator = reader.leaves().listIterator();
    while (iterator.hasNext()) {
      lrc = iterator.next();
      r = lrc.reader();
      for (String field : collectionInfo.fields()) {
        if ((terms = r.terms(field)) != null) {
          TermsEnum termsEnum = terms.iterator();
          while ((term = termsEnum.next()) != null) {
            Iterator<Integer> docIterator = docSet.iterator();
            postingsEnum = termsEnum.postings(postingsEnum,
                PostingsEnum.NONE);
            termDocId = -1;
            while (docIterator.hasNext()) {
              docId = docIterator.next() - lrc.docBase;
              if ((docId >= termDocId) && ((docId.equals(termDocId))
                  || ((termDocId = postingsEnum.advance(docId))
                      .equals(docId)))) {
                collectionInfo.addValue(term.utf8ToString());
                break;
              }
              if (termDocId.equals(PostingsEnum.NO_MORE_DOCS)) {
                break;
              }
            }
          }
        }
      }
    }
  }
}
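Note the conversion docId = docIterator.next() - lrc.docBase: docSet holds top-level document IDs, while PostingsEnum.advance() works with segment-local IDs, so every ID must be rebased against the current leaf's docBase.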
 
Example 3  Project: HongsCORE  File: StatisHelper.java
@Override
public LeafCollector getLeafCollector(LeafReaderContext lrc) throws IOException {
    LeafReader reader = lrc.reader();

    for (int i = 0; i < fields.length; i++) {
        if (groups[i][1] == 1) {
            values[i] = reader.getSortedNumericDocValues("%" + fields[i]);
        } else {
            values[i] = reader.getNumericDocValues("#" + fields[i]);
        }
    }

    return this;
}
 
Example 4  Project: lucene-solr  File: TestLucene80DocValuesFormat.java
private void assertDVAdvance(Directory dir, int jumpStep) throws IOException {
  DirectoryReader ir = DirectoryReader.open(dir);
  TestUtil.checkReader(ir);
  for (LeafReaderContext context : ir.leaves()) {
    LeafReader r = context.reader();


    for (int jump = jumpStep; jump < r.maxDoc(); jump += jumpStep) {
      // Create a new instance each time to ensure jumps from the beginning
      NumericDocValues docValues = DocValues.getNumeric(r, "dv");
      for (int docID = 0; docID < r.maxDoc(); docID += jump) {
        String base = "document #" + docID + "/" + r.maxDoc() + ", jumping " + jump + " from #" + (docID-jump);
        String storedValue = r.document(docID).get("stored");
        if (storedValue == null) {
          assertFalse("There should be no DocValue for " + base,
              docValues.advanceExact(docID));
        } else {
          assertTrue("There should be a DocValue for " + base,
              docValues.advanceExact(docID));
          assertEquals("The doc value should be correct for " + base,
              Long.parseLong(storedValue), docValues.longValue());
        }
      }
    }
  }
  ir.close();
}
 
Example 5

@Override
public LeafFieldComparator getLeafComparator(LeafReaderContext context) throws IOException {
  LeafReader reader = context.reader();
  FieldInfo info = reader.getFieldInfos().fieldInfo(field);
  if (info != null) {
    LatLonDocValuesField.checkCompatible(info);
  }
  currentDocs = DocValues.getSortedNumeric(reader, field);
  valuesDocID = -1;
  return this;
}
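Doc-values iterators are per-segment and forward-only, so the comparator must fetch a fresh SortedNumericDocValues via DocValues.getSortedNumeric() for each leaf it is asked to compare; resetting valuesDocID to -1 marks the new iterator as not yet positioned.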
 
Example 6  Project: lucene-solr  File: TestMultipleIndexFields.java
private void assertOrdinalsExist(String field, IndexReader ir) throws IOException {
  for (LeafReaderContext context : ir.leaves()) {
    LeafReader r = context.reader();
    if (r.getBinaryDocValues(field) != null) {
      return; // not all segments must have this DocValues
    }
  }
  fail("no ordinals found for " + field);
}
 
Example 7  Project: lucene-solr  File: CheckJoinIndex.java
/**
 * Check that the given index is good to use for block joins.
 * @throws IllegalStateException if the index does not have an appropriate structure
 */
public static void check(IndexReader reader, BitSetProducer parentsFilter) throws IOException {
  for (LeafReaderContext context : reader.leaves()) {
    if (context.reader().maxDoc() == 0) {
      continue;
    }
    final BitSet parents = parentsFilter.getBitSet(context);
    if (parents == null || parents.cardinality() == 0) {
      throw new IllegalStateException("Every segment should have at least one parent, but " + context.reader() + " does not have any");
    }
    if (parents.get(context.reader().maxDoc() - 1) == false) {
      throw new IllegalStateException("The last document of a segment must always be a parent, but " + context.reader() + " has a child as a last doc");
    }
    final Bits liveDocs = context.reader().getLiveDocs();
    if (liveDocs != null) {
      int prevParentDoc = -1;
      DocIdSetIterator it = new BitSetIterator(parents, 0L);
      for (int parentDoc = it.nextDoc(); parentDoc != DocIdSetIterator.NO_MORE_DOCS; parentDoc = it.nextDoc()) {
        final boolean parentIsLive = liveDocs.get(parentDoc);
        for (int child = prevParentDoc + 1; child != parentDoc; child++) {
          final boolean childIsLive = liveDocs.get(child);
          if (parentIsLive != childIsLive) {
            if (childIsLive) {
              throw new IllegalStateException("Parent doc " + parentDoc + " of segment " + context.reader() + " is live but has a deleted child document " + child);
            } else {
              throw new IllegalStateException("Parent doc " + parentDoc + " of segment " + context.reader() + " is deleted but has a live child document " + child);
            }
          }
        }
        prevParentDoc = parentDoc;
      }
    }
  }
}
 
Example 8  Project: Elasticsearch  File: SourceLookup.java
public void setSegmentAndDocument(LeafReaderContext context, int docId) {
    if (this.reader == context.reader() && this.docId == docId) {
        // if we are called with the same document, don't invalidate source
        return;
    }
    this.reader = context.reader();
    this.source = null;
    this.sourceAsBytes = null;
    this.docId = docId;
}
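The this.reader == context.reader() identity check is a cheap test for "same segment, same document": the cached source and its bytes are cleared only when the lookup actually moves, which avoids re-loading the stored source for repeated accesses to the same document.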
 
Example 9  Project: lucene-solr  File: DirectoryTaxonomyWriter.java
/**
 * Takes the categories from the given taxonomy directory, and adds the
 * missing ones to this taxonomy. Additionally, it fills the given
 * {@link OrdinalMap} with a mapping from the original ordinal to the new
 * ordinal.
 */
public void addTaxonomy(Directory taxoDir, OrdinalMap map) throws IOException {
  ensureOpen();
  DirectoryReader r = DirectoryReader.open(taxoDir);
  try {
    final int size = r.numDocs();
    final OrdinalMap ordinalMap = map;
    ordinalMap.setSize(size);
    int base = 0;
    PostingsEnum docs = null;
    for (final LeafReaderContext ctx : r.leaves()) {
      final LeafReader ar = ctx.reader();
      final Terms terms = ar.terms(Consts.FULL);
      // TODO: share per-segment TermsEnum here!
      TermsEnum te = terms.iterator();
      while (te.next() != null) {
        FacetLabel cp = new FacetLabel(FacetsConfig.stringToPath(te.term().utf8ToString()));
        final int ordinal = addCategory(cp);
        docs = te.postings(docs, PostingsEnum.NONE);
        ordinalMap.addMapping(docs.nextDoc() + base, ordinal);
      }
      base += ar.maxDoc(); // no deletions, so we're ok
    }
    ordinalMap.addDone();
  } finally {
    r.close();
  }
}
 
Example 10  Project: lucene-solr  File: IndexSizeEstimator.java
private void estimateStoredFields(Map<String, Object> result) throws IOException {
  log.info("- estimating stored fields...");
  Map<String, Map<String, Object>> stats = new HashMap<>();
  for (LeafReaderContext context : reader.leaves()) {
    LeafReader leafReader = context.reader();
    EstimatingVisitor visitor = new EstimatingVisitor(stats, topN, maxLength, samplingStep);
    Bits liveDocs = leafReader.getLiveDocs();
    if (leafReader instanceof CodecReader) {
      CodecReader codecReader = (CodecReader)leafReader;
      StoredFieldsReader storedFieldsReader = codecReader.getFieldsReader();
      // this instance may be faster for a full sequential pass
      StoredFieldsReader mergeInstance = storedFieldsReader.getMergeInstance();
      for (int docId = 0; docId < leafReader.maxDoc(); docId += samplingStep) {
        if (liveDocs != null && !liveDocs.get(docId)) {
          continue;
        }
        mergeInstance.visitDocument(docId, visitor);
      }
      if (mergeInstance != storedFieldsReader) {
        mergeInstance.close();
      }
    } else {
      for (int docId = 0; docId < leafReader.maxDoc(); docId += samplingStep) {
        if (liveDocs != null && !liveDocs.get(docId)) {
          continue;
        }
        leafReader.document(docId, visitor);
      }
    }
  }
  result.put(STORED_FIELDS, stats);
}
 
Example 11  Project: lucene-solr  File: BBoxValueSource.java
@Override
public ShapeValues getValues(LeafReaderContext readerContext) throws IOException {
  LeafReader reader = readerContext.reader();
  final NumericDocValues minX = DocValues.getNumeric(reader, strategy.field_minX);
  final NumericDocValues minY = DocValues.getNumeric(reader, strategy.field_minY);
  final NumericDocValues maxX = DocValues.getNumeric(reader, strategy.field_maxX);
  final NumericDocValues maxY = DocValues.getNumeric(reader, strategy.field_maxY);

  //reused
  final Rectangle rect = strategy.getSpatialContext().getShapeFactory().rect(0,0,0,0);

  return new ShapeValues() {

    @Override
    public boolean advanceExact(int doc) throws IOException {
      return minX.advanceExact(doc) && minY.advanceExact(doc) && maxX.advanceExact(doc) && maxY.advanceExact(doc);
    }

    @Override
    public Shape value() throws IOException {
      double minXValue = Double.longBitsToDouble(minX.longValue());
      double minYValue = Double.longBitsToDouble(minY.longValue());
      double maxXValue = Double.longBitsToDouble(maxX.longValue());
      double maxYValue = Double.longBitsToDouble(maxY.longValue());
      rect.reset(minXValue, maxXValue, minYValue, maxYValue);
      return rect;
    }

  };
}
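The four coordinate fields store doubles as their raw long bits, so each value is decoded with Double.longBitsToDouble() before resetting the shared Rectangle; reusing one mutable Rectangle avoids allocating a shape per document, at the cost that callers must not hold on to the returned value across documents.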
 
Example 12

@Override
public LeafFieldComparator getLeafComparator(LeafReaderContext context) throws IOException {
  LeafReader reader = context.reader();
  FieldInfo info = reader.getFieldInfos().fieldInfo(field);
  if (info != null) {
    Geo3DDocValuesField.checkCompatible(info);
  }
  currentDocs = DocValues.getSortedNumeric(reader, field);
  return this;
}
 
Example 13  Project: lucene-solr  File: TestLucene80DocValuesFormat.java
private void doTestSortedNumericBlocksOfVariousBitsPerValue(LongSupplier counts) throws Exception {
  Directory dir = newDirectory();
  IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
  conf.setMaxBufferedDocs(atLeast(Lucene80DocValuesFormat.NUMERIC_BLOCK_SIZE));
  conf.setRAMBufferSizeMB(-1);
  conf.setMergePolicy(newLogMergePolicy(random().nextBoolean()));
  IndexWriter writer = new IndexWriter(dir, conf);
  
  final int numDocs = atLeast(Lucene80DocValuesFormat.NUMERIC_BLOCK_SIZE*3);
  final LongSupplier values = blocksOfVariousBPV();
  for (int i = 0; i < numDocs; i++) {
    Document doc = new Document();
    
    int valueCount = (int) counts.getAsLong();
    long valueArray[] = new long[valueCount];
    for (int j = 0; j < valueCount; j++) {
      long value = values.getAsLong();
      valueArray[j] = value;
      doc.add(new SortedNumericDocValuesField("dv", value));
    }
    Arrays.sort(valueArray);
    for (int j = 0; j < valueCount; j++) {
      doc.add(new StoredField("stored", Long.toString(valueArray[j])));
    }
    writer.addDocument(doc);
    if (random().nextInt(31) == 0) {
      writer.commit();
    }
  }
  writer.forceMerge(1);

  writer.close();
  
  // compare
  DirectoryReader ir = DirectoryReader.open(dir);
  TestUtil.checkReader(ir);
  for (LeafReaderContext context : ir.leaves()) {
    LeafReader r = context.reader();
    SortedNumericDocValues docValues = DocValues.getSortedNumeric(r, "dv");
    for (int i = 0; i < r.maxDoc(); i++) {
      if (i > docValues.docID()) {
        docValues.nextDoc();
      }
      String expected[] = r.document(i).getValues("stored");
      if (i < docValues.docID()) {
        assertEquals(0, expected.length);
      } else {
        String actual[] = new String[docValues.docValueCount()];
        for (int j = 0; j < actual.length; j++) {
          actual[j] = Long.toString(docValues.nextValue());
        }
        assertArrayEquals(expected, actual);
      }
    }
  }
  ir.close();
  dir.close();
}
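The comparison loop advances the doc-values iterator lazily: nextDoc() is called only when the target document i is beyond the iterator's current position, and any document the iterator has already skipped past (i < docValues.docID()) is asserted to have no stored values.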
 
Example 14  Project: Elasticsearch  File: FieldVisitorCollector.java
@Override
protected void doSetNextReader(LeafReaderContext context) throws IOException {
    super.doSetNextReader(context);
    collector.getLeafCollector(context);
    currentReader = context.reader();
}
 
Example 15  Project: lucene-solr  File: RptWithGeometrySpatialField.java
@Override
public ShapeValues getValues(LeafReaderContext readerContext) throws IOException {
  final ShapeValues targetFuncValues = targetValueSource.getValues(readerContext);
  // The key is a pair of leaf reader with a docId relative to that reader. The value is a Map from field to Shape.
  @SuppressWarnings({"unchecked"})
  final SolrCache<PerSegCacheKey,Shape> cache =
      SolrRequestInfo.getRequestInfo().getReq().getSearcher().getCache(CACHE_KEY_PREFIX + fieldName);
  if (cache == null) {
    return targetFuncValues; // no caching; no configured cache
  }

  return new ShapeValues() {
    int docId = -1;

    @Override
    public Shape value() throws IOException {
      //lookup in cache
      IndexReader.CacheHelper cacheHelper = readerContext.reader().getCoreCacheHelper();
      if (cacheHelper == null) {
        throw new IllegalStateException("Leaf " + readerContext.reader() + " is not suited for caching");
      }
      PerSegCacheKey key = new PerSegCacheKey(cacheHelper.getKey(), docId);
      Shape shape = cache.computeIfAbsent(key, k -> {
        try {
          return targetFuncValues.value();
        } catch (IOException e) {
          return null;
        }
      });
      if (shape != null) {
        //optimize shape on a cache hit if possible. This must be thread-safe and it is.
        if (shape instanceof JtsGeometry) {
          ((JtsGeometry) shape).index(); // TODO would be nice if some day we didn't have to cast
        }
      }
      return shape;
    }

    @Override
    public boolean advanceExact(int doc) throws IOException {
      this.docId = doc;
      return targetFuncValues.advanceExact(doc);
    }

  };

}
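Using readerContext.reader().getCoreCacheHelper() ties each cache key to the segment core, so cached shapes can be shared across reader reopens that reuse a segment but never collide between different segments; leaves that do not support core caching are rejected outright.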
 
Example 16  Project: Elasticsearch  File: BinaryDVIndexFieldData.java
@Override
public BinaryDVAtomicFieldData load(LeafReaderContext context) {
    return new BinaryDVAtomicFieldData(context.reader(), fieldNames.indexName());
}
 
Example 17  Project: Elasticsearch  File: QueriesLoaderCollector.java
@Override
protected void doSetNextReader(LeafReaderContext context) throws IOException {
    reader = context.reader();
    uidValues = uidFieldData.load(context).getBytesValues();
}
 
Example 18  Project: lucene-solr  File: SegmentsInfoRequestHandler.java
private SimpleOrderedMap<Object> getSegmentInfo(
    SegmentCommitInfo segmentCommitInfo, boolean withSizeInfo, boolean withFieldInfos,
    List<LeafReaderContext> leafContexts, IndexSchema schema) throws IOException {
  SimpleOrderedMap<Object> segmentInfoMap = new SimpleOrderedMap<>();

  segmentInfoMap.add(NAME, segmentCommitInfo.info.name);
  segmentInfoMap.add("delCount", segmentCommitInfo.getDelCount());
  segmentInfoMap.add("softDelCount", segmentCommitInfo.getSoftDelCount());
  segmentInfoMap.add("hasFieldUpdates", segmentCommitInfo.hasFieldUpdates());
  segmentInfoMap.add("sizeInBytes", segmentCommitInfo.sizeInBytes());
  segmentInfoMap.add("size", segmentCommitInfo.info.maxDoc());
  Long timestamp = Long.parseLong(segmentCommitInfo.info.getDiagnostics()
      .get("timestamp"));
  segmentInfoMap.add("age", new Date(timestamp));
  segmentInfoMap.add("source",
      segmentCommitInfo.info.getDiagnostics().get("source"));
  segmentInfoMap.add("version", segmentCommitInfo.info.getVersion().toString());
  // don't open a new SegmentReader - try to find the right one from the leaf contexts
  SegmentReader seg = null;
  for (LeafReaderContext lrc : leafContexts) {
    LeafReader leafReader = lrc.reader();
    leafReader = FilterLeafReader.unwrap(leafReader);
    if (leafReader instanceof SegmentReader) {
      SegmentReader sr = (SegmentReader)leafReader;
      if (sr.getSegmentInfo().info.equals(segmentCommitInfo.info)) {
        seg = sr;
        break;
      }
    }
  }
  if (seg != null) {
    LeafMetaData metaData = seg.getMetaData();
    if (metaData != null) {
      segmentInfoMap.add("createdVersionMajor", metaData.getCreatedVersionMajor());
      segmentInfoMap.add("minVersion", metaData.getMinVersion().toString());
      if (metaData.getSort() != null) {
        segmentInfoMap.add("sort", metaData.getSort().toString());
      }
    }
  }
  if (!segmentCommitInfo.info.getDiagnostics().isEmpty()) {
    segmentInfoMap.add("diagnostics", segmentCommitInfo.info.getDiagnostics());
  }
  if (!segmentCommitInfo.info.getAttributes().isEmpty()) {
    segmentInfoMap.add("attributes", segmentCommitInfo.info.getAttributes());
  }
  if (withSizeInfo) {
    Directory dir = segmentCommitInfo.info.dir;
    List<Pair<String, Long>> files = segmentCommitInfo.files().stream()
        .map(f -> {
          long size = -1;
          try {
            size = dir.fileLength(f);
          } catch (IOException e) {
          }
          return new Pair<String, Long>(f, size);
        }).sorted((p1, p2) -> {
          if (p1.second() > p2.second()) {
            return -1;
          } else if (p1.second() < p2.second()) {
            return 1;
          } else {
            return 0;
          }
        }).collect(Collectors.toList());
    if (!files.isEmpty()) {
      SimpleOrderedMap<Object> topFiles = new SimpleOrderedMap<>();
      for (int i = 0; i < Math.min(files.size(), 5); i++) {
        Pair<String, Long> p = files.get(i);
        topFiles.add(p.first(), RamUsageEstimator.humanReadableUnits(p.second()));
      }
      segmentInfoMap.add("largestFiles", topFiles);
    }
  }
  if (seg != null && withSizeInfo) {
    SimpleOrderedMap<Object> ram = new SimpleOrderedMap<>();
    ram.add("total", seg.ramBytesUsed());
    for (Accountable ac : seg.getChildResources()) {
      accountableToMap(ac, ram::add);
    }
    segmentInfoMap.add("ramBytesUsed", ram);
  }
  if (withFieldInfos) {
    if (seg == null) {
      log.debug("Skipping segment info - not available as a SegmentReader: {}", segmentCommitInfo);
    } else {
      FieldInfos fis = seg.getFieldInfos();
      SimpleOrderedMap<Object> fields = new SimpleOrderedMap<>();
      for (FieldInfo fi : fis) {
        fields.add(fi.name, getFieldInfo(seg, fi, schema));
      }
      segmentInfoMap.add("fields", fields);
    }
  }

  return segmentInfoMap;
}
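FilterLeafReader.unwrap() strips any wrapping FilterLeafReader layers so the instanceof SegmentReader test can recognize the underlying segment reader; this lets the handler reuse an already-open reader instead of opening a new SegmentReader just to report metadata.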
 
Example 19  Project: lucene-solr  File: IndexSizeEstimatorTest.java
@Test
public void testEstimator() throws Exception {
  JettySolrRunner jetty = cluster.getRandomJetty(random());
  String randomCoreName = jetty.getCoreContainer().getAllCoreNames().iterator().next();
  SolrCore core = jetty.getCoreContainer().getCore(randomCoreName);
  RefCounted<SolrIndexSearcher> searcherRef = core.getSearcher();
  try {
    SolrIndexSearcher searcher = searcherRef.get();
    // limit the max length
    IndexSizeEstimator estimator = new IndexSizeEstimator(searcher.getRawReader(), 20, 50, true, true);
    IndexSizeEstimator.Estimate estimate = estimator.estimate();
    Map<String, Long> fieldsBySize = estimate.getFieldsBySize();
    assertFalse("empty fieldsBySize", fieldsBySize.isEmpty());
    assertEquals(fieldsBySize.toString(), fields.size(), fieldsBySize.size());
    fieldsBySize.forEach((k, v) -> assertTrue("unexpected size of " + k + ": " + v, v > 0));
    Map<String, Long> typesBySize = estimate.getTypesBySize();
    assertFalse("empty typesBySize", typesBySize.isEmpty());
    assertTrue("expected at least 8 types: " + typesBySize.toString(), typesBySize.size() >= 8);
    typesBySize.forEach((k, v) -> assertTrue("unexpected size of " + k + ": " + v, v > 0));
    Map<String, Object> summary = estimate.getSummary();
    assertNotNull("summary", summary);
    assertFalse("empty summary", summary.isEmpty());
    assertEquals(summary.keySet().toString(), fields.size(), summary.keySet().size());
    Map<String, Object> details = estimate.getDetails();
    assertNotNull("details", details);
    assertFalse("empty details", details.isEmpty());
    // by type
    assertEquals(details.keySet().toString(), 6, details.keySet().size());

    // check sampling
    estimator.setSamplingThreshold(searcher.getRawReader().maxDoc() / 2);
    IndexSizeEstimator.Estimate sampledEstimate = estimator.estimate();
    Map<String, Long> sampledFieldsBySize = sampledEstimate.getFieldsBySize();
    assertFalse("empty fieldsBySize", sampledFieldsBySize.isEmpty());
    // verify that the sampled values are within 50% of the original values
    fieldsBySize.forEach((field, size) -> {
      Long sampledSize = sampledFieldsBySize.get(field);
      assertNotNull("sampled size for " + field + " is missing in " + sampledFieldsBySize, sampledSize);
      double delta = (double) size * 0.5;
      assertEquals("sampled size of " + field + " is wildly off", (double)size, (double)sampledSize, delta);
    });
    // verify the reader is still usable - SOLR-13694
    IndexReader reader = searcher.getRawReader();
    for (LeafReaderContext context : reader.leaves()) {
      LeafReader leafReader = context.reader();
      assertTrue("unexpected LeafReader class: " + leafReader.getClass().getName(), leafReader instanceof CodecReader);
      Bits liveDocs = leafReader.getLiveDocs();
      CodecReader codecReader = (CodecReader) leafReader;
      StoredFieldsReader storedFieldsReader = codecReader.getFieldsReader();
      StoredFieldVisitor visitor = new DocumentStoredFieldVisitor();
      assertNotNull(storedFieldsReader);
      for (int docId = 0; docId < leafReader.maxDoc(); docId++) {
        if (liveDocs != null && !liveDocs.get(docId)) {
          continue;
        }
        storedFieldsReader.visitDocument(docId, visitor);
      }
    }
  } finally {
    searcherRef.decref();
    core.close();
  }
}
 
Example 20  Project: lucene-solr  File: SpanWeight.java
/**
 * Return a LeafSimScorer for this context
 * @param context the LeafReaderContext
 * @return a SimWeight
 * @throws IOException on error
 */
public LeafSimScorer getSimScorer(LeafReaderContext context) throws IOException {
  return simScorer == null ? null : new LeafSimScorer(simScorer, context.reader(), field, true);
}
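If scores are not needed, simScorer is null and this method returns null rather than a no-op scorer, so callers must be prepared to handle the null case.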
 