Source Code Examples for org.apache.lucene.index.SegmentCommitInfo

The examples below show how the org.apache.lucene.index.SegmentCommitInfo API is used in practice. Each snippet is taken from the named open-source project and file; the full sources can be viewed on GitHub.
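
Before the project listings, here is a minimal, self-contained sketch of the same API. It is not taken from any of the projects below: the index path is a placeholder and the class name SegmentCommitInfoDemo is invented for illustration. It reads the latest commit point with SegmentInfos.readLatestCommit and prints the per-commit state (segment name, maxDoc, deletion count, deletion generation, on-disk size) that the examples below work with.

import java.nio.file.Paths;

import org.apache.lucene.index.SegmentCommitInfo;
import org.apache.lucene.index.SegmentInfos;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class SegmentCommitInfoDemo {
  public static void main(String[] args) throws Exception {
    // "/path/to/index" is a placeholder; point it at an existing Lucene index.
    try (Directory dir = FSDirectory.open(Paths.get("/path/to/index"))) {
      // Read the most recent commit point; SegmentInfos is iterable over
      // one SegmentCommitInfo per segment in that commit.
      SegmentInfos infos = SegmentInfos.readLatestCommit(dir);
      for (SegmentCommitInfo info : infos) {
        // SegmentCommitInfo pairs an immutable SegmentInfo (info.info) with
        // per-commit state such as deletion counts and generation numbers.
        System.out.printf("segment=%s maxDoc=%d delCount=%d delGen=%d sizeInBytes=%d%n",
            info.info.name, info.info.maxDoc(), info.getDelCount(),
            info.getDelGen(), info.sizeInBytes());
      }
    }
  }
}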

Example 1. Project: vscode-extension, File: test.java
@Override
public List<Segment> segments(boolean verbose) {
    try (ReleasableLock lock = readLock.acquire()) {
        Segment[] segmentsArr = getSegmentInfo(lastCommittedSegmentInfos, verbose);

        // fill in the merges flag
        Set<OnGoingMerge> onGoingMerges = mergeScheduler.onGoingMerges();
        for (OnGoingMerge onGoingMerge : onGoingMerges) {
            for (SegmentCommitInfo segmentInfoPerCommit : onGoingMerge.getMergedSegments()) {
                for (Segment segment : segmentsArr) {
                    if (segment.getName().equals(segmentInfoPerCommit.info.name)) {
                        segment.mergeId = onGoingMerge.getId();
                        break;
                    }
                }
            }
        }
        return Arrays.asList(segmentsArr);
    }
}
 
Example 2. Project: Elasticsearch, File: DLBasedEngine.java
@Override
public List<Segment> segments(boolean verbose) {
    try (ReleasableLock lock = readLock.acquire()) {
        Segment[] segmentsArr = getSegmentInfo(lastCommittedSegmentInfos, verbose);

        // fill in the merges flag
        Set<OnGoingMerge> onGoingMerges = mergeScheduler.onGoingMerges();
        for (OnGoingMerge onGoingMerge : onGoingMerges) {
            for (SegmentCommitInfo segmentInfoPerCommit : onGoingMerge.getMergedSegments()) {
                for (Segment segment : segmentsArr) {
                    if (segment.getName().equals(segmentInfoPerCommit.info.name)) {
                        segment.mergeId = onGoingMerge.getId();
                        break;
                    }
                }
            }
        }
        return Arrays.asList(segmentsArr);
    }
}
 
Example 3. Project: Elasticsearch, File: InternalEngine.java
@Override
public List<Segment> segments(boolean verbose) {
    try (ReleasableLock lock = readLock.acquire()) {
        Segment[] segmentsArr = getSegmentInfo(lastCommittedSegmentInfos, verbose);

        // fill in the merges flag
        Set<OnGoingMerge> onGoingMerges = mergeScheduler.onGoingMerges();
        for (OnGoingMerge onGoingMerge : onGoingMerges) {
            for (SegmentCommitInfo segmentInfoPerCommit : onGoingMerge.getMergedSegments()) {
                for (Segment segment : segmentsArr) {
                    if (segment.getName().equals(segmentInfoPerCommit.info.name)) {
                        segment.mergeId = onGoingMerge.getId();
                        break;
                    }
                }
            }
        }
        return Arrays.asList(segmentsArr);
    }
}
 
Example 4. Project: Elasticsearch, File: ElasticsearchMergePolicy.java
private boolean shouldUpgrade(SegmentCommitInfo info) {
    org.apache.lucene.util.Version old = info.info.getVersion();
    org.apache.lucene.util.Version cur = Version.CURRENT.luceneVersion;

    // Something seriously wrong if this trips:
    assert old.major <= cur.major;

    if (cur.major > old.major) {
        // Always upgrade segment if Lucene's major version is too old
        return true;
    }
    if (upgradeOnlyAncientSegments == false && cur.minor > old.minor) {
        // If it's only a minor version difference, and we are not upgrading only ancient segments,
        // also upgrade:
        return true;
    }
    // Version matches, or segment is not ancient and we are only upgrading ancient segments:
    return false;
}
 
Example 5. Project: lucene-solr, File: Lucene50LiveDocsFormat.java
@Override
public void writeLiveDocs(Bits bits, Directory dir, SegmentCommitInfo info, int newDelCount, IOContext context) throws IOException {
  long gen = info.getNextDelGen();
  String name = IndexFileNames.fileNameFromGeneration(info.info.name, EXTENSION, gen);
  int delCount = 0;
  try (IndexOutput output = dir.createOutput(name, context)) {
    CodecUtil.writeIndexHeader(output, CODEC_NAME, VERSION_CURRENT, info.info.getId(), Long.toString(gen, Character.MAX_RADIX));
    final int longCount = FixedBitSet.bits2words(bits.length());
    for (int i = 0; i < longCount; ++i) {
      long currentBits = 0;
      for (int j = i << 6, end = Math.min(j + 63, bits.length() - 1); j <= end; ++j) {
        if (bits.get(j)) {
          currentBits |= 1L << j; // Java masks long shift counts to 6 bits, so this sets bit (j & 63)
        } else {
          delCount += 1;
        }
      }
      output.writeLong(currentBits);
    }
    CodecUtil.writeFooter(output);
  }
  if (delCount != info.getDelCount() + newDelCount) {
    throw new CorruptIndexException("bits.deleted=" + delCount + 
        " info.delcount=" + info.getDelCount() + " newdelcount=" + newDelCount, name);
  }
}
 
Example 6. Project: lucene-solr, File: SegmentsInfoRequestHandler.java
private SimpleOrderedMap<Object> getMergeInformation(SolrQueryRequest req, SegmentInfos infos, List<String> mergeCandidates) throws IOException {
  SimpleOrderedMap<Object> result = new SimpleOrderedMap<>();
  RefCounted<IndexWriter> refCounted = req.getCore().getSolrCoreState().getIndexWriter(req.getCore());
  try {
    IndexWriter indexWriter = refCounted.get();
    if (indexWriter instanceof SolrIndexWriter) {
      result.addAll(((SolrIndexWriter)indexWriter).getRunningMerges());
    }
    //get chosen merge policy
    MergePolicy mp = indexWriter.getConfig().getMergePolicy();
    //Find merges
    MergeSpecification findMerges = mp.findMerges(MergeTrigger.EXPLICIT, infos, indexWriter);
    if (findMerges != null && findMerges.merges != null && findMerges.merges.size() > 0) {
      for (OneMerge merge : findMerges.merges) {
        //TODO: add merge grouping
        for (SegmentCommitInfo mergeSegmentInfo : merge.segments) {
          mergeCandidates.add(mergeSegmentInfo.info.name);
        }
      }
    }

    return result;
  } finally {
    refCounted.decref();
  }
}
 
Example 7. Project: lucene-solr, File: SegmentsInfoRequestHandlerTest.java
@Test
public void testSegmentNames() throws IOException {
  String[] segmentNamePatterns = new String[NUM_SEGMENTS];
  h.getCore().withSearcher((searcher) -> {
    int i = 0;
    for (SegmentCommitInfo sInfo : SegmentInfos.readLatestCommit(searcher.getIndexReader().directory())) {
      assertTrue("Unexpected number of segment in the index: " + i, i < NUM_SEGMENTS);
      segmentNamePatterns[i] = "//lst[@name='segments']/lst/str[@name='name'][.='" + sInfo.info.name + "']";
      i++;
    }
    
    return null;
  });
  assertQ("Unexpected segment names returned",
      req("qt","/admin/segments"),
      segmentNamePatterns);
}
 
Example 8. Project: lucene-solr, File: SegmentsInfoRequestHandlerTest.java
@Test
public void testFieldInfo() throws Exception {
  String[] segmentNamePatterns = new String[NUM_SEGMENTS];
  h.getCore().withSearcher((searcher) -> {
    int i = 0;
    for (SegmentCommitInfo sInfo : SegmentInfos.readLatestCommit(searcher.getIndexReader().directory())) {
      assertTrue("Unexpected number of segment in the index: " + i, i < NUM_SEGMENTS);
      segmentNamePatterns[i] = "boolean(//lst[@name='segments']/lst[@name='" + sInfo.info.name + "']/lst[@name='fields']/lst[@name='id']/str[@name='flags'])";
      i++;
    }

    return null;
  });
  assertQ("Unexpected field infos returned",
      req("qt","/admin/segments", "fieldInfo", "true"),
      segmentNamePatterns);
}
 
Example 9. Project: crate, File: Engine.java
protected final DocsStats docsStats(IndexReader indexReader) {
    long numDocs = 0;
    long numDeletedDocs = 0;
    long sizeInBytes = 0;
    // we don't wait for pending refreshes here since this is a stats call; instead we only mark the reader
    // as accessed, which will cause the next scheduled refresh to go through and refresh the stats as well
    for (LeafReaderContext readerContext : indexReader.leaves()) {
        // we go on the segment level here to get accurate numbers
        final SegmentReader segmentReader = Lucene.segmentReader(readerContext.reader());
        SegmentCommitInfo info = segmentReader.getSegmentInfo();
        numDocs += readerContext.reader().numDocs();
        numDeletedDocs += readerContext.reader().numDeletedDocs();
        try {
            sizeInBytes += info.sizeInBytes();
        } catch (IOException e) {
            logger.trace(() -> new ParameterizedMessage("failed to get size for [{}]", info.info.name), e);
        }
    }
    return new DocsStats(numDocs, numDeletedDocs, sizeInBytes);
}
 
Example 10. Project: crate, File: InternalEngine.java
@Override
public List<Segment> segments(boolean verbose) {
    try (ReleasableLock lock = readLock.acquire()) {
        Segment[] segmentsArr = getSegmentInfo(lastCommittedSegmentInfos, verbose);

        // fill in the merges flag
        Set<OnGoingMerge> onGoingMerges = mergeScheduler.onGoingMerges();
        for (OnGoingMerge onGoingMerge : onGoingMerges) {
            for (SegmentCommitInfo segmentInfoPerCommit : onGoingMerge.getMergedSegments()) {
                for (Segment segment : segmentsArr) {
                    if (segment.getName().equals(segmentInfoPerCommit.info.name)) {
                        segment.mergeId = onGoingMerge.getId();
                        break;
                    }
                }
            }
        }
        return Arrays.asList(segmentsArr);
    }
}
 
Example 11. Project: crate, File: ElasticsearchMergePolicy.java
private boolean shouldUpgrade(SegmentCommitInfo info) {
    org.apache.lucene.util.Version old = info.info.getVersion();
    org.apache.lucene.util.Version cur = Version.CURRENT.luceneVersion;

    // Something seriously wrong if this trips:
    assert old.major <= cur.major;

    if (cur.major > old.major) {
        // Always upgrade segment if Lucene's major version is too old
        return true;
    }
    if (upgradeOnlyAncientSegments == false && cur.minor > old.minor) {
        // If it's only a minor version difference, and we are not upgrading only ancient segments,
        // also upgrade:
        return true;
    }
    // Version matches, or segment is not ancient and we are only upgrading ancient segments:
    return false;
}
 
Example 12. Project: lucene-solr, File: Segment.java
static Segment of(SegmentCommitInfo segInfo) {
  Segment segment = new Segment();
  segment.name = segInfo.info.name;
  segment.maxDoc = segInfo.info.maxDoc();
  segment.delGen = segInfo.getDelGen();
  segment.delCount = segInfo.getDelCount();
  segment.luceneVer = segInfo.info.getVersion().toString();
  segment.codecName = segInfo.info.getCodec().getName();
  try {
    segment.displaySize = CommitsImpl.toDisplaySize(segInfo.sizeInBytes());
  } catch (IOException e) {
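    // ignored deliberately: displaySize is simply left unset when the segment size cannot be read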
  }
  segment.useCompoundFile = segInfo.info.getUseCompoundFile();
  return segment;
}
 
Example 13. Project: lucene-solr, File: SimpleTextLiveDocsFormat.java
@Override
public Bits readLiveDocs(Directory dir, SegmentCommitInfo info, IOContext context) throws IOException {
  assert info.hasDeletions();
  BytesRefBuilder scratch = new BytesRefBuilder();
  CharsRefBuilder scratchUTF16 = new CharsRefBuilder();
  
  String fileName = IndexFileNames.fileNameFromGeneration(info.info.name, LIVEDOCS_EXTENSION, info.getDelGen());
  ChecksumIndexInput in = null;
  boolean success = false;
  try {
    in = dir.openChecksumInput(fileName, context);
    
    SimpleTextUtil.readLine(in, scratch);
    assert StringHelper.startsWith(scratch.get(), SIZE);
    int size = parseIntAt(scratch.get(), SIZE.length, scratchUTF16);
    
    BitSet bits = new BitSet(size);
    
    SimpleTextUtil.readLine(in, scratch);
    while (!scratch.get().equals(END)) {
      assert StringHelper.startsWith(scratch.get(), DOC);
      int docid = parseIntAt(scratch.get(), DOC.length, scratchUTF16);
      bits.set(docid);
      SimpleTextUtil.readLine(in, scratch);
    }
    
    SimpleTextUtil.checkFooter(in);
    
    success = true;
    return new SimpleTextBits(bits, size);
  } finally {
    if (success) {
      IOUtils.close(in);
    } else {
      IOUtils.closeWhileHandlingException(in);
    }
  }
}
 
Example 14. Project: lucene-solr, File: SimpleTextLiveDocsFormat.java
@Override
public void writeLiveDocs(Bits bits, Directory dir, SegmentCommitInfo info, int newDelCount, IOContext context) throws IOException {
  int size = bits.length();
  BytesRefBuilder scratch = new BytesRefBuilder();
  
  String fileName = IndexFileNames.fileNameFromGeneration(info.info.name, LIVEDOCS_EXTENSION, info.getNextDelGen());
  IndexOutput out = null;
  boolean success = false;
  try {
    out = dir.createOutput(fileName, context);
    SimpleTextUtil.write(out, SIZE);
    SimpleTextUtil.write(out, Integer.toString(size), scratch);
    SimpleTextUtil.writeNewline(out);
    
    for (int i = 0; i < size; ++i) {
      if (bits.get(i)) {
        SimpleTextUtil.write(out, DOC);
        SimpleTextUtil.write(out, Integer.toString(i), scratch);
        SimpleTextUtil.writeNewline(out);
      }
    }
    
    SimpleTextUtil.write(out, END);
    SimpleTextUtil.writeNewline(out);
    SimpleTextUtil.writeChecksum(out, scratch);
    success = true;
  } finally {
    if (success) {
      IOUtils.close(out);
    } else {
      IOUtils.closeWhileHandlingException(out);
    }
  }
}
 
Example 15. Project: lucene-solr, File: AssertingLiveDocsFormat.java
@Override
public Bits readLiveDocs(Directory dir, SegmentCommitInfo info, IOContext context) throws IOException {
  Bits raw = in.readLiveDocs(dir, info, context);
  assert raw != null;
  check(raw, info.info.maxDoc(), info.getDelCount());
  return new AssertingBits(raw);
}
 
Example 16. Project: lucene-solr, File: CrankyLiveDocsFormat.java
@Override
public void writeLiveDocs(Bits bits, Directory dir, SegmentCommitInfo info, int newDelCount, IOContext context) throws IOException {
  if (random.nextInt(100) == 0) {
    throw new IOException("Fake IOException from LiveDocsFormat.writeLiveDocs()");
  }
  delegate.writeLiveDocs(bits, dir, info, newDelCount, context);
}
 
Example 17. Project: lucene-solr, File: Lucene50LiveDocsFormat.java
@Override
public Bits readLiveDocs(Directory dir, SegmentCommitInfo info, IOContext context) throws IOException {
  long gen = info.getDelGen();
  String name = IndexFileNames.fileNameFromGeneration(info.info.name, EXTENSION, gen);
  final int length = info.info.maxDoc();
  try (ChecksumIndexInput input = dir.openChecksumInput(name, context)) {
    Throwable priorE = null;
    try {
      CodecUtil.checkIndexHeader(input, CODEC_NAME, VERSION_START, VERSION_CURRENT, 
                                   info.info.getId(), Long.toString(gen, Character.MAX_RADIX));
      long data[] = new long[FixedBitSet.bits2words(length)];
      for (int i = 0; i < data.length; i++) {
        data[i] = input.readLong();
      }
      FixedBitSet fbs = new FixedBitSet(data, length);
      if (fbs.length() - fbs.cardinality() != info.getDelCount()) {
        throw new CorruptIndexException("bits.deleted=" + (fbs.length() - fbs.cardinality()) + 
                                        " info.delcount=" + info.getDelCount(), input);
      }
      return fbs.asReadOnlyBits();
    } catch (Throwable exception) {
      priorE = exception;
    } finally {
      CodecUtil.checkFooter(input, priorE);
    }
  }
  throw new AssertionError();
}
 
Example 18. Project: crate, File: Lucene.java
/**
 * Returns an iterable that allows iterating over all files in this segments info
 */
public static Iterable<String> files(SegmentInfos infos) throws IOException {
    final List<Collection<String>> list = new ArrayList<>();
    list.add(Collections.singleton(infos.getSegmentsFileName()));
    for (SegmentCommitInfo info : infos) {
        list.add(info.files());
    }
    return Iterables.flatten(list);
}
 
Example 19. Project: crate, File: Lucene.java
/**
 * Returns the number of documents in the index referenced by this {@link SegmentInfos}
 */
public static int getNumDocs(SegmentInfos info) {
    int numDocs = 0;
    for (SegmentCommitInfo si : info) {
        numDocs += si.info.maxDoc() - si.getDelCount() - si.getSoftDelCount();
    }
    return numDocs;
}
 
Example 20. Project: Elasticsearch, File: OnGoingMerge.java
/**
 * The list of segments that are being merged.
 */
public List<SegmentCommitInfo> getMergedSegments() {
    return mergedSegments;
}
 
Example 21. Project: Elasticsearch, File: ElasticsearchMergePolicy.java
public IndexUpgraderOneMerge(List<SegmentCommitInfo> segments) {
    super(segments);
}
 
Example 22. Project: Elasticsearch, File: ElasticsearchMergePolicy.java
@Override
public MergeSpecification findForcedMerges(SegmentInfos segmentInfos,
    int maxSegmentCount, Map<SegmentCommitInfo,Boolean> segmentsToMerge, IndexWriter writer)
    throws IOException {

    if (upgradeInProgress) {
        MergeSpecification spec = new IndexUpgraderMergeSpecification();
        for (SegmentCommitInfo info : segmentInfos) {

            if (shouldUpgrade(info)) {

                // TODO: Use IndexUpgradeMergePolicy instead.  We should be comparing codecs,
                // for now we just assume every minor upgrade has a new format.
                logger.debug("Adding segment " + info.info.name + " to be upgraded");
                spec.add(new OneMerge(Collections.singletonList(info)));
            }

            // TODO: we could check IndexWriter.getMergingSegments and avoid adding merges that IW will just reject?

            if (spec.merges.size() == MAX_CONCURRENT_UPGRADE_MERGES) {
                // hit our max upgrades, so return the spec.  we will get a cascaded call to continue.
                logger.debug("Returning " + spec.merges.size() + " merges for upgrade");
                return spec;
            }
        }

        // We must have less than our max upgrade merges, so the next return will be our last in upgrading mode.
        if (spec.merges.isEmpty() == false) {
            logger.debug("Returning " + spec.merges.size() + " merges for end of upgrade");
            return spec;
        }

        // Only set this once there are 0 segments needing upgrading, because when we return a
        // spec, IndexWriter may (silently!) reject that merge if some of the segments we asked
        // to be merged were already being (naturally) merged:
        upgradeInProgress = false;

        // fall through, so when we don't have any segments to upgrade, the delegate policy
        // has a chance to decide what to do (e.g. collapse the segments to satisfy maxSegmentCount)
    }

    return upgradedMergeSpecification(delegate.findForcedMerges(segmentInfos, maxSegmentCount, segmentsToMerge, writer));
}
 
Example 23. Project: Elasticsearch, File: ElasticsearchMergePolicy.java
@Override
public boolean useCompoundFile(SegmentInfos segments, SegmentCommitInfo newSegment, IndexWriter writer) throws IOException {
    return delegate.useCompoundFile(segments, newSegment, writer);
}
 
Example 24. Project: linden, File: SortingMergePolicyDecorator.java
@Override
public MergeSpecification findForcedMerges(SegmentInfos segmentInfos, int maxSegmentCount,
                                           Map<SegmentCommitInfo, Boolean> segmentsToMerge,
                                           IndexWriter writer) throws IOException {
  return sortingMergePolicy.findForcedMerges(segmentInfos, maxSegmentCount, segmentsToMerge, writer);
}
 
Example 25. Project: lucene-solr, File: PrimaryNode.java
/** Called when a merge has finished, but before IW switches to the merged segment */
protected abstract void preCopyMergedSegmentFiles(SegmentCommitInfo info, Map<String,FileMetaData> files) throws IOException;
 
Example 26. Project: lucene-solr, File: AssertingLiveDocsFormat.java
@Override
public void writeLiveDocs(Bits bits, Directory dir, SegmentCommitInfo info, int newDelCount, IOContext context) throws IOException {
  check(bits, info.info.maxDoc(), info.getDelCount() + newDelCount);
  in.writeLiveDocs(bits, dir, info, newDelCount, context);
}
 
Example 27. Project: lucene-solr, File: CrankyLiveDocsFormat.java
@Override
public Bits readLiveDocs(Directory dir, SegmentCommitInfo info, IOContext context) throws IOException {
  return delegate.readLiveDocs(dir, info, context);
}
 
Example 28. Project: lucene-solr, File: LiveDocsFormat.java
/** Read live docs bits. */
public abstract Bits readLiveDocs(Directory dir, SegmentCommitInfo info, IOContext context) throws IOException;
 
Example 29. Project: lucene-solr, File: SegmentsInfoRequestHandler.java
private SimpleOrderedMap<Object> getSegmentInfo(
    SegmentCommitInfo segmentCommitInfo, boolean withSizeInfo, boolean withFieldInfos,
    List<LeafReaderContext> leafContexts, IndexSchema schema) throws IOException {
  SimpleOrderedMap<Object> segmentInfoMap = new SimpleOrderedMap<>();

  segmentInfoMap.add(NAME, segmentCommitInfo.info.name);
  segmentInfoMap.add("delCount", segmentCommitInfo.getDelCount());
  segmentInfoMap.add("softDelCount", segmentCommitInfo.getSoftDelCount());
  segmentInfoMap.add("hasFieldUpdates", segmentCommitInfo.hasFieldUpdates());
  segmentInfoMap.add("sizeInBytes", segmentCommitInfo.sizeInBytes());
  segmentInfoMap.add("size", segmentCommitInfo.info.maxDoc());
  Long timestamp = Long.parseLong(segmentCommitInfo.info.getDiagnostics()
      .get("timestamp"));
  segmentInfoMap.add("age", new Date(timestamp));
  segmentInfoMap.add("source",
      segmentCommitInfo.info.getDiagnostics().get("source"));
  segmentInfoMap.add("version", segmentCommitInfo.info.getVersion().toString());
  // don't open a new SegmentReader - try to find the right one from the leaf contexts
  SegmentReader seg = null;
  for (LeafReaderContext lrc : leafContexts) {
    LeafReader leafReader = lrc.reader();
    leafReader = FilterLeafReader.unwrap(leafReader);
    if (leafReader instanceof SegmentReader) {
      SegmentReader sr = (SegmentReader)leafReader;
      if (sr.getSegmentInfo().info.equals(segmentCommitInfo.info)) {
        seg = sr;
        break;
      }
    }
  }
  if (seg != null) {
    LeafMetaData metaData = seg.getMetaData();
    if (metaData != null) {
      segmentInfoMap.add("createdVersionMajor", metaData.getCreatedVersionMajor());
      segmentInfoMap.add("minVersion", metaData.getMinVersion().toString());
      if (metaData.getSort() != null) {
        segmentInfoMap.add("sort", metaData.getSort().toString());
      }
    }
  }
  if (!segmentCommitInfo.info.getDiagnostics().isEmpty()) {
    segmentInfoMap.add("diagnostics", segmentCommitInfo.info.getDiagnostics());
  }
  if (!segmentCommitInfo.info.getAttributes().isEmpty()) {
    segmentInfoMap.add("attributes", segmentCommitInfo.info.getAttributes());
  }
  if (withSizeInfo) {
    Directory dir = segmentCommitInfo.info.dir;
    List<Pair<String, Long>> files = segmentCommitInfo.files().stream()
        .map(f -> {
          long size = -1;
          try {
            size = dir.fileLength(f);
          } catch (IOException e) {
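            // ignored deliberately: the file is reported with size -1 when its length cannot be read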
          }
          return new Pair<String, Long>(f, size);
        }).sorted((p1, p2) -> {
          if (p1.second() > p2.second()) {
            return -1;
          } else if (p1.second() < p2.second()) {
            return 1;
          } else {
            return 0;
          }
        }).collect(Collectors.toList());
    if (!files.isEmpty()) {
      SimpleOrderedMap<Object> topFiles = new SimpleOrderedMap<>();
      for (int i = 0; i < Math.min(files.size(), 5); i++) {
        Pair<String, Long> p = files.get(i);
        topFiles.add(p.first(), RamUsageEstimator.humanReadableUnits(p.second()));
      }
      segmentInfoMap.add("largestFiles", topFiles);
    }
  }
  if (seg != null && withSizeInfo) {
    SimpleOrderedMap<Object> ram = new SimpleOrderedMap<>();
    ram.add("total", seg.ramBytesUsed());
    for (Accountable ac : seg.getChildResources()) {
      accountableToMap(ac, ram::add);
    }
    segmentInfoMap.add("ramBytesUsed", ram);
  }
  if (withFieldInfos) {
    if (seg == null) {
      log.debug("Skipping segment info - not available as a SegmentReader: {}", segmentCommitInfo);
    } else {
      FieldInfos fis = seg.getFieldInfos();
      SimpleOrderedMap<Object> fields = new SimpleOrderedMap<>();
      for (FieldInfo fi : fis) {
        fields.add(fi.name, getFieldInfo(seg, fi, schema));
      }
      segmentInfoMap.add("fields", fields);
    }
  }

  return segmentInfoMap;
}
 
Example 30. Project: lucene-solr, File: UninvertDocValuesMergePolicyFactory.java
public UninvertDocValuesOneMerge(List<SegmentCommitInfo> segments) {
  super(segments);
}
 