Source-code examples for the class org.apache.lucene.index.IndexFileNames

Listed below are code examples showing how the org.apache.lucene.index.IndexFileNames API is used in practice; follow the links to browse the full source on GitHub. Most examples build per-segment file names with segmentFileName, generation-stamped names with fileNameFromGeneration, and detect commit files via the SEGMENTS name prefix.
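Before the examples, here is a minimal sketch of the helpers they all rely on. It is illustrative only: it assumes the Lucene 8.x API, and the segment name, suffix, and extensions are made up.

import org.apache.lucene.index.IndexFileNames;

public class IndexFileNamesDemo {
  public static void main(String[] args) {
    // per-segment file name: segment name + "_" + suffix + "." + extension
    String terms = IndexFileNames.segmentFileName("_0", "Lucene84_0", "tim");
    System.out.println(terms);                                         // _0_Lucene84_0.tim

    // generation-stamped file name (generation printed in radix 36), used e.g. for live docs
    System.out.println(IndexFileNames.fileNameFromGeneration("_0", "liv", 1)); // _0_1.liv

    // helpers for classifying and parsing existing file names
    System.out.println(IndexFileNames.matchesExtension(terms, "tim")); // true
    System.out.println(IndexFileNames.parseSegmentName(terms));        // _0
    System.out.println(IndexFileNames.SEGMENTS);                       // "segments"
  }
}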

Example 1
public CompletionFieldsConsumer(SegmentWriteState state) throws IOException {
    this.delegatesFieldsConsumer = delegatePostingsFormat.fieldsConsumer(state);
    String suggestFSTFile = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, EXTENSION);
    IndexOutput output = null;
    boolean success = false;
    try {
        output = state.directory.createOutput(suggestFSTFile, state.context);
        CodecUtil.writeHeader(output, CODEC_NAME, SUGGEST_VERSION_CURRENT);
        /*
         * we write the delegate postings format name so we can load it
         * without getting an instance in the ctor
         */
        output.writeString(delegatePostingsFormat.getName());
        output.writeString(writeProvider.getName());
        this.suggestFieldsConsumer = writeProvider.consumer(output);
        success = true;
    } finally {
        if (!success) {
            IOUtils.closeWhileHandlingException(output);
        }
    }
}
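Nearly every writer constructor in this listing follows the resource-safety idiom seen above: success is flipped to true only after the header writes complete, and the finally block closes the half-written output with IOUtils.closeWhileHandlingException so a secondary close failure cannot mask the original exception. A stripped-down sketch of the idiom, with a hypothetical codec name and parameters (imports omitted, as in the listings):

static void writeWithCleanup(Directory dir, String fileName, IOContext context) throws IOException {
  IndexOutput out = dir.createOutput(fileName, context);
  boolean success = false;
  try {
    CodecUtil.writeHeader(out, "MyCodec", 1); // any write here may throw
    success = true;
  } finally {
    if (!success) {
      // close quietly so the in-flight exception propagates instead of a close error
      IOUtils.closeWhileHandlingException(out);
    }
  }
}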
 
Example 2 · Project: lucene-solr · File: IndexReplicationHandler.java
/**
 * Verifies that the last file is segments_N and fails otherwise. It also
 * removes and returns the file from the list, because it needs to be handled
 * last, after all files. This is important in order to guarantee that if a
 * reader sees the new segments_N, all other segment files are already on
 * stable storage.
 * <p>
 * The reason the code fails, rather than moving the segments_N file to the
 * end itself, is that an out-of-order list indicates an error in the Revision
 * implementation.
 */
public static String getSegmentsFile(List<String> files, boolean allowEmpty) {
  if (files.isEmpty()) {
    if (allowEmpty) {
      return null;
    } else {
      throw new IllegalStateException("empty list of files not allowed");
    }
  }
  
  String segmentsFile = files.remove(files.size() - 1);
  if (!segmentsFile.startsWith(IndexFileNames.SEGMENTS)) {
    throw new IllegalStateException("last file to copy+sync must be segments_N but got " + segmentsFile
        + "; check your Revision implementation!");
  }
  return segmentsFile;
}
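A short usage sketch for getSegmentsFile, with made-up file names. The segments file is removed from the list so the caller can copy and sync all per-segment files first and the commit point last:

List<String> files = new ArrayList<>(Arrays.asList("_0.cfe", "_0.cfs", "_0.si", "segments_1"));
String segmentsFile = IndexReplicationHandler.getSegmentsFile(files, false);
// segmentsFile == "segments_1"; files now holds only the per-segment files,
// which must reach stable storage before the segments file is copied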
 
Example 3 · Project: lucene-solr · File: IndexAndTaxonomyRevisionTest.java
@Test
public void testSegmentsFileLast() throws Exception {
  Directory indexDir = newDirectory();
  IndexWriterConfig conf = new IndexWriterConfig(null);
  conf.setIndexDeletionPolicy(new SnapshotDeletionPolicy(conf.getIndexDeletionPolicy()));
  IndexWriter indexWriter = new IndexWriter(indexDir, conf);
  
  Directory taxoDir = newDirectory();
  SnapshotDirectoryTaxonomyWriter taxoWriter = new SnapshotDirectoryTaxonomyWriter(taxoDir);
  try {
    indexWriter.addDocument(newDocument(taxoWriter));
    indexWriter.commit();
    taxoWriter.commit();
    Revision rev = new IndexAndTaxonomyRevision(indexWriter, taxoWriter);
    Map<String,List<RevisionFile>> sourceFiles = rev.getSourceFiles();
    assertEquals(2, sourceFiles.size());
    for (List<RevisionFile> files : sourceFiles.values()) {
      String lastFile = files.get(files.size() - 1).fileName;
      assertTrue(lastFile.startsWith(IndexFileNames.SEGMENTS));
    }
    indexWriter.close();
  } finally {
    IOUtils.close(indexWriter, taxoWriter, taxoDir, indexDir);
  }
}
 
Example 4 · Project: lucene-solr · File: IndexRevisionTest.java
@Test
public void testRevisionRelease() throws Exception {
  Directory dir = newDirectory();
  IndexWriterConfig conf = new IndexWriterConfig(null);
  conf.setIndexDeletionPolicy(new SnapshotDeletionPolicy(conf.getIndexDeletionPolicy()));
  IndexWriter writer = new IndexWriter(dir, conf);
  try {
    writer.addDocument(new Document());
    writer.commit();
    Revision rev1 = new IndexRevision(writer);
    // releasing that revision should not delete the files
    rev1.release();
    assertTrue(slowFileExists(dir, IndexFileNames.SEGMENTS + "_1"));
    
    rev1 = new IndexRevision(writer); // create revision again, so the files are snapshotted
    writer.addDocument(new Document());
    writer.commit();
    assertNotNull(new IndexRevision(writer));
    rev1.release(); // this release should trigger the delete of segments_1
    assertFalse(slowFileExists(dir, IndexFileNames.SEGMENTS + "_1"));
  } finally {
    IOUtils.close(writer, dir);
  }
}
 
Example 5 · Project: lucene-solr · File: IndexRevisionTest.java
@Test
public void testSegmentsFileLast() throws Exception {
  Directory dir = newDirectory();
  IndexWriterConfig conf = new IndexWriterConfig(null);
  conf.setIndexDeletionPolicy(new SnapshotDeletionPolicy(conf.getIndexDeletionPolicy()));
  IndexWriter writer = new IndexWriter(dir, conf);
  try {
    writer.addDocument(new Document());
    writer.commit();
    Revision rev = new IndexRevision(writer);
    @SuppressWarnings("unchecked")
    Map<String, List<RevisionFile>> sourceFiles = rev.getSourceFiles();
    assertEquals(1, sourceFiles.size());
    List<RevisionFile> files = sourceFiles.values().iterator().next();
    String lastFile = files.get(files.size() - 1).fileName;
    assertTrue(lastFile.startsWith(IndexFileNames.SEGMENTS));
    writer.close();
  } finally {
    IOUtils.close(dir);
  }
}
 
Example 6 · Project: lucene-solr · File: FSTTermsWriter.java
public FSTTermsWriter(SegmentWriteState state, PostingsWriterBase postingsWriter) throws IOException {
  final String termsFileName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, TERMS_EXTENSION);

  this.postingsWriter = postingsWriter;
  this.fieldInfos = state.fieldInfos;
  this.out = state.directory.createOutput(termsFileName, state.context);
  this.maxDoc = state.segmentInfo.maxDoc();

  boolean success = false;
  try {
    CodecUtil.writeIndexHeader(out, TERMS_CODEC_NAME, TERMS_VERSION_CURRENT,
                                      state.segmentInfo.getId(), state.segmentSuffix);   

    this.postingsWriter.init(out, state); 
    success = true;
  } finally {
    if (!success) {
      IOUtils.closeWhileHandlingException(out);
    }
  }
}
 
Example 7 · Project: lucene-solr · File: FixedGapTermsIndexWriter.java
public FixedGapTermsIndexWriter(SegmentWriteState state, int termIndexInterval) throws IOException {
  if (termIndexInterval <= 0) {
    throw new IllegalArgumentException("invalid termIndexInterval: " + termIndexInterval);
  }
  this.termIndexInterval = termIndexInterval;
  final String indexFileName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, TERMS_INDEX_EXTENSION);
  out = state.directory.createOutput(indexFileName, state.context);
  boolean success = false;
  try {
    CodecUtil.writeIndexHeader(out, CODEC_NAME, VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix);
    out.writeVInt(termIndexInterval);
    out.writeVInt(PackedInts.VERSION_CURRENT);
    out.writeVInt(BLOCKSIZE);
    success = true;
  } finally {
    if (!success) {
      IOUtils.closeWhileHandlingException(out);
    }
  }
}
 
Example 8 · Project: lucene-solr · File: BlockTermsWriter.java
public BlockTermsWriter(TermsIndexWriterBase termsIndexWriter,
    SegmentWriteState state, PostingsWriterBase postingsWriter)
    throws IOException {
  final String termsFileName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, TERMS_EXTENSION);
  this.termsIndexWriter = termsIndexWriter;
  maxDoc = state.segmentInfo.maxDoc();
  out = state.directory.createOutput(termsFileName, state.context);
  boolean success = false;
  try {
    fieldInfos = state.fieldInfos;
    CodecUtil.writeIndexHeader(out, CODEC_NAME, VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix);
    currentField = null;
    this.postingsWriter = postingsWriter;
    // segment = state.segmentName;
    
    //System.out.println("BTW.init seg=" + state.segmentName);
    
    postingsWriter.init(out, state); // have consumer write its format/header
    success = true;
  } finally {
    if (!success) {
      IOUtils.closeWhileHandlingException(out);
    }
  }
}
 
Example 9 · Project: lucene-solr · File: SimpleTextPointsWriter.java
@Override
public void close() throws IOException {
  if (dataOut != null) {
    dataOut.close();
    dataOut = null;

    // Write index file
    String fileName = IndexFileNames.segmentFileName(writeState.segmentInfo.name, writeState.segmentSuffix, SimpleTextPointsFormat.POINT_INDEX_EXTENSION);
    try (IndexOutput indexOut = writeState.directory.createOutput(fileName, writeState.context)) {
      int count = indexFPs.size();
      write(indexOut, FIELD_COUNT);
      write(indexOut, Integer.toString(count));
      newline(indexOut);
      for (Map.Entry<String,Long> ent : indexFPs.entrySet()) {
        write(indexOut, FIELD_FP_NAME);
        write(indexOut, ent.getKey());
        newline(indexOut);
        write(indexOut, FIELD_FP);
        write(indexOut, Long.toString(ent.getValue()));
        newline(indexOut);
      }
      SimpleTextUtil.writeChecksum(indexOut, scratch);
    }
  }
}
 
Example 10 · Project: lucene-solr · File: CompletionFieldsConsumer.java
CompletionFieldsConsumer(String codecName, PostingsFormat delegatePostingsFormat, SegmentWriteState state) throws IOException {
  this.codecName = codecName;
  this.delegatePostingsFormatName = delegatePostingsFormat.getName();
  this.state = state;
  String dictFile = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, DICT_EXTENSION);
  boolean success = false;
  try {
    this.delegateFieldsConsumer = delegatePostingsFormat.fieldsConsumer(state);
    dictOut = state.directory.createOutput(dictFile, state.context);
    CodecUtil.writeIndexHeader(dictOut, codecName, COMPLETION_VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix);
    success = true;
  } finally {
    if (success == false) {
      IOUtils.closeWhileHandlingException(dictOut, delegateFieldsConsumer);
    }
  }
}
 
Example 11 · Project: lucene-solr · File: RAMOnlyPostingsFormat.java
@Override
public FieldsProducer fieldsProducer(SegmentReadState readState)
  throws IOException {

  // Load our ID:
  final String idFileName = IndexFileNames.segmentFileName(readState.segmentInfo.name, readState.segmentSuffix, ID_EXTENSION);
  IndexInput in = readState.directory.openInput(idFileName, readState.context);
  boolean success = false;
  final int id;
  try {
    CodecUtil.checkHeader(in, RAM_ONLY_NAME, VERSION_START, VERSION_LATEST);
    id = in.readVInt();
    success = true;
  } finally {
    if (!success) {
      IOUtils.closeWhileHandlingException(in);
    } else {
      IOUtils.close(in);
    }
  }
  
  synchronized(state) {
    return state.get(id);
  }
}
 
Example 12 · Project: lucene-solr · File: Lucene50LiveDocsFormat.java
@Override
public void writeLiveDocs(Bits bits, Directory dir, SegmentCommitInfo info, int newDelCount, IOContext context) throws IOException {
  long gen = info.getNextDelGen();
  String name = IndexFileNames.fileNameFromGeneration(info.info.name, EXTENSION, gen);
  int delCount = 0;
  try (IndexOutput output = dir.createOutput(name, context)) {
    CodecUtil.writeIndexHeader(output, CODEC_NAME, VERSION_CURRENT, info.info.getId(), Long.toString(gen, Character.MAX_RADIX));
    final int longCount = FixedBitSet.bits2words(bits.length());
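    // pack the bitset 64 bits per long: a set bit marks a live doc, a clear bit a deleted one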
    for (int i = 0; i < longCount; ++i) {
      long currentBits = 0;
      for (int j = i << 6, end = Math.min(j + 63, bits.length() - 1); j <= end; ++j) {
        if (bits.get(j)) {
          currentBits |= 1L << j; // mod 64
        } else {
          delCount += 1;
        }
      }
      output.writeLong(currentBits);
    }
    CodecUtil.writeFooter(output);
  }
  if (delCount != info.getDelCount() + newDelCount) {
    throw new CorruptIndexException("bits.deleted=" + delCount + 
        " info.delcount=" + info.getDelCount() + " newdelcount=" + newDelCount, name);
  }
}
 
Example 13 · Project: lucene-solr · File: Lucene80NormsConsumer.java
Lucene80NormsConsumer(SegmentWriteState state, String dataCodec, String dataExtension, String metaCodec, String metaExtension) throws IOException {
  boolean success = false;
  try {
    String dataName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, dataExtension);
    data = state.directory.createOutput(dataName, state.context);
    CodecUtil.writeIndexHeader(data, dataCodec, VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix);
    String metaName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, metaExtension);
    meta = state.directory.createOutput(metaName, state.context);
    CodecUtil.writeIndexHeader(meta, metaCodec, VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix);
    maxDoc = state.segmentInfo.maxDoc();
    success = true;
  } finally {
    if (!success) {
      IOUtils.closeWhileHandlingException(this);
    }
  }
}
 
Example 14 · Project: lucene-solr · File: Lucene80DocValuesConsumer.java
/** expert: Creates a new writer */
public Lucene80DocValuesConsumer(SegmentWriteState state, String dataCodec, String dataExtension, String metaCodec, String metaExtension) throws IOException {
  boolean success = false;
  try {
    this.state = state;
    String dataName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, dataExtension);
    data = state.directory.createOutput(dataName, state.context);
    CodecUtil.writeIndexHeader(data, dataCodec, Lucene80DocValuesFormat.VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix);
    String metaName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, metaExtension);
    meta = state.directory.createOutput(metaName, state.context);
    CodecUtil.writeIndexHeader(meta, metaCodec, Lucene80DocValuesFormat.VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix);
    maxDoc = state.segmentInfo.maxDoc();
    success = true;
  } finally {
    if (!success) {
      IOUtils.closeWhileHandlingException(this);
    }
  }
}
 
Example 15 · Project: lucene-solr · File: BlockDirectory.java
/**
 * Determine whether write caching should be used for a particular
 * file/context.
 */
boolean useWriteCache(String name, IOContext context) {
  if (!blockCacheWriteEnabled || name.startsWith(IndexFileNames.PENDING_SEGMENTS)) {
    // for safety, don't bother caching pending commits.
    // the cache does support renaming (renameCacheFile), but that's a scary optimization.
    return false;
  }
  if (blockCacheFileTypes != null && !isCachableFile(name)) {
    return false;
  }
  switch (context.context) {
    case MERGE: {
      // we currently don't cache any merge context writes
      return false;
    }
    default: {
      return true;
    }
  }
}
 
Example 16
public DiskDocValuesConsumer(SegmentWriteState state, String dataCodec, String dataExtension, String metaCodec, String metaExtension) throws IOException {
  boolean success = false;
  try {
    String dataName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, dataExtension);
    data = state.directory.createOutput(dataName, state.context);
    CodecUtil.writeHeader(data, dataCodec, DiskDocValuesFormat.VERSION_CURRENT);
    String metaName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, metaExtension);
    meta = state.directory.createOutput(metaName, state.context);
    CodecUtil.writeHeader(meta, metaCodec, DiskDocValuesFormat.VERSION_CURRENT);
    maxDoc = state.segmentInfo.getDocCount();
    success = true;
  } finally {
    if (!success) {
      IOUtils.closeWhileHandlingException(this);
    }
  }
}
 
Example 17 · Project: crate · File: Lucene.java
/**
 * This method removes all Lucene files from the given directory. It first tries to delete all commit points / segments
 * files, to ensure broken commits or corrupted indices will not be opened in the future. If any of the segments files
 * can't be deleted, the operation fails.
 */
public static void cleanLuceneIndex(Directory directory) throws IOException {
    try (Lock writeLock = directory.obtainLock(IndexWriter.WRITE_LOCK_NAME)) {
        for (final String file : directory.listAll()) {
            if (file.startsWith(IndexFileNames.SEGMENTS) || file.equals(IndexFileNames.OLD_SEGMENTS_GEN)) {
                directory.deleteFile(file); // remove all segments_N files
            }
        }
    }
    try (IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(Lucene.STANDARD_ANALYZER)
            .setSoftDeletesField(Lucene.SOFT_DELETES_FIELD)
            .setMergePolicy(NoMergePolicy.INSTANCE) // no merges
            .setCommitOnClose(false) // no commits
            .setOpenMode(IndexWriterConfig.OpenMode.CREATE))) { // force creation - don't append...
        // do nothing and close; this will kick off IndexFileDeleter, which will remove all pending files
    }
}
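A hedged usage sketch for the helper above (the path is hypothetical): opening the directory and handing it to cleanLuceneIndex wipes every commit point and recreates an empty index.

try (Directory dir = FSDirectory.open(Paths.get("/var/data/broken-index"))) {
  Lucene.cleanLuceneIndex(dir); // deletes segments_N files, then recreates an empty index
}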
 
Example 18
public CompletionFieldsProducer(SegmentReadState state) throws IOException {
    String suggestFSTFile = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, EXTENSION);
    IndexInput input = state.directory.openInput(suggestFSTFile, state.context);
    version = CodecUtil.checkHeader(input, CODEC_NAME, SUGGEST_CODEC_VERSION, SUGGEST_VERSION_CURRENT);
    FieldsProducer delegateProducer = null;
    boolean success = false;
    try {
        PostingsFormat delegatePostingsFormat = PostingsFormat.forName(input.readString());
        String providerName = input.readString();
        CompletionLookupProvider completionLookupProvider = providers.get(providerName);
        if (completionLookupProvider == null) {
            throw new IllegalStateException("no provider with name [" + providerName + "] registered");
        }
        // TODO: we could clone the ReadState and make it always forward IOContext.MERGE to prevent unnecessary heap usage?
        delegateProducer = delegatePostingsFormat.fieldsProducer(state);
        /*
         * If we are merging, we don't load the FSTs at all, so that we
         * don't consume as much memory during the merge
         */
        if (state.context.context != Context.MERGE) {
            // TODO: maybe we can do this in a fully lazy fashion based on some configuration
        // eventually we should have some kind of circuit breaker that prevents us from going OOM here
            // with some configuration
            this.lookupFactory = completionLookupProvider.load(input);
        } else {
            this.lookupFactory = null;
        }
        this.delegateProducer = delegateProducer;
        success = true;
    } finally {
        if (!success) {
            IOUtils.closeWhileHandlingException(delegateProducer, input);
        } else {
            IOUtils.close(input);
        }
    }
}
 
Example 19 · Project: lucene-solr · File: IndexReplicationHandler.java
/**
 * Cleans up the index directory from old index files. This method uses the
 * last commit found by {@link #getLastCommit(Directory)}. If it matches the
 * expected segmentsFile, then all files not referenced by this commit point
 * are deleted.
 * <p>
 * <b>NOTE:</b> this method does a best effort attempt to clean the index
 * directory. It suppresses any exceptions that occur, as this can be retried
 * the next time.
 */
public static void cleanupOldIndexFiles(Directory dir, String segmentsFile, InfoStream infoStream) {
  try {
    IndexCommit commit = getLastCommit(dir);
    // commit == null means weird IO errors occurred, ignore them
    // if there were any IO errors reading the expected commit point (i.e.
    // segments files mismatch), then ignore that commit as well.
    if (commit != null && commit.getSegmentsFileName().equals(segmentsFile)) {
      Set<String> commitFiles = new HashSet<>(commit.getFileNames());
      Matcher matcher = IndexFileNames.CODEC_FILE_PATTERN.matcher("");
      for (String file : dir.listAll()) {
        if (!commitFiles.contains(file)
            && (matcher.reset(file).matches() || file.startsWith(IndexFileNames.SEGMENTS))) {
          // suppress exceptions, it's just a best effort
          IOUtils.deleteFilesIgnoringExceptions(dir, file);
        }
      }
    }
  } catch (Throwable t) {
    // ignore any errors that happen during this state and only log it. this
    // cleanup will have a chance to succeed the next time we get a new
    // revision.
    if (infoStream.isEnabled(INFO_STREAM_COMPONENT)) {
      infoStream.message(INFO_STREAM_COMPONENT, "cleanupOldIndexFiles(): failed on error " + t.getMessage());
    }
  }
}
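Usage is a single static call; here is a sketch with a made-up directory and segments file name (InfoStream.getDefault() is Lucene's global default stream):

IndexReplicationHandler.cleanupOldIndexFiles(dir, "segments_2", InfoStream.getDefault());
// best effort: any failure is logged to the InfoStream and retried on the next revision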
 
Example 20 · Project: lucene-solr · File: IndexAndTaxonomyRevisionTest.java
@Test
public void testRevisionRelease() throws Exception {
  Directory indexDir = newDirectory();
  IndexWriterConfig conf = new IndexWriterConfig(null);
  conf.setIndexDeletionPolicy(new SnapshotDeletionPolicy(conf.getIndexDeletionPolicy()));
  IndexWriter indexWriter = new IndexWriter(indexDir, conf);
  
  Directory taxoDir = newDirectory();
  SnapshotDirectoryTaxonomyWriter taxoWriter = new SnapshotDirectoryTaxonomyWriter(taxoDir);
  try {
    indexWriter.addDocument(newDocument(taxoWriter));
    indexWriter.commit();
    taxoWriter.commit();
    Revision rev1 = new IndexAndTaxonomyRevision(indexWriter, taxoWriter);
    // releasing that revision should not delete the files
    rev1.release();
    assertTrue(slowFileExists(indexDir, IndexFileNames.SEGMENTS + "_1"));
    assertTrue(slowFileExists(taxoDir, IndexFileNames.SEGMENTS + "_1"));
    
    rev1 = new IndexAndTaxonomyRevision(indexWriter, taxoWriter); // create revision again, so the files are snapshotted
    indexWriter.addDocument(newDocument(taxoWriter));
    indexWriter.commit();
    taxoWriter.commit();
    assertNotNull(new IndexAndTaxonomyRevision(indexWriter, taxoWriter));
    rev1.release(); // this release should trigger the delete of segments_1
    assertFalse(slowFileExists(indexDir, IndexFileNames.SEGMENTS + "_1"));
    indexWriter.close();
  } finally {
    IOUtils.close(indexWriter, taxoWriter, taxoDir, indexDir);
  }
}
 
Example 21 · Project: lucene-solr · File: LocalReplicatorTest.java
@Test
public void testRevisionRelease() throws Exception {
  replicator.publish(createRevision(1));
  assertTrue(slowFileExists(sourceDir, IndexFileNames.SEGMENTS + "_1"));
  replicator.publish(createRevision(2));
  // now the files of revision 1 can be deleted
  assertTrue(slowFileExists(sourceDir, IndexFileNames.SEGMENTS + "_2"));
  assertFalse("segments_1 should not be found in index directory after revision is released", slowFileExists(sourceDir, IndexFileNames.SEGMENTS + "_1"));
}
 
Example 22 · Project: lucene-solr · File: FSTTermsReader.java
public FSTTermsReader(SegmentReadState state, PostingsReaderBase postingsReader) throws IOException {
  final String termsFileName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, FSTTermsWriter.TERMS_EXTENSION);

  this.postingsReader = postingsReader;
  final IndexInput in = state.directory.openInput(termsFileName, state.context);

  boolean success = false;
  try {
    CodecUtil.checkIndexHeader(in, FSTTermsWriter.TERMS_CODEC_NAME,
                                     FSTTermsWriter.TERMS_VERSION_START,
                                     FSTTermsWriter.TERMS_VERSION_CURRENT,
                                     state.segmentInfo.getId(), state.segmentSuffix);
    CodecUtil.checksumEntireFile(in);
    this.postingsReader.init(in, state);
    seekDir(in);

    final FieldInfos fieldInfos = state.fieldInfos;
    final int numFields = in.readVInt();
    for (int i = 0; i < numFields; i++) {
      int fieldNumber = in.readVInt();
      FieldInfo fieldInfo = fieldInfos.fieldInfo(fieldNumber);
      long numTerms = in.readVLong();
      long sumTotalTermFreq = in.readVLong();
      // if frequencies are omitted, sumTotalTermFreq=sumDocFreq and we only write one value
      long sumDocFreq = fieldInfo.getIndexOptions() == IndexOptions.DOCS ? sumTotalTermFreq : in.readVLong();
      int docCount = in.readVInt();
      TermsReader current = new TermsReader(fieldInfo, in, numTerms, sumTotalTermFreq, sumDocFreq, docCount);
      TermsReader previous = fields.put(fieldInfo.name, current);
      checkFieldSummary(state.segmentInfo, in, current, previous);
    }
    success = true;
  } finally {
    if (success) {
      IOUtils.close(in);
    } else {
      IOUtils.closeWhileHandlingException(in);
    }
  }
}
 
Example 23 · Project: lucene-solr · File: OrdsBlockTreeTermsWriter.java
/** Create a new writer.  The number of items (terms or
 *  sub-blocks) per block will aim to be between
 *  minItemsPerBlock and maxItemsPerBlock, though in some
 *  cases the blocks may be smaller than the min. */
public OrdsBlockTreeTermsWriter(
                                SegmentWriteState state,
                                PostingsWriterBase postingsWriter,
                                int minItemsInBlock,
                                int maxItemsInBlock)
  throws IOException
{
  BlockTreeTermsWriter.validateSettings(minItemsInBlock, maxItemsInBlock);

  maxDoc = state.segmentInfo.maxDoc();

  final String termsFileName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, TERMS_EXTENSION);
  out = state.directory.createOutput(termsFileName, state.context);
  boolean success = false;
  IndexOutput indexOut = null;
  try {
    fieldInfos = state.fieldInfos;
    this.minItemsInBlock = minItemsInBlock;
    this.maxItemsInBlock = maxItemsInBlock;
    CodecUtil.writeIndexHeader(out, TERMS_CODEC_NAME, VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix);

    final String termsIndexFileName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, TERMS_INDEX_EXTENSION);
    indexOut = state.directory.createOutput(termsIndexFileName, state.context);
    CodecUtil.writeIndexHeader(indexOut, TERMS_INDEX_CODEC_NAME, VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix);

    this.postingsWriter = postingsWriter;
    // segment = state.segmentInfo.name;

    // System.out.println("BTW.init seg=" + state.segmentName);

    postingsWriter.init(out, state);                          // have consumer write its format/header
    success = true;
  } finally {
    if (!success) {
      IOUtils.closeWhileHandlingException(out, indexOut);
    }
  }
  this.indexOut = indexOut;
}
 
Example 24 · Project: lucene-solr · File: VariableGapTermsIndexWriter.java
public VariableGapTermsIndexWriter(SegmentWriteState state, IndexTermSelector policy) throws IOException {
  final String indexFileName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, TERMS_INDEX_EXTENSION);
  out = state.directory.createOutput(indexFileName, state.context);
  boolean success = false;
  try {
    fieldInfos = state.fieldInfos;
    this.policy = policy;
    CodecUtil.writeIndexHeader(out, CODEC_NAME, VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix);
    success = true;
  } finally {
    if (!success) {
      IOUtils.closeWhileHandlingException(out);
    }
  }
}
 
Example 25 · Project: lucene-solr · File: SimpleTextPointsReader.java
public SimpleTextPointsReader(SegmentReadState readState) throws IOException {
  // Initialize readers now:

  // Read index:
  Map<String,Long> fieldToFileOffset = new HashMap<>();

  String indexFileName = IndexFileNames.segmentFileName(readState.segmentInfo.name, readState.segmentSuffix, SimpleTextPointsFormat.POINT_INDEX_EXTENSION);
  try (ChecksumIndexInput in = readState.directory.openChecksumInput(indexFileName, IOContext.DEFAULT)) {
    readLine(in);
    int count = parseInt(FIELD_COUNT);
    for (int i = 0; i < count; i++) {
      readLine(in);
      String fieldName = stripPrefix(FIELD_FP_NAME);
      readLine(in);
      long fp = parseLong(FIELD_FP);
      fieldToFileOffset.put(fieldName, fp);
    }
    SimpleTextUtil.checkFooter(in);
  }

  boolean success = false;
  String fileName = IndexFileNames.segmentFileName(readState.segmentInfo.name, readState.segmentSuffix, SimpleTextPointsFormat.POINT_EXTENSION);
  dataIn = readState.directory.openInput(fileName, IOContext.DEFAULT);
  try {
    for (Map.Entry<String,Long> ent : fieldToFileOffset.entrySet()) {
      readers.put(ent.getKey(), initReader(ent.getValue()));
    }
    success = true;
  } finally {
    if (success == false) {
      IOUtils.closeWhileHandlingException(this);
    }
  }
      
  this.readState = readState;
}
 
Example 26 · Project: lucene-solr · File: SimpleTextStoredFieldsReader.java
public SimpleTextStoredFieldsReader(Directory directory, SegmentInfo si, FieldInfos fn, IOContext context) throws IOException {
  this.fieldInfos = fn;
  boolean success = false;
  try {
    in = directory.openInput(IndexFileNames.segmentFileName(si.name, "", SimpleTextStoredFieldsWriter.FIELDS_EXTENSION), context);
    success = true;
  } finally {
    if (!success) {
      try {
        close();
      } catch (Throwable t) {} // ensure we throw our original exception
    }
  }
  readIndex(si.maxDoc());
}
 
Example 27 · Project: lucene-solr · File: SimpleTextStoredFieldsWriter.java
public SimpleTextStoredFieldsWriter(Directory directory, String segment, IOContext context) throws IOException {
  this.directory = directory;
  this.segment = segment;
  boolean success = false;
  try {
    out = directory.createOutput(IndexFileNames.segmentFileName(segment, "", FIELDS_EXTENSION), context);
    success = true;
  } finally {
    if (!success) {
      IOUtils.closeWhileHandlingException(this);
    }
  }
}
 
Example 28 · Project: lucene-solr · File: SimpleTextLiveDocsFormat.java
@Override
public void writeLiveDocs(Bits bits, Directory dir, SegmentCommitInfo info, int newDelCount, IOContext context) throws IOException {
  int size = bits.length();
  BytesRefBuilder scratch = new BytesRefBuilder();
  
  String fileName = IndexFileNames.fileNameFromGeneration(info.info.name, LIVEDOCS_EXTENSION, info.getNextDelGen());
  IndexOutput out = null;
  boolean success = false;
  try {
    out = dir.createOutput(fileName, context);
    SimpleTextUtil.write(out, SIZE);
    SimpleTextUtil.write(out, Integer.toString(size), scratch);
    SimpleTextUtil.writeNewline(out);
    
    for (int i = 0; i < size; ++i) {
      if (bits.get(i)) {
        SimpleTextUtil.write(out, DOC);
        SimpleTextUtil.write(out, Integer.toString(i), scratch);
        SimpleTextUtil.writeNewline(out);
      }
    }
    
    SimpleTextUtil.write(out, END);
    SimpleTextUtil.writeNewline(out);
    SimpleTextUtil.writeChecksum(out, scratch);
    success = true;
  } finally {
    if (success) {
      IOUtils.close(out);
    } else {
      IOUtils.closeWhileHandlingException(out);
    }
  }
}
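For reference, a rough illustration of the text file this method produces, assuming a 3-document segment where doc 1 is deleted (inferred from the SIZE, DOC, and END constants used above; the checksum value is made up):

size 3
  doc 0
  doc 2
END
checksum 00000000000123456789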
 
Example 29 · Project: lucene-solr · File: SimpleTextTermVectorsReader.java
public SimpleTextTermVectorsReader(Directory directory, SegmentInfo si, IOContext context) throws IOException {
  boolean success = false;
  try {
    in = directory.openInput(IndexFileNames.segmentFileName(si.name, "", VECTORS_EXTENSION), context);
    success = true;
  } finally {
    if (!success) {
      try {
        close();
      } catch (Throwable t) {} // ensure we throw our original exception
    }
  }
  readIndex(si.maxDoc());
}
 
Example 30 · Project: lucene-solr · File: SimpleTextTermVectorsWriter.java
public SimpleTextTermVectorsWriter(Directory directory, String segment, IOContext context) throws IOException {
  this.directory = directory;
  this.segment = segment;
  boolean success = false;
  try {
    out = directory.createOutput(IndexFileNames.segmentFileName(segment, "", VECTORS_EXTENSION), context);
    success = true;
  } finally {
    if (!success) {
      IOUtils.closeWhileHandlingException(this);
    }
  }
}
 