org.apache.lucene.index.IndexWriterConfig#setUseCompoundFile() 源码实例Demo

下面列出了org.apache.lucene.index.IndexWriterConfig#setUseCompoundFile ( ) 实例代码,或者点击链接到github查看源代码,也可以在右侧发表评论。

源代码1 项目: vscode-extension   文件: test.java
/**
 * Builds the {@link IndexWriterConfig} used to open this engine's internal Lucene writer.
 *
 * The config appends to the existing index, disables commit-on-close (the engine
 * drives commits explicitly), wires in the engine's deletion policy, merge
 * scheduler/policy, similarity, codec and RAM buffer, and always enables compound
 * files on flush.
 */
private IndexWriterConfig getIndexWriterConfig() {
    final IndexWriterConfig writerConfig = new IndexWriterConfig(engineConfig.getAnalyzer());
    // Commits are coordinated explicitly by the engine, never implicitly on close.
    writerConfig.setCommitOnClose(false);
    writerConfig.setOpenMode(IndexWriterConfig.OpenMode.APPEND);
    writerConfig.setIndexDeletionPolicy(combinedDeletionPolicy);
    // Honor Lucene's tests.verbose flag so test output lines up with the filesystem stream.
    boolean verboseInfoStream = false;
    try {
        verboseInfoStream = Boolean.parseBoolean(System.getProperty("tests.verbose"));
    } catch (Exception ignored) {
        // best effort: reading system properties may be forbidden (e.g. SecurityManager)
    }
    if (verboseInfoStream) {
        writerConfig.setInfoStream(InfoStream.getDefault());
    } else {
        writerConfig.setInfoStream(new LoggerInfoStream(logger));
    }
    writerConfig.setMergeScheduler(mergeScheduler);
    // Start from the configured merge policy; background merges may upgrade old segments.
    MergePolicy basePolicy = config().getMergePolicy();
    // The soft-deletes field is always configured so that an engine with soft-deletes
    // disabled can still open a Lucene index that contains soft-deletes.
    writerConfig.setSoftDeletesField(Lucene.SOFT_DELETES_FIELD);
    if (softDeleteEnabled) {
        basePolicy = new RecoverySourcePruneMergePolicy(
            SourceFieldMapper.RECOVERY_SOURCE_NAME,
            softDeletesPolicy::getRetentionQuery,
            new SoftDeletesRetentionMergePolicy(
                Lucene.SOFT_DELETES_FIELD, softDeletesPolicy::getRetentionQuery, basePolicy));
    }
    writerConfig.setMergePolicy(new ElasticsearchMergePolicy(basePolicy));
    writerConfig.setSimilarity(engineConfig.getSimilarity());
    writerConfig.setRAMBufferSizeMB(engineConfig.getIndexingBufferSize().getMbFrac());
    writerConfig.setCodec(engineConfig.getCodec());
    // Always write compound files on flush: fewer file handles are needed on refresh.
    writerConfig.setUseCompoundFile(true);
    if (config().getIndexSort() != null) {
        writerConfig.setIndexSort(config().getIndexSort());
    }
    return writerConfig;
}
 
源代码2 项目: lucene-solr   文件: SolrIndexConfig.java
/**
 * Translates this Solr index configuration into a Lucene {@link IndexWriterConfig}.
 *
 * Buffer-related settings use {@code -1} as a "keep Lucene's default" sentinel.
 * The merge policy, merge scheduler and optional merged-segment warmer are
 * instantiated through the core's resource loader.
 *
 * @param core the Solr core providing the schema and resource loader
 * @return the fully configured writer config
 * @throws IOException if building a component from the resource loader fails
 */
public IndexWriterConfig toIndexWriterConfig(SolrCore core) throws IOException {
  IndexSchema schema = core.getLatestSchema();
  IndexWriterConfig writerConfig = new IndexWriterConfig(new DelayedSchemaAnalyzer(core));

  if (maxBufferedDocs != -1) {
    writerConfig.setMaxBufferedDocs(maxBufferedDocs);
  }
  if (ramBufferSizeMB != -1) {
    writerConfig.setRAMBufferSizeMB(ramBufferSizeMB);
  }
  if (ramPerThreadHardLimitMB != -1) {
    writerConfig.setRAMPerThreadHardLimitMB(ramPerThreadHardLimitMB);
  }

  writerConfig.setSimilarity(schema.getSimilarity());

  MergePolicy policy = buildMergePolicy(core.getResourceLoader(), schema);
  writerConfig.setMergePolicy(policy);
  writerConfig.setMergeScheduler(buildMergeScheduler(core.getResourceLoader()));
  writerConfig.setInfoStream(infoStream);

  // A sorting merge policy implies an index sort; propagate it to the writer config.
  if (policy instanceof SortingMergePolicy) {
    Sort indexSort = ((SortingMergePolicy) policy).getSort();
    writerConfig.setIndexSort(indexSort);
  }

  writerConfig.setUseCompoundFile(useCompoundFile);

  if (mergedSegmentWarmerInfo != null) {
    // TODO: add infostream -> normal logging system (there is an issue somewhere)
    @SuppressWarnings({"rawtypes"})
    IndexReaderWarmer warmer = core.getResourceLoader().newInstance(
        mergedSegmentWarmerInfo.className,
        IndexReaderWarmer.class,
        null,
        new Class[] { InfoStream.class },
        new Object[] { writerConfig.getInfoStream() });
    writerConfig.setMergedSegmentWarmer(warmer);
  }

  return writerConfig;
}
 
源代码3 项目: crate   文件: InternalEngine.java
/**
 * Creates the {@link IndexWriterConfig} for this engine's internal Lucene writer.
 *
 * The returned config appends to the existing index, disables commit-on-close
 * (commits are driven explicitly by the engine), wires in the engine's deletion
 * policy, merge scheduler/policy, codec and RAM buffer, and always enables
 * compound files on flush.
 */
private IndexWriterConfig getIndexWriterConfig() {
    final IndexWriterConfig iwc = new IndexWriterConfig(engineConfig.getAnalyzer());
    iwc.setCommitOnClose(false); // we by default don't commit on close
    iwc.setOpenMode(IndexWriterConfig.OpenMode.APPEND);
    // NOTE(review): reader attributes appear to be derived from the directory implementation — confirm
    iwc.setReaderAttributes(getReaderAttributes(store.directory()));
    iwc.setIndexDeletionPolicy(combinedDeletionPolicy);
    // with tests.verbose, lucene sets this up: plumb to align with filesystem stream
    boolean verbose = false;
    try {
        verbose = Boolean.parseBoolean(System.getProperty("tests.verbose"));
    } catch (Exception ignore) {
        // ignored — best effort only; fall back to the logger-backed info stream below
    }
    iwc.setInfoStream(verbose ? InfoStream.getDefault() : new LoggerInfoStream(logger));
    iwc.setMergeScheduler(mergeScheduler);
    // Give us the opportunity to upgrade old segments while performing
    // background merges
    MergePolicy mergePolicy = config().getMergePolicy();
    // always configure soft-deletes field so an engine with soft-deletes disabled can open a Lucene index with soft-deletes.
    iwc.setSoftDeletesField(Lucene.SOFT_DELETES_FIELD);
    if (softDeleteEnabled) {
        // Wrap the policy: prune recovery source on merge and retain soft-deleted
        // docs according to the soft-deletes policy's retention query.
        mergePolicy = new RecoverySourcePruneMergePolicy(SourceFieldMapper.RECOVERY_SOURCE_NAME, softDeletesPolicy::getRetentionQuery,
            new SoftDeletesRetentionMergePolicy(Lucene.SOFT_DELETES_FIELD, softDeletesPolicy::getRetentionQuery, mergePolicy));
    }
    iwc.setMergePolicy(new ElasticsearchMergePolicy(mergePolicy));
    iwc.setRAMBufferSizeMB(engineConfig.getIndexingBufferSize().getMbFrac());
    iwc.setCodec(engineConfig.getCodec());
    iwc.setUseCompoundFile(true); // always use compound on flush - reduces # of file-handles on refresh
    return iwc;
}
源代码4 项目: lucene-solr   文件: BaseStoredFieldsFormatTestCase.java
/**
 * Verifies writer behavior when a document fails mid-write ("tragic" failure):
 * the addDocument call throws, the exception is recorded as the writer's tragic
 * exception, and the writer ends up closed.
 *
 * @throws IOException Signals that an I/O exception has occurred.
 */
public void testDeletePartiallyWrittenFilesIfAbort() throws IOException {
  Directory dir = newDirectory();
  IndexWriterConfig iwConf = newIndexWriterConfig(new MockAnalyzer(random()));
  iwConf.setMaxBufferedDocs(RandomNumbers.randomIntBetween(random(), 2, 30));
  iwConf.setCodec(getCodec());
  // disable CFS because this test checks file names
  iwConf.setMergePolicy(newLogMergePolicy(false));
  iwConf.setUseCompoundFile(false);

  // Cannot use RIW because this test wants CFS to stay off:
  IndexWriter iw = new IndexWriter(dir, iwConf);

  // A well-formed document, indexed and committed first so the index is non-empty.
  final Document validDoc = new Document();
  validDoc.add(new IntPoint("id", 0));
  validDoc.add(new StoredField("id", 0));
  iw.addDocument(validDoc);
  iw.commit();
  
  // make sure that #writeField will fail to trigger an abort
  // (a stored field whose stringValue() is null is rejected during writing)
  final Document invalidDoc = new Document();
  FieldType fieldType = new FieldType();
  fieldType.setStored(true);
  invalidDoc.add(new Field("invalid", fieldType) {
    
    @Override
    public String stringValue() {
      // TODO: really bad & scary that this causes IW to
      // abort the segment!!  We should fix this.
      return null;
    }
    
  });
  
  try {
    iw.addDocument(invalidDoc);
    iw.commit();
  } catch(IllegalArgumentException iae) {
    // expected — the same exception must be recorded as the writer's tragic exception
    assertEquals(iae, iw.getTragicException());
  }
  // Writer should be closed by tragedy
  assertFalse(iw.isOpen());
  dir.close();
}
 
源代码5 项目: mtas   文件: MtasSearchTestConsistency.java
/**
 * Creates the index.
 *
 * @param configFile the config file
 * @param files the files
 * @throws IOException Signals that an I/O exception has occurred.
 */
/**
 * Creates the test index from the given files.
 *
 * Builds a per-field analyzer (the content field goes through the mtas
 * char-filter/tokenizer chain, everything else through a StandardAnalyzer),
 * opens a writer with the MtasCodec and compound files disabled, clears the
 * index, then adds each file — exercising delete and re-add paths for every
 * entry after the first.
 *
 * @param configFile the config file
 * @param files the files
 * @throws IOException Signals that an I/O exception has occurred.
 */
private static void createIndex(String configFile,
    HashMap<String, String> files) throws IOException {
  // analyzer: mtas chain for the content field, StandardAnalyzer elsewhere
  Map<String, String> charFilterArgs = new HashMap<>();
  charFilterArgs.put("type", "file");
  Map<String, String> tokenizerArgs = new HashMap<>();
  tokenizerArgs.put("configFile", configFile);
  Analyzer mtasAnalyzer = CustomAnalyzer
      .builder(Paths.get("docker").toAbsolutePath())
      .addCharFilter("mtas", charFilterArgs)
      .withTokenizer("mtas", tokenizerArgs).build();
  Map<String, Analyzer> fieldAnalyzers = new HashMap<>();
  fieldAnalyzers.put(FIELD_CONTENT, mtasAnalyzer);
  PerFieldAnalyzerWrapper analyzer = new PerFieldAnalyzerWrapper(
      new StandardAnalyzer(), fieldAnalyzers);
  // indexwriter: mtas codec, no compound files
  IndexWriterConfig writerConfig = new IndexWriterConfig(analyzer);
  writerConfig.setUseCompoundFile(false);
  writerConfig.setCodec(Codec.forName("MtasCodec"));
  IndexWriter writer = new IndexWriter(directory, writerConfig);
  // start from an empty index
  writer.deleteAll();
  // add documents; after the first entry also exercise delete + re-add paths
  int docNumber = 0;
  for (Entry<String, String> file : files.entrySet()) {
    addDoc(writer, docNumber, file.getKey(), file.getValue());
    if (docNumber > 0) {
      addDoc(writer, docNumber, file.getKey(), file.getValue());
      addDoc(writer, docNumber, "deletable", file.getValue());
      writer.commit();
      writer.deleteDocuments(new Term(FIELD_ID, Integer.toString(docNumber)));
      writer.deleteDocuments(new Term(FIELD_TITLE, "deletable"));
      addDoc(writer, docNumber, file.getKey(), file.getValue());
    } else {
      writer.commit();
    }
    docNumber++;
  }
  writer.commit();
  // finish
  writer.close();
}