类org.apache.lucene.index.MergePolicy源码实例Demo

下面列出了org.apache.lucene.index.MergePolicy类的API实例代码及用法示例,也可以点击链接到GitHub查看完整源代码。

源代码1 项目: vscode-extension   文件: test.java
@Override
protected void handleMergeException(final Directory dir, final Throwable exc) {
    final AbstractRunnable failTask = new AbstractRunnable() {
        @Override
        public void onFailure(Exception e) {
            logger.debug("merge failure action rejected", e);
        }

        @Override
        protected void doRun() throws Exception {
            // Fail the engine from a fresh generic-pool thread rather than the
            // merge thread itself: the merge thread's call stack may contain
            // catch blocks that would swallow the error before it could reach
            // the uncaught exception handler.
            failEngine("merge failed", new MergePolicy.MergeException(exc, dir));
        }
    };
    engineConfig.getThreadPool().generic().execute(failTask);
}
 
源代码2 项目: Elasticsearch   文件: DLBasedEngine.java
@Override
protected void handleMergeException(final Directory dir, final Throwable exc) {
    logger.error("failed to merge", exc);
    if (!config().getMergeSchedulerConfig().isNotifyOnMergeFailure()) {
        // Merge-failure notification is disabled; the error log above is all we do.
        return;
    }
    engineConfig.getThreadPool().generic().execute(new AbstractRunnable() {
        @Override
        public void onFailure(Throwable t) {
            logger.debug("merge failure action rejected", t);
        }

        @Override
        protected void doRun() throws Exception {
            // Fail the engine with a MergeException recording both the cause
            // and the directory that was being merged.
            failEngine("merge failed", new MergePolicy.MergeException(exc, dir));
        }
    });
}
 
源代码3 项目: Elasticsearch   文件: InternalEngine.java
@Override
protected void handleMergeException(final Directory dir, final Throwable exc) {
    logger.error("failed to merge", exc);
    if (!config().getMergeSchedulerConfig().isNotifyOnMergeFailure()) {
        return; // notification disabled; nothing further to do
    }
    final AbstractRunnable notifyTask = new AbstractRunnable() {
        @Override
        public void onFailure(Throwable t) {
            logger.debug("merge failure action rejected", t);
        }

        @Override
        protected void doRun() throws Exception {
            failEngine("merge failed", new MergePolicy.MergeException(exc, dir));
        }
    };
    engineConfig.getThreadPool().generic().execute(notifyTask);
}
 
源代码4 项目: linden   文件: SortingMergePolicyFactory.java
/**
 * Builds a {@link SortingMergePolicyDecorator} around a fresh
 * {@link TieredMergePolicy}, sorting merged segments by the configured field.
 *
 * @param params factory parameters; reads SORT_FIELD, SORT_FIELD_TYPE and SORT_DESC
 * @return the configured sorting merge policy
 * @throws IOException if the sort type resolves to {@code SortField.Type.DOC},
 *         since relying on internal Lucene doc IDs is an implementation detail
 */
@Override
public MergePolicy getInstance(Map<String, String> params) throws IOException {
  // NOTE(review): a missing SORT_FIELD leaves field == null and is only caught
  // (if at all) inside SortField — consider validating it explicitly here.
  String field = params.get(SORT_FIELD);
  SortField.Type sortFieldType = SortField.Type.DOC;
  if (params.containsKey(SORT_FIELD_TYPE)) {
    // NOTE(review): toUpperCase() is locale-sensitive; toUpperCase(Locale.ROOT)
    // would be safer for enum names under e.g. the Turkish default locale.
    sortFieldType = SortField.Type.valueOf(params.get(SORT_FIELD_TYPE).toUpperCase());
  }

  if (sortFieldType == SortField.Type.DOC) {
    throw new IOException(
        "Relying on internal lucene DocIDs is not guaranteed to work, this is only an implementation detail.");
  }

  // Boolean.parseBoolean never throws — anything other than "true" (ignoring
  // case, including null) yields false — so the original try/catch around
  // Boolean.valueOf was dead code, and valueOf boxed only to auto-unbox again.
  boolean desc = true;
  if (params.containsKey(SORT_DESC)) {
    desc = Boolean.parseBoolean(params.get(SORT_DESC));
  }
  SortField sortField = new SortField(field, sortFieldType, desc);
  Sort sort = new Sort(sortField);
  return new SortingMergePolicyDecorator(new TieredMergePolicy(), sort);
}
 
源代码5 项目: lucene-solr   文件: SimplePrimaryNode.java
/** Builds an IndexWriter over the node's directory with a randomized test config. */
private static IndexWriter initWriter(int id, Random random, Path indexPath, boolean doCheckIndexOnClose) throws IOException {
  final Directory dir = SimpleReplicaNode.getDirectory(random, id, indexPath, doCheckIndexOnClose);

  final MockAnalyzer analyzer = new MockAnalyzer(random);
  analyzer.setMaxTokenLength(TestUtil.nextInt(random, 1, IndexWriter.MAX_TERM_LENGTH));
  final IndexWriterConfig iwc = LuceneTestCase.newIndexWriterConfig(random, analyzer);

  // Force more frequent merging so we stress merge warming:
  final MergePolicy mergePolicy = iwc.getMergePolicy();
  if (mergePolicy instanceof TieredMergePolicy) {
    final TieredMergePolicy tiered = (TieredMergePolicy) mergePolicy;
    tiered.setSegmentsPerTier(3);
    tiered.setMaxMergeAtOnce(3);
  } else if (mergePolicy instanceof LogMergePolicy) {
    ((LogMergePolicy) mergePolicy).setMergeFactor(3);
  }

  final IndexWriter writer = new IndexWriter(dir, iwc);
  TestUtil.reduceOpenFiles(writer);
  return writer;
}
 
源代码6 项目: lucene-solr   文件: TestUtil.java
/**
 * Tweaks the writer's merge policy and merge scheduler so that tests keep
 * the number of simultaneously open files low.
 */
public static void reduceOpenFiles(IndexWriter w) {
  MergePolicy mp = w.getConfig().getMergePolicy();
  // Always build compound files.
  mp.setNoCFSRatio(1.0);
  if (mp instanceof TieredMergePolicy) {
    TieredMergePolicy tiered = (TieredMergePolicy) mp;
    tiered.setMaxMergeAtOnce(Math.min(5, tiered.getMaxMergeAtOnce()));
    tiered.setSegmentsPerTier(Math.min(5, tiered.getSegmentsPerTier()));
  } else if (mp instanceof LogMergePolicy) {
    LogMergePolicy logPolicy = (LogMergePolicy) mp;
    logPolicy.setMergeFactor(Math.min(5, logPolicy.getMergeFactor()));
  }
  MergeScheduler scheduler = w.getConfig().getMergeScheduler();
  if (scheduler instanceof ConcurrentMergeScheduler) {
    // wtf... shouldnt it be even lower since it's 1 by default?!?!
    ((ConcurrentMergeScheduler) scheduler).setMaxMergesAndThreads(3, 2);
  }
}
 
源代码7 项目: lucene-solr   文件: SolrIndexConfig.java
/**
 * Builds a MergePolicy via the configured MergePolicyFactory, falling back to
 * the default factory class with empty args when none is configured.
 */
@SuppressWarnings({"unchecked", "rawtypes"})
private MergePolicy buildMergePolicy(SolrResourceLoader resourceLoader, IndexSchema schema) {
  final String mpfClassName = (mergePolicyFactoryInfo == null)
      ? DEFAULT_MERGE_POLICY_FACTORY_CLASSNAME
      : mergePolicyFactoryInfo.className;
  final MergePolicyFactoryArgs mpfArgs = (mergePolicyFactoryInfo == null)
      ? new MergePolicyFactoryArgs()
      : new MergePolicyFactoryArgs(mergePolicyFactoryInfo.initArgs);

  final MergePolicyFactory mpf = resourceLoader.newInstance(
      mpfClassName,
      MergePolicyFactory.class,
      NO_SUB_PACKAGES,
      new Class[] {SolrResourceLoader.class, MergePolicyFactoryArgs.class, IndexSchema.class},
      new Object[] {resourceLoader, mpfArgs, schema});
  return mpf.getMergePolicy();
}
 
源代码8 项目: lucene-solr   文件: SegmentsInfoRequestHandler.java
private SimpleOrderedMap<Object> getMergeInformation(SolrQueryRequest req, SegmentInfos infos, List<String> mergeCandidates) throws IOException {
  SimpleOrderedMap<Object> result = new SimpleOrderedMap<>();
  RefCounted<IndexWriter> refCounted = req.getCore().getSolrCoreState().getIndexWriter(req.getCore());
  try {
    IndexWriter indexWriter = refCounted.get();
    if (indexWriter instanceof SolrIndexWriter) {
      result.addAll(((SolrIndexWriter) indexWriter).getRunningMerges());
    }
    // Ask the configured merge policy which merges it would run right now,
    // and record the name of every segment involved.
    MergePolicy mp = indexWriter.getConfig().getMergePolicy();
    MergeSpecification spec = mp.findMerges(MergeTrigger.EXPLICIT, infos, indexWriter);
    if (spec != null && spec.merges != null) {
      for (OneMerge merge : spec.merges) {
        //TODO: add merge grouping
        for (SegmentCommitInfo segInfo : merge.segments) {
          mergeCandidates.add(segInfo.info.name);
        }
      }
    }
    return result;
  } finally {
    refCounted.decref();
  }
}
 
源代码9 项目: lucene-solr   文件: SolrIndexConfigTest.java
public void testSortingMPSolrIndexConfigCreation() throws Exception {
  final String sortFieldName = "timestamp_i_dvo";
  final SortField.Type sortType = SortField.Type.INT;
  final boolean sortDescending = true;

  SolrConfig solrConfig = new SolrConfig(instanceDir, solrConfigFileNameSortingMergePolicyFactory);
  SolrIndexConfig solrIndexConfig = new SolrIndexConfig(solrConfig, null, null);
  assertNotNull(solrIndexConfig);

  IndexSchema indexSchema = IndexSchemaFactory.buildIndexSchema(schemaFileName, solrConfig);
  h.getCore().setLatestSchema(indexSchema);
  IndexWriterConfig iwc = solrIndexConfig.toIndexWriterConfig(h.getCore());

  final MergePolicy mergePolicy = iwc.getMergePolicy();
  assertNotNull("null mergePolicy", mergePolicy);
  assertTrue("mergePolicy ("+mergePolicy+") is not a SortingMergePolicy", mergePolicy instanceof SortingMergePolicy);

  // The factory must translate the configured field/type/order into the policy's Sort.
  final Sort expected = new Sort(new SortField(sortFieldName, sortType, sortDescending));
  final Sort actual = ((SortingMergePolicy) mergePolicy).getSort();
  assertEquals("SortingMergePolicy.getSort", expected, actual);
}
 
public void testProperlyInitializesWrappedMergePolicy() {
  // Derive non-default values from a pristine TieredMergePolicy so the test
  // can tell that the wrapped policy was actually configured.
  final TieredMergePolicy pristine = new TieredMergePolicy();
  final int expectedMaxMergeAtOnce = pristine.getMaxMergeAtOnce() * 2;
  final double expectedMaxMergedSegmentMB = pristine.getMaxMergedSegmentMB() * 10;

  final MergePolicyFactoryArgs args = new MergePolicyFactoryArgs();
  args.add(WrapperMergePolicyFactory.WRAPPED_PREFIX, "test");
  args.add("test.class", TieredMergePolicyFactory.class.getName());
  args.add("test.maxMergeAtOnce", expectedMaxMergeAtOnce);
  args.add("test.maxMergedSegmentMB", expectedMaxMergedSegmentMB);

  // The default wrapped policy must never be requested when "test.class" is supplied.
  MergePolicyFactory mpf = new DefaultingWrapperMergePolicyFactory(resourceLoader, args, null) {
    @Override
    protected MergePolicy getDefaultWrappedMergePolicy() {
      throw new IllegalStateException("Should not have reached here!");
    }
  };

  final MergePolicy mp = mpf.getMergePolicy();
  assertSame(mp.getClass(), TieredMergePolicy.class);
  final TieredMergePolicy tmp = (TieredMergePolicy) mp;
  assertEquals("maxMergeAtOnce", expectedMaxMergeAtOnce, tmp.getMaxMergeAtOnce());
  assertEquals("maxMergedSegmentMB", expectedMaxMergedSegmentMB, tmp.getMaxMergedSegmentMB(), 0.0d);
}
 
源代码11 项目: crate   文件: InternalEngine.java
@Override
protected void handleMergeException(final Directory dir, final Throwable exc) {
    // Hand the failure off to the generic thread pool; see doRun() for why we
    // must not fail the engine from the merge thread itself.
    engineConfig.getThreadPool().generic().execute(new AbstractRunnable() {
        @Override
        public void onFailure(Exception e) {
            logger.debug("merge failure action rejected", e);
        }

        @Override
        protected void doRun() throws Exception {
            // Failing from a fresh thread guarantees no catch block on the
            // merge thread's call stack can intercept the error before it
            // reaches the uncaught exception handler.
            failEngine("merge failed", new MergePolicy.MergeException(exc, dir));
        }
    });
}
 
源代码12 项目: crate   文件: EngineTestCase.java
/**
 * Convenience overload: delegates to the full {@code createEngine} variant,
 * forwarding every argument. Note that the delegate takes
 * {@code seqNoForOperation} before {@code globalCheckpointSupplier}, so the
 * last two arguments swap positions in the delegation below.
 */
protected InternalEngine createEngine(
    IndexSettings indexSettings,
    Store store,
    Path translogPath,
    MergePolicy mergePolicy,
    @Nullable IndexWriterFactory indexWriterFactory,
    @Nullable BiFunction<Long, Long, LocalCheckpointTracker> localCheckpointTrackerSupplier,
    @Nullable LongSupplier globalCheckpointSupplier,
    @Nullable ToLongBiFunction<Engine, Engine.Operation> seqNoForOperation) throws IOException {
    return createEngine(
        indexSettings,
        store,
        translogPath,
        mergePolicy,
        indexWriterFactory,
        localCheckpointTrackerSupplier,
        seqNoForOperation,
        globalCheckpointSupplier);
}
 
源代码13 项目: vscode-extension   文件: test.java
/**
 * Builds the Lucene {@link IndexWriterConfig} for this engine.
 *
 * <p>Order matters: soft-deletes wrapping (when enabled) is applied to the
 * merge policy before the final ElasticsearchMergePolicy wrapper is installed.
 */
private IndexWriterConfig getIndexWriterConfig() {
    final IndexWriterConfig iwc = new IndexWriterConfig(engineConfig.getAnalyzer());
    iwc.setCommitOnClose(false); // we by default don't commit on close
    iwc.setOpenMode(IndexWriterConfig.OpenMode.APPEND);
    iwc.setIndexDeletionPolicy(combinedDeletionPolicy);
    // with tests.verbose, lucene sets this up: plumb to align with filesystem stream
    boolean verbose = false;
    try {
        verbose = Boolean.parseBoolean(System.getProperty("tests.verbose"));
    } catch (Exception ignore) {
        // NOTE(review): getProperty can throw SecurityException under a security
        // manager; verbose output is best-effort, so we deliberately fall back to false.
    }
    iwc.setInfoStream(verbose ? InfoStream.getDefault() : new LoggerInfoStream(logger));
    iwc.setMergeScheduler(mergeScheduler);
    // Give us the opportunity to upgrade old segments while performing
    // background merges
    MergePolicy mergePolicy = config().getMergePolicy();
    // always configure soft-deletes field so an engine with soft-deletes disabled can open a Lucene index with soft-deletes.
    iwc.setSoftDeletesField(Lucene.SOFT_DELETES_FIELD);
    if (softDeleteEnabled) {
        // Chain: prune recovery source -> retain soft-deletes -> user-configured policy.
        mergePolicy = new RecoverySourcePruneMergePolicy(SourceFieldMapper.RECOVERY_SOURCE_NAME, softDeletesPolicy::getRetentionQuery,
            new SoftDeletesRetentionMergePolicy(Lucene.SOFT_DELETES_FIELD, softDeletesPolicy::getRetentionQuery, mergePolicy));
    }
    iwc.setMergePolicy(new ElasticsearchMergePolicy(mergePolicy));
    iwc.setSimilarity(engineConfig.getSimilarity());
    iwc.setRAMBufferSizeMB(engineConfig.getIndexingBufferSize().getMbFrac());
    iwc.setCodec(engineConfig.getCodec());
    iwc.setUseCompoundFile(true); // always use compound on flush - reduces # of file-handles on refresh
    if (config().getIndexSort() != null) {
        iwc.setIndexSort(config().getIndexSort());
    }
    return iwc;
}
 
源代码14 项目: Elasticsearch   文件: EngineConfig.java
/**
 * Creates a new {@link org.elasticsearch.index.engine.EngineConfig}.
 *
 * <p>Most constructor arguments are stored as-is; the remaining fields are
 * derived from {@code indexSettings} (codec name, GC-deletes interval,
 * version-map size, compound-on-flush, auto-generated-id optimization and
 * force-new-translog flags). The indexing buffer starts at the inactive-shard
 * size and is resized later by IndexingMemoryController.
 */
public EngineConfig(ShardId shardId, ThreadPool threadPool, ShardIndexingService indexingService,
                    Settings indexSettings, IndicesWarmer warmer, Store store, SnapshotDeletionPolicy deletionPolicy,
                    MergePolicy mergePolicy, MergeSchedulerConfig mergeSchedulerConfig, Analyzer analyzer,
                    Similarity similarity, CodecService codecService, Engine.FailedEngineListener failedEngineListener,
                    TranslogRecoveryPerformer translogRecoveryPerformer, QueryCache queryCache, QueryCachingPolicy queryCachingPolicy, IndexSearcherWrappingService wrappingService, TranslogConfig translogConfig) {
    this.shardId = shardId;
    this.indexSettings = indexSettings;
    this.threadPool = threadPool;
    this.indexingService = indexingService;
    this.warmer = warmer;
    this.store = store;
    this.deletionPolicy = deletionPolicy;
    this.mergePolicy = mergePolicy;
    this.mergeSchedulerConfig = mergeSchedulerConfig;
    this.analyzer = analyzer;
    this.similarity = similarity;
    this.codecService = codecService;
    this.failedEngineListener = failedEngineListener;
    this.wrappingService = wrappingService;
    // Everything below is derived from index settings rather than passed in directly.
    this.optimizeAutoGenerateId = indexSettings.getAsBoolean(EngineConfig.INDEX_OPTIMIZE_AUTOGENERATED_ID_SETTING, false);
    this.compoundOnFlush = indexSettings.getAsBoolean(EngineConfig.INDEX_COMPOUND_ON_FLUSH, compoundOnFlush);
    codecName = indexSettings.get(EngineConfig.INDEX_CODEC_SETTING, EngineConfig.DEFAULT_CODEC_NAME);
    // We start up inactive and rely on IndexingMemoryController to give us our fair share once we start indexing:
    indexingBufferSize = IndexingMemoryController.INACTIVE_SHARD_INDEXING_BUFFER;
    gcDeletesInMillis = indexSettings.getAsTime(INDEX_GC_DELETES_SETTING, EngineConfig.DEFAULT_GC_DELETES).millis();
    versionMapSizeSetting = indexSettings.get(INDEX_VERSION_MAP_SIZE, DEFAULT_VERSION_MAP_SIZE);
    updateVersionMapSize();
    this.translogRecoveryPerformer = translogRecoveryPerformer;
    this.forceNewTranslog = indexSettings.getAsBoolean(INDEX_FORCE_NEW_TRANSLOG, false);
    this.queryCache = queryCache;
    this.queryCachingPolicy = queryCachingPolicy;
    this.translogConfig = translogConfig;
}
 
源代码15 项目: linden   文件: TieredMergePolicyFactory.java
/**
 * Builds a {@link TieredMergePolicy}, applying the optional
 * {@code SEGMENTS_PER_TIER} and {@code MAX_MERGE_AT_ONCE} settings from the
 * supplied config map.
 *
 * @param config plugin configuration; numeric values are parsed from strings
 *               (a malformed value raises NumberFormatException, as before)
 * @return the configured merge policy
 * @throws IOException declared by the factory interface; not thrown here
 */
@Override
public MergePolicy getInstance(Map<String, String> config) throws IOException {
  TieredMergePolicy mergePolicy = new TieredMergePolicy();

  // parseDouble/parseInt return primitives directly; the original
  // Double.valueOf/Integer.valueOf boxed only to auto-unbox immediately.
  if (config.containsKey(SEGMENTS_PER_TIER)) {
    mergePolicy.setSegmentsPerTier(Double.parseDouble(config.get(SEGMENTS_PER_TIER)));
  }
  if (config.containsKey(MAX_MERGE_AT_ONCE)) {
    mergePolicy.setMaxMergeAtOnce(Integer.parseInt(config.get(MAX_MERGE_AT_ONCE)));
  }
  return mergePolicy;
}
 
源代码16 项目: linden   文件: LindenConfig.java
/**
 * Assembles the IndexWriterConfig used by Linden: a 48MB RAM buffer, the
 * plugin-provided merge policy (when one is configured) and a
 * ConcurrentMergeScheduler capped at 8 pending merges on 1 merge thread.
 */
public IndexWriterConfig createIndexWriterConfig() throws IOException {
  IndexWriterConfig writerConfig = new IndexWriterConfig(Version.LATEST, getIndexAnalyzerInstance());
  writerConfig.setRAMBufferSizeMB(48);

  MergePolicy mergePolicy = getPluginManager().getInstance(LindenConfigBuilder.MERGE_POLICY, MergePolicy.class);
  LOGGER.info("Merge policy : {}", mergePolicy == null ? "Default" : mergePolicy);
  if (mergePolicy != null) {
    writerConfig.setMergePolicy(mergePolicy);
  }

  ConcurrentMergeScheduler scheduler = new ConcurrentMergeScheduler();
  scheduler.setMaxMergesAndThreads(8, 1);
  writerConfig.setMergeScheduler(scheduler);
  return writerConfig;
}
 
源代码17 项目: lucene-solr   文件: IndexMergeTool.java
/**
 * Parses command-line arguments into an {@link Options} instance.
 *
 * <p>Recognized flags: {@code -merge-policy <class>}, {@code -max-segments <n>},
 * {@code -verbose}; a bare {@code --} ends flag parsing. All remaining
 * arguments are the target index path followed by at least two source paths.
 *
 * @throws ReflectiveOperationException if the merge-policy class cannot be
 *         loaded or instantiated
 * @throws IllegalArgumentException on an unknown flag or too few paths
 */
static Options parse(String[] args) throws ReflectiveOperationException {
  Options options = new Options();
  int index = 0;
  while (index < args.length) {
    if (!args[index].startsWith("-")) {
      break;
    }
    // BUGFIX: the original used reference equality (args[index] == "--"),
    // which is false for any string read from the command line, so "--" fell
    // through to the switch and was rejected as an unrecognized option.
    if ("--".equals(args[index])) {
      index++; // consume the "--" terminator itself; everything after it is a path
      break;
    }
    switch (args[index]) {
      case "-merge-policy":
        String clazzName = args[++index];
        Class<? extends MergePolicy> clazz = Class.forName(clazzName).asSubclass(MergePolicy.class);
        options.config.setMergePolicy(clazz.getConstructor().newInstance());
        break;
      case "-max-segments":
        options.maxSegments = Integer.parseInt(args[++index]);
        break;
      case "-verbose":
        options.config.setInfoStream(System.err);
        break;
      default: throw new IllegalArgumentException("unrecognized option: '" + args[index] + "'\n" + USAGE);
    }
    index++;
  }

  // process any remaining arguments as the target and source index paths.
  int numPaths = args.length - index;
  if (numPaths < 3) {
    throw new IllegalArgumentException("not enough parameters.\n" + USAGE);
  }

  options.mergedIndexPath = args[index];
  options.indexPaths = new String[numPaths - 1];
  System.arraycopy(args, index + 1, options.indexPaths, 0, options.indexPaths.length);
  return options;
}
 
源代码18 项目: lucene-solr   文件: TestMergeSchedulerExternal.java
@Override
protected MergeThread getMergeThread(MergeSource mergeSource, MergePolicy.OneMerge merge) throws IOException {
  // Run our custom merge thread as a named daemon so it cannot keep the JVM alive.
  MergeThread mergeThread = new MyMergeThread(mergeSource, merge);
  mergeThread.setName("MyMergeThread");
  mergeThread.setDaemon(true);
  return mergeThread;
}
 
源代码19 项目: lucene-solr   文件: DefaultSolrCoreState.java
/**
 * Returns the Sort of the current writer's SortingMergePolicy, or null when
 * there is no writer or its merge policy is not a SortingMergePolicy.
 */
public Sort getMergePolicySort() throws IOException {
  lock(iwLock.readLock());
  try {
    if (indexWriter == null) {
      return null;
    }
    final MergePolicy mergePolicy = indexWriter.getConfig().getMergePolicy();
    return (mergePolicy instanceof SortingMergePolicy)
        ? ((SortingMergePolicy) mergePolicy).getSort()
        : null;
  } finally {
    iwLock.readLock().unlock();
  }
}
 
源代码20 项目: lucene-solr   文件: SolrIndexConfig.java
/**
 * Converts this SolrIndexConfig into a Lucene {@link IndexWriterConfig} for
 * the given core. Settings with a {@code -1} sentinel were not configured and
 * leave Lucene's defaults untouched.
 */
public IndexWriterConfig toIndexWriterConfig(SolrCore core) throws IOException {
  IndexSchema schema = core.getLatestSchema();
  IndexWriterConfig iwc = new IndexWriterConfig(new DelayedSchemaAnalyzer(core));
  if (maxBufferedDocs != -1)
    iwc.setMaxBufferedDocs(maxBufferedDocs);

  if (ramBufferSizeMB != -1)
    iwc.setRAMBufferSizeMB(ramBufferSizeMB);

  if (ramPerThreadHardLimitMB != -1) {
    iwc.setRAMPerThreadHardLimitMB(ramPerThreadHardLimitMB);
  }

  iwc.setSimilarity(schema.getSimilarity());
  MergePolicy mergePolicy = buildMergePolicy(core.getResourceLoader(), schema);
  iwc.setMergePolicy(mergePolicy);
  MergeScheduler mergeScheduler = buildMergeScheduler(core.getResourceLoader());
  iwc.setMergeScheduler(mergeScheduler);
  iwc.setInfoStream(infoStream);

  // A sorting merge policy implies an index sort; propagate it to the writer config.
  if (mergePolicy instanceof SortingMergePolicy) {
    Sort indexSort = ((SortingMergePolicy) mergePolicy).getSort();
    iwc.setIndexSort(indexSort);
  }

  iwc.setUseCompoundFile(useCompoundFile);

  if (mergedSegmentWarmerInfo != null) {
    // TODO: add infostream -> normal logging system (there is an issue somewhere)
    @SuppressWarnings({"rawtypes"})
    IndexReaderWarmer warmer = core.getResourceLoader().newInstance(mergedSegmentWarmerInfo.className,
                                                                      IndexReaderWarmer.class,
                                                                      null,
                                                                      new Class[] { InfoStream.class },
                                                                      new Object[] { iwc.getInfoStream() });
    iwc.setMergedSegmentWarmer(warmer);
  }

  return iwc;
}
 
源代码21 项目: lucene-solr   文件: WrapperMergePolicyFactory.java
/** Returns an instance of the wrapped {@link MergePolicy} after it has been configured with all set parameters. */
@SuppressWarnings({"rawtypes"})
protected final MergePolicy getWrappedMergePolicy() {
  if (wrappedMergePolicyArgs == null) {
    // No explicit wrapped policy was configured; use the subclass's default.
    return getDefaultWrappedMergePolicy();
  }
  final Class[] ctorSignature = new Class[] {SolrResourceLoader.class, MergePolicyFactoryArgs.class, IndexSchema.class};
  final Object[] ctorArgs = new Object[] {resourceLoader, wrappedMergePolicyArgs, schema};
  final MergePolicyFactory factory = resourceLoader.newInstance(
      wrappedMergePolicyClassName,
      MergePolicyFactory.class,
      NO_SUB_PACKAGES,
      ctorSignature,
      ctorArgs);
  return factory.getMergePolicy();
}
 
源代码22 项目: lucene-solr   文件: WrapperMergePolicyFactory.java
/** Returns a wrapping {@link MergePolicy} with its set parameters configured. */
@Override
public final MergePolicy getMergePolicy() {
  // Build the wrapper around the wrapped policy, then apply configured setters.
  final MergePolicy policy = getMergePolicyInstance(getWrappedMergePolicy());
  args.invokeSetters(policy);
  return policy;
}
 
/**
 * Shared test body: builds an UpgradeIndexMergePolicyFactory with an optional
 * noCFSRatio on the wrapping and/or the wrapped policy, and verifies that
 * construction succeeds exactly when the two settings do not overlap.
 *
 * @param wrappingNoCFSRatio noCFSRatio for the wrapping policy, or null to omit
 * @param wrappedNoCFSRatio  noCFSRatio for the wrapped policy, or null to omit
 */
private void implTestUpgradeIndexMergePolicyFactory(Double wrappingNoCFSRatio, Double wrappedNoCFSRatio) {
  final MergePolicyFactoryArgs args = new MergePolicyFactoryArgs();
  if (wrappingNoCFSRatio != null) {
    args.add("noCFSRatio", wrappingNoCFSRatio); // noCFSRatio for the wrapping merge policy
  }
  args.add(WrapperMergePolicyFactory.WRAPPED_PREFIX, "wrapped");
  args.add("wrapped.class", TieredMergePolicyFactory.class.getName());
  if (wrappedNoCFSRatio != null) {
    args.add("wrapped.noCFSRatio", wrappedNoCFSRatio); // noCFSRatio for the wrapped merge policy
  }

  MergePolicyFactory mpf;
  try {
    // Success path: construction only succeeds when at most one ratio was set.
    mpf = new UpgradeIndexMergePolicyFactory(resourceLoader, args, null);
    assertFalse("Should only reach here if wrapping and wrapped args don't overlap!",
        (wrappingNoCFSRatio != null && wrappedNoCFSRatio != null));

    for (int ii=1; ii<=2; ++ii) { // it should be okay to call getMergePolicy() more than once
      final MergePolicy mp = mpf.getMergePolicy();
      if (wrappingNoCFSRatio != null) {
        assertEquals("#"+ii+" wrappingNoCFSRatio", wrappingNoCFSRatio.doubleValue(), mp.getNoCFSRatio(), 0.0d);
      }
      if (wrappedNoCFSRatio != null) {
        assertEquals("#"+ii+" wrappedNoCFSRatio", wrappedNoCFSRatio.doubleValue(), mp.getNoCFSRatio(), 0.0d);
      }
      assertSame(mp.getClass(), UpgradeIndexMergePolicy.class);
    }

  } catch (IllegalArgumentException iae) {
    // Failure path: both ratios were set, so the overlap is rejected.
    assertEquals("Wrapping and wrapped merge policy args overlap! [noCFSRatio]", iae.getMessage());
    assertTrue("Should only reach here if wrapping and wrapped args do overlap!",
        (wrappingNoCFSRatio != null && wrappedNoCFSRatio != null));
  }
}
 
@Override
@SuppressWarnings("sync-override")
protected MergeThread getMergeThread(IndexWriter writer, MergePolicy.OneMerge merge) throws IOException {
    // Prefix the merge thread's name with the index/shard it belongs to.
    final MergeThread mergeThread = super.getMergeThread(writer, merge);
    final String shardPrefixedName =
        "[" + shardId.getIndexName() + "][" + shardId.id() + "]: " + mergeThread.getName();
    mergeThread.setName(EsExecutors.threadName(indexSettings, shardPrefixedName));
    return mergeThread;
}
 
源代码25 项目: crate   文件: RecoverySourcePruneMergePolicy.java
/**
 * Wraps the given merge policy so that readers taking part in a merge are
 * additionally wrapped by {@code wrapReader}, which (per the class name)
 * prunes the recovery-source field from documents not matched by the
 * retain-source query.
 *
 * @param recoverySourceField       name of the recovery-source field
 * @param retainSourceQuerySupplier supplies the query matching documents whose
 *                                  recovery source must still be retained
 * @param in                        the merge policy to delegate to
 */
RecoverySourcePruneMergePolicy(String recoverySourceField, Supplier<Query> retainSourceQuerySupplier, MergePolicy in) {
    // Decorate every OneMerge so its readers are wrapped just before merging.
    super(in, toWrap -> new OneMerge(toWrap.segments) {
        @Override
        public CodecReader wrapForMerge(CodecReader reader) throws IOException {
            // Apply any wrapping the original merge wanted first, then ours.
            CodecReader wrapped = toWrap.wrapForMerge(reader);
            return wrapReader(recoverySourceField, wrapped, retainSourceQuerySupplier);
        }
    });
}
 
源代码26 项目: crate   文件: InternalEngine.java
/**
 * Builds the Lucene {@link IndexWriterConfig} for this engine.
 *
 * <p>Order matters: soft-deletes wrapping (when enabled) is applied to the
 * merge policy before the final ElasticsearchMergePolicy wrapper is installed.
 */
private IndexWriterConfig getIndexWriterConfig() {
    final IndexWriterConfig iwc = new IndexWriterConfig(engineConfig.getAnalyzer());
    iwc.setCommitOnClose(false); // we by default don't commit on close
    iwc.setOpenMode(IndexWriterConfig.OpenMode.APPEND);
    iwc.setReaderAttributes(getReaderAttributes(store.directory()));
    iwc.setIndexDeletionPolicy(combinedDeletionPolicy);
    // with tests.verbose, lucene sets this up: plumb to align with filesystem stream
    boolean verbose = false;
    try {
        verbose = Boolean.parseBoolean(System.getProperty("tests.verbose"));
    } catch (Exception ignore) {
        // ignored
    }
    iwc.setInfoStream(verbose ? InfoStream.getDefault() : new LoggerInfoStream(logger));
    iwc.setMergeScheduler(mergeScheduler);
    // Give us the opportunity to upgrade old segments while performing
    // background merges
    MergePolicy mergePolicy = config().getMergePolicy();
    // always configure soft-deletes field so an engine with soft-deletes disabled can open a Lucene index with soft-deletes.
    iwc.setSoftDeletesField(Lucene.SOFT_DELETES_FIELD);
    if (softDeleteEnabled) {
        // Chain: prune recovery source -> retain soft-deletes -> user-configured policy.
        mergePolicy = new RecoverySourcePruneMergePolicy(SourceFieldMapper.RECOVERY_SOURCE_NAME, softDeletesPolicy::getRetentionQuery,
            new SoftDeletesRetentionMergePolicy(Lucene.SOFT_DELETES_FIELD, softDeletesPolicy::getRetentionQuery, mergePolicy));
    }
    iwc.setMergePolicy(new ElasticsearchMergePolicy(mergePolicy));
    iwc.setRAMBufferSizeMB(engineConfig.getIndexingBufferSize().getMbFrac());
    iwc.setCodec(engineConfig.getCodec());
    iwc.setUseCompoundFile(true); // always use compound on flush - reduces # of file-handles on refresh
    return iwc;
}
 
源代码27 项目: crate   文件: EngineTestCase.java
/**
 * Copies {@code config} into a new EngineConfig, substituting the given
 * {@code mergePolicy} for the original's. A fresh {@link CodecService}
 * (backed by this test's logger) replaces the original's codec service, and
 * the second refresh-listener argument is replaced with an empty list.
 */
public EngineConfig copy(EngineConfig config, MergePolicy mergePolicy) {
    return new EngineConfig(config.getShardId(), config.getAllocationId(), config.getThreadPool(), config.getIndexSettings(),
        config.getStore(), mergePolicy, config.getAnalyzer(),
        new CodecService(null, logger), config.getEventListener(), config.getQueryCache(), config.getQueryCachingPolicy(),
        config.getTranslogConfig(), config.getFlushMergesAfter(),
        config.getExternalRefreshListener(), Collections.emptyList(),
        config.getCircuitBreakerService(), config.getGlobalCheckpointSupplier(), config.getPrimaryTermSupplier(),
        config.getTombstoneDocSupplier());
}
 
源代码28 项目: crate   文件: EngineTestCase.java
/**
 * Convenience overload without a {@code seqNoForOperation} function; delegates
 * to the full {@code createEngine} variant passing {@code null} for it.
 */
protected InternalEngine createEngine(
    IndexSettings indexSettings,
    Store store,
    Path translogPath,
    MergePolicy mergePolicy,
    @Nullable IndexWriterFactory indexWriterFactory,
    @Nullable BiFunction<Long, Long, LocalCheckpointTracker> localCheckpointTrackerSupplier,
    @Nullable LongSupplier globalCheckpointSupplier) throws IOException {
    return createEngine(
        indexSettings, store, translogPath, mergePolicy, indexWriterFactory, localCheckpointTrackerSupplier, null,
        globalCheckpointSupplier);
}
 
源代码29 项目: crate   文件: EngineTestCase.java
/**
 * Creates an InternalEngine over the given store/translog with the supplied
 * merge policy: builds an EngineConfig (with a null index-writer factory
 * argument and the given global-checkpoint supplier) and delegates to
 * {@code createEngine(indexWriterFactory, localCheckpointTrackerSupplier, seqNoForOperation, config)}.
 */
protected InternalEngine createEngine(
    IndexSettings indexSettings,
    Store store,
    Path translogPath,
    MergePolicy mergePolicy,
    @Nullable IndexWriterFactory indexWriterFactory,
    @Nullable BiFunction<Long, Long, LocalCheckpointTracker> localCheckpointTrackerSupplier,
    @Nullable ToLongBiFunction<Engine, Engine.Operation> seqNoForOperation,
    @Nullable LongSupplier globalCheckpointSupplier) throws IOException {
    EngineConfig config = config(indexSettings, store, translogPath, mergePolicy, null, globalCheckpointSupplier);
    return createEngine(indexWriterFactory, localCheckpointTrackerSupplier, seqNoForOperation, config);
}
 
源代码30 项目: Elasticsearch   文件: EngineConfig.java
/**
 * Returns the {@link org.apache.lucene.index.MergePolicy} for the engines {@link org.apache.lucene.index.IndexWriter}
 *
 * @return the merge policy this config was constructed with
 */
public MergePolicy getMergePolicy() {
    return mergePolicy;
}
 
 类所在包
 类方法
 同包方法