org.apache.lucene.search.QueryCachingPolicy#org.elasticsearch.index.IndexSettings Source Code Examples

Listed below are example usages of org.apache.lucene.search.QueryCachingPolicy#org.elasticsearch.index.IndexSettings; follow the project links to view the full source on GitHub.

Example 1 - Project: crate, File: IndicesService.java
/**
 * Returns a <code>ShardDeletionCheckResult</code> signaling whether the given shard's content can be deleted.
 *
 * @param shardId the shard to delete.
 * @param indexSettings the shard's relevant {@link IndexSettings}. This is required to access the index's settings etc.
 */
public ShardDeletionCheckResult canDeleteShardContent(ShardId shardId, IndexSettings indexSettings) {
    assert shardId.getIndex().equals(indexSettings.getIndex());
    final IndexService indexService = indexService(shardId.getIndex());
    if (nodeEnv.hasNodeFile()) {
        final boolean isAllocated = indexService != null && indexService.hasShard(shardId.id());
        if (isAllocated) {
            return ShardDeletionCheckResult.STILL_ALLOCATED; // we are allocated - can't delete the shard
        } else if (indexSettings.hasCustomDataPath()) {
            // let's see if it's on a custom path (return false if the shard doesn't exist)
            // we don't need to delete anything that is not there
            return Files.exists(nodeEnv.resolveCustomLocation(indexSettings, shardId)) ?
                    ShardDeletionCheckResult.FOLDER_FOUND_CAN_DELETE :
                    ShardDeletionCheckResult.NO_FOLDER_FOUND;
        } else {
            // let's see if its path is available (return false if the shard doesn't exist)
            // we don't need to delete anything that is not there
            return FileSystemUtils.exists(nodeEnv.availableShardPaths(shardId)) ?
                    ShardDeletionCheckResult.FOLDER_FOUND_CAN_DELETE :
                    ShardDeletionCheckResult.NO_FOLDER_FOUND;
        }
    } else {
        return ShardDeletionCheckResult.NO_LOCAL_STORAGE;
    }
}
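
A minimal caller-side sketch of how the result might be consumed. The `indicesService` instance and the `deleteShardStore` companion call are assumptions for illustration, not part of the example above:

ShardDeletionCheckResult result = indicesService.canDeleteShardContent(shardId, indexSettings);
switch (result) {
    case FOLDER_FOUND_CAN_DELETE:
        indicesService.deleteShardStore("shard no longer allocated", shardId, indexSettings); // assumed signature
        break;
    case STILL_ALLOCATED:   // shard is live on this node, leave it alone
    case NO_FOLDER_FOUND:   // nothing on disk to remove
    case NO_LOCAL_STORAGE:  // this node has no local data paths
        break;
}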
 
DelimitedPayloadTokenFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) {
    super(indexSettings, name, settings);
    String delimiterConf = settings.get(DELIMITER);
    if (delimiterConf != null) {
        delimiter = delimiterConf.charAt(0);
    } else {
        delimiter = DEFAULT_DELIMITER;
    }

    if (settings.get(ENCODING) != null) {
        if (settings.get(ENCODING).equals("float")) {
            encoder = new FloatEncoder();
        } else if (settings.get(ENCODING).equals("int")) {
            encoder = new IntegerEncoder();
        } else if (settings.get(ENCODING).equals("identity")) {
            encoder = new IdentityEncoder();
        } else {
            encoder = DEFAULT_ENCODER;
        }
    } else {
        encoder = DEFAULT_ENCODER;
    }
}
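
The constructor above reads two settings keys. A minimal configuration sketch, assuming the `DELIMITER` and `ENCODING` constants resolve to the keys `delimiter` and `encoding`:

import org.elasticsearch.common.settings.Settings;

// Sketch: '|' as payload delimiter, payloads decoded as floats.
Settings filterSettings = Settings.builder()
    .put("delimiter", "|")     // only charAt(0) is used
    .put("encoding", "float")  // recognized: float, int, identity; anything else falls back to the default encoder
    .build();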
 
Example 3 - Project: crate, File: IndicesService.java
/**
 * Deletes the index store trying to acquire all shards locks for this index.
 * This method will delete the metadata for the index even if the actual shards can't be locked.
 *
 * Package private for testing
 */
void deleteIndexStore(String reason, IndexMetaData metaData, ClusterState clusterState) throws IOException {
    if (nodeEnv.hasNodeFile()) {
        synchronized (this) {
            Index index = metaData.getIndex();
            if (hasIndex(index)) {
                String localUUID = indexService(index).indexUUID();
                throw new IllegalStateException("Can't delete index store for [" + index.getName() + "] - it's still part of the indices service [" + localUUID + "] [" + metaData.getIndexUUID() + "]");
            }

            if (clusterState.metaData().hasIndex(index.getName()) && clusterState.nodes().getLocalNode().isMasterNode()) {
                // we do not delete the store if it is a master eligible node and the index is still in the cluster state
                // because we want to keep the meta data for indices around even if no shards are left here
                final IndexMetaData idxMeta = clusterState.metaData().index(index.getName());
                throw new IllegalStateException("Can't delete index store for [" + index.getName() + "] - it's still part of the " +
                                                "cluster state [" + idxMeta.getIndexUUID() + "] [" + metaData.getIndexUUID() + "], " +
                                                "we are master eligible, so will keep the index metadata even if no shards are left.");
            }
        }
        final IndexSettings indexSettings = buildIndexSettings(metaData);
        deleteIndexStore(reason, indexSettings.getIndex(), indexSettings);
    }
}
 
public URLTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
    super(indexSettings, name, settings);

    String[] parts = settings.getAsArray("part");
    if (parts != null && parts.length > 0) {
        this.parts = Arrays.stream(parts)
                .map(URLPart::fromString)
                .collect(Collectors.toList());
    }
    this.urlDecode = settings.getAsBoolean("url_decode", false);
    this.tokenizeHost = settings.getAsBoolean("tokenize_host", true);
    this.tokenizePath = settings.getAsBoolean("tokenize_path", true);
    this.tokenizeQuery = settings.getAsBoolean("tokenize_query", true);
    this.allowMalformed = settings.getAsBoolean("allow_malformed", false);
    this.tokenizeMalformed = settings.getAsBoolean("tokenize_malformed", false);
}
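
A settings sketch matching the keys this tokenizer factory reads (note: on older Elasticsearch versions, where `getAsArray` lives, the builder method is `putArray` rather than `putList`):

// Sketch: emit only host and path tokens, decode percent-escapes.
Settings tokenizerSettings = Settings.builder()
    .putList("part", "host", "path")
    .put("url_decode", true)
    .put("tokenize_query", false)
    .put("allow_malformed", true)
    .build();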
 
Example 5 - Project: crate, File: DanishAnalyzerProvider.java
DanishAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
    super(indexSettings, name, settings);
    analyzer = new DanishAnalyzer(
        Analysis.parseStopWords(env, settings, DanishAnalyzer.getDefaultStopSet()),
        Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET)
    );
    analyzer.setVersion(version);
}
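
The language analyzer providers in this listing (Danish, Spanish, Norwegian, Russian below) all follow this same pattern, so they share the same two settings. A sketch of the keys `Analysis.parseStopWords` and `Analysis.parseStemExclusion` are assumed to read:

// Sketch: override the default stop set and protect words from stemming.
Settings analyzerSettings = Settings.builder()
    .putList("stopwords", "og", "i", "at")
    .putList("stem_exclusion", "undervisning")
    .build();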
 
Example 6 - Project: vscode-extension, File: test.java
public void onSettingsChanged() {
    mergeScheduler.refreshConfig();
    // config().isEnableGcDeletes() or config().getGcDeletesInMillis() may have changed:
    maybePruneDeletes();
    final TranslogDeletionPolicy translogDeletionPolicy = translog.getDeletionPolicy();
    final IndexSettings indexSettings = engineConfig.getIndexSettings();
    translogDeletionPolicy.setRetentionAgeInMillis(indexSettings.getTranslogRetentionAge().getMillis());
    translogDeletionPolicy.setRetentionSizeInBytes(indexSettings.getTranslogRetentionSize().getBytes());
    softDeletesPolicy.setRetentionOperations(indexSettings.getSoftDeleteRetentionOperations());
}
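
The three values pulled from `IndexSettings` here correspond to dynamic index settings, so an update like the following sketch would eventually be reflected through `onSettingsChanged()` (setting keys as published by Elasticsearch):

// Sketch: dynamic retention settings that feed the policies above.
Settings update = Settings.builder()
    .put("index.translog.retention.age", "12h")
    .put("index.translog.retention.size", "512mb")
    .put("index.soft_deletes.retention.operations", 1000)
    .build();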
 
@Test
public void testTranslogSyncInterval() {
    BoundAlterTable analysis =
        analyze("alter table users set (\"translog.sync_interval\"='1s')");
    assertThat(analysis.table().ident().name(), is("users"));
    assertThat(analysis.tableParameter().settings().get(IndexSettings.INDEX_TRANSLOG_SYNC_INTERVAL_SETTING.getKey()), is("1s"));
}
 
Example 8 - Project: crate, File: BlobIndicesService.java
@Override
public void afterIndexRemoved(Index index,
                              IndexSettings indexSettings,
                              IndicesClusterStateService.AllocatedIndices.IndexRemovalReason reason) {
    String indexName = index.getName();
    if (isBlobIndex(indexName)) {
        BlobIndex blobIndex = indices.remove(indexName);
        assert blobIndex != null : "BlobIndex not found on afterIndexRemoved";
    }
}
 
Example 9 - Project: crate, File: SpanishAnalyzerProvider.java
SpanishAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
    super(indexSettings, name, settings);
    analyzer = new SpanishAnalyzer(
        Analysis.parseStopWords(env, settings, SpanishAnalyzer.getDefaultStopSet()),
        Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET)
    );
    analyzer.setVersion(version);
}
 
public static AnalyzerProvider<? extends Analyzer> getJiebaSearchAnalyzerProvider(IndexSettings indexSettings,
                                                                                  Environment environment,
                                                                                  String s,
                                                                                  Settings settings) {
  JiebaAnalyzerProvider jiebaAnalyzerProvider = new JiebaAnalyzerProvider(indexSettings,
      environment,
      s,
      settings,
      JiebaSegmenter.SegMode.SEARCH);

  return jiebaAnalyzerProvider;
}
 
public NaturalSortKeyAnalyzerProvider(IndexSettings indexSettings, Environment environment, String name,
                                      Settings settings) {
    super(indexSettings, name, settings);
    this.collator = createCollator(settings);
    this.digits = settings.getAsInt("digits", 1);
    this.maxTokens = settings.getAsInt("maxTokens", 2);
    this.bufferSize = settings.getAsInt("bufferSize", KeywordTokenizer.DEFAULT_BUFFER_SIZE);
}
 
public static TokenizerFactory getJiebaSearchTokenizerFactory(IndexSettings indexSettings,
                                                              Environment environment,
                                                              String s,
                                                              Settings settings) {
  JiebaTokenizerFactory jiebaTokenizerFactory = new JiebaTokenizerFactory(indexSettings,
      environment,
      settings);
  jiebaTokenizerFactory.setSegMode(JiebaSegmenter.SegMode.SEARCH.name());
  return jiebaTokenizerFactory;
}
 
Example 13 - Project: crate, File: AnalysisRegistry.java
public Map<String, TokenFilterFactory> buildTokenFilterFactories(IndexSettings indexSettings) throws IOException {
    final Map<String, Settings> tokenFiltersSettings = indexSettings.getSettings().getGroups(INDEX_ANALYSIS_FILTER);
    Map<String, AnalysisModule.AnalysisProvider<TokenFilterFactory>> tokenFilters = new HashMap<>(this.tokenFilters);
    /*
     * synonym and synonym_graph are different from everything else since they need access to the tokenizer factories for the index.
     * Instead of building extra plugin infrastructure, we'd rather treat them as a genuine exception, to avoid polluting the general
     * interface and to hide internal data structures as much as possible.
     */
    tokenFilters.put("synonym", requiresAnalysisSettings((is, env, name, settings) -> new SynonymTokenFilterFactory(is, env, this, name, settings)));
    tokenFilters.put("synonym_graph", requiresAnalysisSettings((is, env, name, settings) -> new SynonymGraphTokenFilterFactory(is, env, this, name, settings)));

    return buildMapping(Component.FILTER, indexSettings, tokenFiltersSettings, Collections.unmodifiableMap(tokenFilters), prebuiltAnalysis.preConfiguredTokenFilters);
}
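
Because `synonym` and `synonym_graph` are wired up here with access to the registry itself, defining one needs nothing beyond ordinary index settings. A minimal sketch:

// Sketch: a synonym filter under the index.analysis.filter group read above.
Settings indexLevelSettings = Settings.builder()
    .put("index.analysis.filter.my_synonyms.type", "synonym")
    .putList("index.analysis.filter.my_synonyms.synonyms", "laptop, notebook")
    .build();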
 
public PairTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
    super(indexSettings, name, settings);
    this.pairs = new LinkedHashMap<>();
    Settings pairsSettings = settings.getAsSettings("pairs");
    for (String key: pairsSettings.keySet()) {
        pairs.put(key, pairsSettings.get(key));
    }
}
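
Since the constructor iterates `settings.getAsSettings("pairs")`, every key nested under `pairs` becomes one map entry. A sketch:

// Sketch: nested settings; yields pairs = {foo=bar, baz=qux}.
Settings filterSettings = Settings.builder()
    .put("pairs.foo", "bar")
    .put("pairs.baz", "qux")
    .build();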
 
Example 15 - Project: crate, File: AnalysisRegistry.java
/**
 * Returns a {@link TokenizerFactory} provider registered through the given {@link IndexSettings},
 * or a {@link TokenizerFactory} provider registered under a predefined name,
 * or <code>null</code> if the tokenizer was not registered.
 * @param tokenizer global or index-defined tokenizer name
 * @param indexSettings the index settings
 * @return {@link TokenizerFactory} provider or <code>null</code>
 */
public AnalysisProvider<TokenizerFactory> getTokenizerProvider(String tokenizer, IndexSettings indexSettings) {
    final Map<String, Settings> tokenizerSettings = indexSettings.getSettings().getGroups("index.analysis.tokenizer");
    if (tokenizerSettings.containsKey(tokenizer)) {
        Settings currentSettings = tokenizerSettings.get(tokenizer);
        return getAnalysisProvider(Component.TOKENIZER, tokenizers, tokenizer, currentSettings.get("type"));
    } else {
        return getTokenizerProvider(tokenizer);
    }
}
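
A lookup sketch illustrating the precedence the Javadoc describes, assuming an `AnalysisRegistry` instance named `analysisRegistry`: an index-level definition named like a prebuilt tokenizer shadows it, and an unknown name yields null:

// Sketch: an index-defined "standard" tokenizer wins over the prebuilt one.
AnalysisProvider<TokenizerFactory> provider =
    analysisRegistry.getTokenizerProvider("standard", indexSettings);
if (provider == null) {
    // not defined on the index and not registered globally
}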
 
Example 16 - Project: crate, File: ChineseAnalyzerProvider.java
ChineseAnalyzerProvider(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
    super(indexSettings, name, settings);
    // old index: best effort
    analyzer = new StandardAnalyzer();
    analyzer.setVersion(version);
}
 
protected AbstractCompoundWordTokenFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) {
    super(indexSettings, name, settings);

    minWordSize = settings.getAsInt("min_word_size", CompoundWordTokenFilterBase.DEFAULT_MIN_WORD_SIZE);
    minSubwordSize = settings.getAsInt("min_subword_size", CompoundWordTokenFilterBase.DEFAULT_MIN_SUBWORD_SIZE);
    maxSubwordSize = settings.getAsInt("max_subword_size", CompoundWordTokenFilterBase.DEFAULT_MAX_SUBWORD_SIZE);
    onlyLongestMatch = settings.getAsBoolean("only_longest_match", false);
    wordList = Analysis.getWordSet(env, settings, "word_list");
    if (wordList == null) {
        throw new IllegalArgumentException("word_list must be provided for [" + name + "], either as a path to a file, or directly");
    }
}
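
A configuration sketch for this factory; `word_list` here is inline, and `Analysis.getWordSet` is assumed to also accept a file via `word_list_path` (omitting both triggers the exception above):

// Sketch: dictionary-based decompounding with an inline word list.
Settings filterSettings = Settings.builder()
    .putList("word_list", "soft", "ball", "team")
    .put("min_subword_size", 3)
    .build();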
 
Example 18 - Project: crate, File: EngineTestCase.java
protected Settings indexSettings() {
    // TODO randomize more settings
    return Settings.builder()
        .put(IndexSettings.INDEX_GC_DELETES_SETTING.getKey(), "1h") // make sure this doesn't kick in on us
        .put(EngineConfig.INDEX_CODEC_SETTING.getKey(), codecName)
        .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
        .put(IndexSettings.MAX_REFRESH_LISTENERS_PER_SHARD.getKey(),
            between(10, 10 * IndexSettings.MAX_REFRESH_LISTENERS_PER_SHARD.get(Settings.EMPTY)))
        .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean())
        .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(),
            randomBoolean() ? IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.get(Settings.EMPTY) : between(0, 1000))
        .build();
}
 
Example 19 - Project: crate, File: NorwegianAnalyzerProvider.java
NorwegianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
    super(indexSettings, name, settings);
    analyzer = new NorwegianAnalyzer(
        Analysis.parseStopWords(env, settings, NorwegianAnalyzer.getDefaultStopSet()),
        Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET)
    );
    analyzer.setVersion(version);
}
 
Example 20 - Project: crate, File: IndicesService.java
/**
 * Creates a new pending delete of a shard
 */
PendingDelete(Index index, IndexSettings settings) {
    this.index = index;
    this.shardId = -1;
    this.settings = settings;
    this.deleteIndex = true;
}
 
Example 21 - Project: crate, File: RussianAnalyzerProvider.java
RussianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
    super(indexSettings, name, settings);
    analyzer = new RussianAnalyzer(
        Analysis.parseStopWords(env, settings, RussianAnalyzer.getDefaultStopSet()),
        Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET)
    );
    analyzer.setVersion(version);
}
 
public static HanLPTokenizerFactory createStandard(IndexSettings indexSettings,
                                                   Environment environment,
                                                   String name,
                                                   Settings settings) {
    return new HanLPTokenizerFactory(indexSettings, environment, name, settings) {
        @Override
        public Tokenizer create() {
            return new HanLPTokenizer(StandardTokenizer.SEGMENT, defaultStopWordDictionary, enablePorterStemming);
        }
    };
}
 
Example 23 - Project: crate, File: ESIntegTestCase.java
/**
 * Returns a settings object used in {@link #createIndex(String...)} and {@link #prepareCreate(String)} and friends.
 * This method can be overridden by subclasses to set defaults for the indices created by the test.
 * By default it returns a settings object that sets a random number of shards. Number of shards and replicas
 * can be controlled through specific methods.
 */
public Settings indexSettings() {
    Settings.Builder builder = Settings.builder();
    int numberOfShards = numberOfShards();
    if (numberOfShards > 0) {
        builder.put(SETTING_NUMBER_OF_SHARDS, numberOfShards);
    }
    int numberOfReplicas = numberOfReplicas();
    if (numberOfReplicas >= 0) {
        builder.put(SETTING_NUMBER_OF_REPLICAS, numberOfReplicas);
    }
    // 30% of the time
    if (randomInt(9) < 3) {
        final String dataPath = randomAlphaOfLength(10);
        logger.info("using custom data_path for index: [{}]", dataPath);
        builder.put(IndexMetaData.SETTING_DATA_PATH, dataPath);
    }
    // always default delayed allocation to 0 to make sure tests are not delayed
    builder.put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), 0);
    builder.put(SETTING_AUTO_EXPAND_REPLICAS, "false");
    builder.put(SETTING_WAIT_FOR_ACTIVE_SHARDS.getKey(), ActiveShardCount.ONE.toString());
    builder.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean());
    if (randomBoolean()) {
        builder.put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), between(0, 1000));
    }
    return builder.build();
}
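
Since the Javadoc invites subclasses to override this method, here is a minimal test-subclass sketch pinning deterministic shard counts on top of the randomized defaults:

// Sketch: deterministic shards/replicas for a randomness-sensitive test class.
@Override
public Settings indexSettings() {
    return Settings.builder()
        .put(super.indexSettings())
        .put(SETTING_NUMBER_OF_SHARDS, 1)
        .put(SETTING_NUMBER_OF_REPLICAS, 0)
        .build();
}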
 
public DecompoundTokenFilterFactory(IndexSettings indexSettings, Environment environment,
                                    @Assisted String name, @Assisted Settings settings) {
    super(indexSettings, name, settings);
    this.decompounder = createDecompounder(settings);
    this.respectKeywords = settings.getAsBoolean("respect_keywords", false);
    this.subwordsonly = settings.getAsBoolean("subwords_only", false);
    this.usePayload = settings.getAsBoolean("use_payload", false);
    if (cache == null && settings.getAsBoolean("use_cache", false)) {
        cache = createCache(settings);
    }
}
 
public static HanLPTokenizerFactory createNShort(IndexSettings indexSettings,
                                                 Environment environment,
                                                 String name,
                                                 Settings settings) {
    return new HanLPTokenizerFactory(indexSettings, environment, name, settings) {
        @Override
        public Tokenizer create() {
            Segment seg = new NShortSegment().enableCustomDictionary(false)
                                             .enablePlaceRecognize(true)
                                             .enableOrganizationRecognize(true);
            return new HanLPTokenizer(seg, defaultStopWordDictionary, enablePorterStemming);
        }
    };
}
 
public static HanLPTokenizerFactory createCRF(IndexSettings indexSettings,
                                              Environment environment,
                                              String name,
                                              Settings settings) {
    return new HanLPTokenizerFactory(indexSettings, environment, name, settings) {
        @Override
        public Tokenizer create() {
            Segment seg = new CRFSegment().enablePartOfSpeechTagging(true);
            return new HanLPTokenizer(seg, defaultStopWordDictionary, enablePorterStemming);
        }
    };
}
 
Example 27 - Project: crate, File: IndexShard.java
private static void persistMetadata(
        final ShardPath shardPath,
        final IndexSettings indexSettings,
        final ShardRouting newRouting,
        final @Nullable ShardRouting currentRouting,
        final Logger logger) throws IOException {
    assert newRouting != null : "newRouting must not be null";

    // only persist metadata if routing information that is persisted in shard state metadata actually changed
    final ShardId shardId = newRouting.shardId();
    if (currentRouting == null
        || currentRouting.primary() != newRouting.primary()
        || currentRouting.allocationId().equals(newRouting.allocationId()) == false) {
        assert currentRouting == null || currentRouting.isSameAllocation(newRouting);
        final String writeReason;
        if (currentRouting == null) {
            writeReason = "initial state with allocation id [" + newRouting.allocationId() + "]";
        } else {
            writeReason = "routing changed from " + currentRouting + " to " + newRouting;
        }
        logger.trace("{} writing shard state, reason [{}]", shardId, writeReason);
        final ShardStateMetaData newShardStateMetadata =
                new ShardStateMetaData(newRouting.primary(), indexSettings.getUUID(), newRouting.allocationId());
        ShardStateMetaData.FORMAT.write(newShardStateMetadata, shardPath.getShardStatePath());
    } else {
        logger.trace("{} skip writing shard state, has been written before", shardId);
    }
}
 
Example 28 - Project: mynlp, File: MynlpTokenizerFactory.java
public MynlpTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
    super(indexSettings, name, settings);
    factory = new LexerFactory(settings);

    String type = "core";

    if (name.startsWith("mynlp-")) {
        type = name.substring("mynlp-".length());
    }

    factory.setType(type);
}
 
public IcuTransformTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name,
                                      Settings settings) {
    super(indexSettings, name, settings);
    String id = settings.get("id", "Null");
    String direction = settings.get("dir", "forward");
    int dir = "forward".equals(direction) ? Transliterator.FORWARD : Transliterator.REVERSE;
    String rules = settings.get("rules");
    this.transliterator = rules != null ?
            Transliterator.createFromRules(id, rules, dir) :
            Transliterator.getInstance(id, dir);
    String unicodeSetFilter = settings.get("unicodeSetFilter");
    if (unicodeSetFilter != null) {
        transliterator.setFilter(new UnicodeSet(unicodeSetFilter).freeze());
    }
}
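
A configuration sketch for this ICU transform filter; "Any-Latin; Latin-ASCII" is a standard compound Transliterator ID (transliterate to Latin, then strip accents):

// Sketch: forward transliteration to ASCII-folded Latin.
Settings filterSettings = Settings.builder()
    .put("id", "Any-Latin; Latin-ASCII")
    .put("dir", "forward")
    .build();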
 
Example 30 - Project: crate, File: AnalysisRegistry.java
/**
 * Creates an index-level {@link IndexAnalyzers} from this registry using the given index settings
 */
public IndexAnalyzers build(IndexSettings indexSettings) throws IOException {

    final Map<String, CharFilterFactory> charFilterFactories = buildCharFilterFactories(indexSettings);
    final Map<String, TokenizerFactory> tokenizerFactories = buildTokenizerFactories(indexSettings);
    final Map<String, TokenFilterFactory> tokenFilterFactories = buildTokenFilterFactories(indexSettings);
    final Map<String, AnalyzerProvider<?>> analyzerFactories = buildAnalyzerFactories(indexSettings);
    final Map<String, AnalyzerProvider<?>> normalizerFactories = buildNormalizerFactories(indexSettings);
    return build(indexSettings, analyzerFactories, normalizerFactories, tokenizerFactories, charFilterFactories, tokenFilterFactories);
}
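
An end-to-end usage sketch tying the registry together, assuming the usual `IndexAnalyzers` accessor:

// Sketch: build all index-level analysis components, then look one up by name.
IndexAnalyzers indexAnalyzers = analysisRegistry.build(indexSettings);
NamedAnalyzer analyzer = indexAnalyzers.get("my_analyzer"); // null if not defined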