Java Code Examples: org.elasticsearch.common.util.BigArrays

Below are example usages of org.elasticsearch.common.util.BigArrays, collected from open-source projects. The full source code of each example is available on GitHub.
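
Before the examples, a minimal sketch (not from any of the listed projects) of the BigArrays life cycle most snippets below rely on: allocate an array, grow it on demand, and release it when done. BigArrays.NON_RECYCLING_INSTANCE is the variant without page recycling or circuit breaking.

import org.elasticsearch.common.lease.Releasables;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.DoubleArray;

public class BigArraysDemo {
    public static void main(String[] args) {
        BigArrays bigArrays = BigArrays.NON_RECYCLING_INSTANCE; // no recycling, no circuit breaking
        DoubleArray sums = bigArrays.newDoubleArray(1, true);   // initial size 1, zero-initialized
        try {
            long bucket = 5;
            sums = bigArrays.grow(sums, bucket + 1);            // grows in pages, keeps existing values
            sums.increment(bucket, 42.0);
            System.out.println(sums.get(bucket));               // 42.0
        } finally {
            Releasables.close(sums);                            // BigArray is Releasable; always release it
        }
    }
}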

Example 1
@Override
public Map<String, Supplier<HttpServerTransport>> getHttpTransports(Settings settings, ThreadPool threadPool, BigArrays bigArrays,
        CircuitBreakerService circuitBreakerService, NamedWriteableRegistry namedWriteableRegistry,
        NamedXContentRegistry xContentRegistry, NetworkService networkService, Dispatcher dispatcher) {
    
    final Map<String, Supplier<HttpServerTransport>> httpTransports = new HashMap<>(1);
    if (!client && httpSSLEnabled) {

        final ValidatingDispatcher validatingDispatcher = new ValidatingDispatcher(
                threadPool.getThreadContext(), dispatcher, settings, configPath, NOOP_SSL_EXCEPTION_HANDLER);
        final OpenDistroSecuritySSLNettyHttpServerTransport sgsnht = new OpenDistroSecuritySSLNettyHttpServerTransport(
                settings, networkService, bigArrays, threadPool, odsks, xContentRegistry, validatingDispatcher,
                NOOP_SSL_EXCEPTION_HANDLER);

        httpTransports.put("com.amazon.opendistroforelasticsearch.security.ssl.http.netty.OpenDistroSecuritySSLNettyHttpServerTransport", () -> sgsnht);
        
    }
    return httpTransports;
}
 
Example 2  Project: Elasticsearch  File: CrateSearchContext.java
public CrateSearchContext(long id,
                          final long nowInMillis,
                          SearchShardTarget shardTarget,
                          Engine.Searcher engineSearcher,
                          IndexService indexService,
                          final IndexShard indexShard,
                          ScriptService scriptService,
                          PageCacheRecycler pageCacheRecycler,
                          BigArrays bigArrays,
                          Counter timeEstimateCounter,
                          Optional<Scroll> scroll) {
    super(id, new CrateSearchShardRequest(nowInMillis, scroll, indexShard),
            shardTarget, engineSearcher, indexService,
            indexShard, scriptService, pageCacheRecycler,
            bigArrays, timeEstimateCounter, ParseFieldMatcher.STRICT, SearchService.NO_TIMEOUT);
    this.engineSearcher = engineSearcher;
}
 
Example 3  Project: Elasticsearch  File: GeoBoundsAggregator.java
protected GeoBoundsAggregator(String name, AggregationContext aggregationContext, Aggregator parent,
        ValuesSource.GeoPoint valuesSource, boolean wrapLongitude, List<PipelineAggregator> pipelineAggregators,
        Map<String, Object> metaData) throws IOException {
    super(name, aggregationContext, parent, pipelineAggregators, metaData);
    this.valuesSource = valuesSource;
    this.wrapLongitude = wrapLongitude;
    if (valuesSource != null) {
        final BigArrays bigArrays = context.bigArrays();
        tops = bigArrays.newDoubleArray(1, false);
        tops.fill(0, tops.size(), Double.NEGATIVE_INFINITY);
        bottoms = bigArrays.newDoubleArray(1, false);
        bottoms.fill(0, bottoms.size(), Double.POSITIVE_INFINITY);
        posLefts = bigArrays.newDoubleArray(1, false);
        posLefts.fill(0, posLefts.size(), Double.POSITIVE_INFINITY);
        posRights = bigArrays.newDoubleArray(1, false);
        posRights.fill(0, posRights.size(), Double.NEGATIVE_INFINITY);
        negLefts = bigArrays.newDoubleArray(1, false);
        negLefts.fill(0, negLefts.size(), Double.POSITIVE_INFINITY);
        negRights = bigArrays.newDoubleArray(1, false);
        negRights.fill(0, negRights.size(), Double.NEGATIVE_INFINITY);
    }
}
 
Example 4  Project: Elasticsearch  File: InternalCardinality.java
@Override
public InternalAggregation doReduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
    InternalCardinality reduced = null;
    for (InternalAggregation aggregation : aggregations) {
        final InternalCardinality cardinality = (InternalCardinality) aggregation;
        if (cardinality.counts != null) {
            if (reduced == null) {
                reduced = new InternalCardinality(name, new HyperLogLogPlusPlus(cardinality.counts.precision(),
                        BigArrays.NON_RECYCLING_INSTANCE, 1), this.valueFormatter, pipelineAggregators(), getMetaData());
            }
            reduced.merge(cardinality);
        }
    }

    if (reduced == null) { // all empty
        return aggregations.get(0);
    } else {
        return reduced;
    }
}
 
Example 5  Project: Elasticsearch  File: HyperLogLogPlusPlus.java
public static HyperLogLogPlusPlus readFrom(StreamInput in, BigArrays bigArrays) throws IOException {
    final int precision = in.readVInt();
    HyperLogLogPlusPlus counts = new HyperLogLogPlusPlus(precision, bigArrays, 1);
    final boolean algorithm = in.readBoolean();
    if (algorithm == LINEAR_COUNTING) {
        counts.algorithm.clear(0);
        final long size = in.readVLong();
        for (long i = 0; i < size; ++i) {
            final int encoded = in.readInt();
            counts.hashSet.add(0, encoded);
        }
    } else {
        counts.algorithm.set(0);
        for (int i = 0; i < counts.m; ++i) {
            counts.runLens.set(i, in.readByte());
        }
    }
    return counts;
}
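
The natural counterpart of readFrom is HyperLogLogPlusPlus#writeTo(long bucket, StreamOutput), which is what InternalCardinality uses to serialize its counts. A hedged round-trip sketch (package locations follow this Elasticsearch 2.x line and are an assumption):

import java.io.IOException;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.search.aggregations.metrics.cardinality.HyperLogLogPlusPlus;

class HllRoundTrip {
    static HyperLogLogPlusPlus copyOf(HyperLogLogPlusPlus counts) throws IOException {
        BytesStreamOutput out = new BytesStreamOutput();
        counts.writeTo(0, out);                  // serialize the state of bucket 0
        return HyperLogLogPlusPlus.readFrom(     // read it back into a fresh, non-recycling instance
                out.bytes().streamInput(), BigArrays.NON_RECYCLING_INSTANCE);
    }
}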
 
Example 6  Project: Elasticsearch  File: AvgAggregator.java
@Override
public LeafBucketCollector getLeafCollector(LeafReaderContext ctx,
        final LeafBucketCollector sub) throws IOException {
    if (valuesSource == null) {
        return LeafBucketCollector.NO_OP_COLLECTOR;
    }
    final BigArrays bigArrays = context.bigArrays();
    final SortedNumericDoubleValues values = valuesSource.doubleValues(ctx);
    return new LeafBucketCollectorBase(sub, values) {
        @Override
        public void collect(int doc, long bucket) throws IOException {
            counts = bigArrays.grow(counts, bucket + 1);
            sums = bigArrays.grow(sums, bucket + 1);

            values.setDocument(doc);
            final int valueCount = values.count();
            counts.increment(bucket, valueCount);
            double sum = 0;
            for (int i = 0; i < valueCount; i++) {
                sum += values.valueAt(i);
            }
            sums.increment(bucket, sum);
        }
    };
}
 
Example 7  Project: Elasticsearch  File: StatsAggregator.java
public StatsAggregator(String name, ValuesSource.Numeric valuesSource, ValueFormatter formatter,
                       AggregationContext context,
                       Aggregator parent, List<PipelineAggregator> pipelineAggregators,
                       Map<String, Object> metaData) throws IOException {
    super(name, context, parent, pipelineAggregators, metaData);
    this.valuesSource = valuesSource;
    if (valuesSource != null) {
        final BigArrays bigArrays = context.bigArrays();
        counts = bigArrays.newLongArray(1, true);
        sums = bigArrays.newDoubleArray(1, true);
        mins = bigArrays.newDoubleArray(1, false);
        mins.fill(0, mins.size(), Double.POSITIVE_INFINITY);
        maxes = bigArrays.newDoubleArray(1, false);
        maxes.fill(0, maxes.size(), Double.NEGATIVE_INFINITY);
    }
    this.formatter = formatter;
}
 
Example 8  Project: Elasticsearch  File: ExtendedStatsAggregator.java
public ExtendedStatsAggregator(String name, ValuesSource.Numeric valuesSource, ValueFormatter formatter,
        AggregationContext context, Aggregator parent, double sigma, List<PipelineAggregator> pipelineAggregators,
        Map<String, Object> metaData)
        throws IOException {
    super(name, context, parent, pipelineAggregators, metaData);
    this.valuesSource = valuesSource;
    this.formatter = formatter;
    this.sigma = sigma;
    if (valuesSource != null) {
        final BigArrays bigArrays = context.bigArrays();
        counts = bigArrays.newLongArray(1, true);
        sums = bigArrays.newDoubleArray(1, true);
        mins = bigArrays.newDoubleArray(1, false);
        mins.fill(0, mins.size(), Double.POSITIVE_INFINITY);
        maxes = bigArrays.newDoubleArray(1, false);
        maxes.fill(0, maxes.size(), Double.NEGATIVE_INFINITY);
        sumOfSqrs = bigArrays.newDoubleArray(1, true);
    }
}
 
Example 9  Project: Elasticsearch  File: SumAggregator.java
@Override
public LeafBucketCollector getLeafCollector(LeafReaderContext ctx,
        final LeafBucketCollector sub) throws IOException {
    if (valuesSource == null) {
        return LeafBucketCollector.NO_OP_COLLECTOR;
    }
    final BigArrays bigArrays = context.bigArrays();
    final SortedNumericDoubleValues values = valuesSource.doubleValues(ctx);
    return new LeafBucketCollectorBase(sub, values) {
        @Override
        public void collect(int doc, long bucket) throws IOException {
            sums = bigArrays.grow(sums, bucket + 1);
            values.setDocument(doc);
            final int valuesCount = values.count();
            double sum = 0;
            for (int i = 0; i < valuesCount; i++) {
                sum += values.valueAt(i);
            }
            sums.increment(bucket, sum);
        }
    };
}
 
Example 10  Project: Elasticsearch  File: MaxAggregator.java
@Override
public LeafBucketCollector getLeafCollector(LeafReaderContext ctx,
        final LeafBucketCollector sub) throws IOException {
    if (valuesSource == null) {
        return LeafBucketCollector.NO_OP_COLLECTOR;
    }
    final BigArrays bigArrays = context.bigArrays();
    final SortedNumericDoubleValues allValues = valuesSource.doubleValues(ctx);
    final NumericDoubleValues values = MultiValueMode.MAX.select(allValues, Double.NEGATIVE_INFINITY);
    return new LeafBucketCollectorBase(sub, allValues) {

        @Override
        public void collect(int doc, long bucket) throws IOException {
            if (bucket >= maxes.size()) {
                long from = maxes.size();
                maxes = bigArrays.grow(maxes, bucket + 1);
                maxes.fill(from, maxes.size(), Double.NEGATIVE_INFINITY);
            }
            final double value = values.get(doc);
            double max = maxes.get(bucket);
            max = Math.max(max, value);
            maxes.set(bucket, max);
        }

    };
}
 
Example 11  Project: Elasticsearch  File: MinAggregator.java
@Override
public LeafBucketCollector getLeafCollector(LeafReaderContext ctx,
        final LeafBucketCollector sub) throws IOException {
    if (valuesSource == null) {
        return LeafBucketCollector.NO_OP_COLLECTOR;
    }
    final BigArrays bigArrays = context.bigArrays();
    final SortedNumericDoubleValues allValues = valuesSource.doubleValues(ctx);
    final NumericDoubleValues values = MultiValueMode.MIN.select(allValues, Double.POSITIVE_INFINITY);
    return new LeafBucketCollectorBase(sub, allValues) {

        @Override
        public void collect(int doc, long bucket) throws IOException {
            if (bucket >= mins.size()) {
                long from = mins.size();
                mins = bigArrays.grow(mins, bucket + 1);
                mins.fill(from, mins.size(), Double.POSITIVE_INFINITY);
            }
            final double value = values.get(doc);
            double min = mins.get(bucket);
            min = Math.min(min, value);
            mins.set(bucket, min);
        }

    };
}
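
Note the idiom MaxAggregator and MinAggregator share: grow preserves existing values, so after growing only the newly allocated tail [from, size) is filled with the sentinel (negative infinity for max, positive infinity for min).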
 
Example 12  Project: Elasticsearch  File: DefaultSearchContext.java
public DefaultSearchContext(long id, ShardSearchRequest request, SearchShardTarget shardTarget,
                            Engine.Searcher engineSearcher, IndexService indexService, IndexShard indexShard,
                            ScriptService scriptService, PageCacheRecycler pageCacheRecycler,
                            BigArrays bigArrays, Counter timeEstimateCounter, ParseFieldMatcher parseFieldMatcher,
                            TimeValue timeout) {
    super(parseFieldMatcher, request);
    this.id = id;
    this.request = request;
    this.searchType = request.searchType();
    this.shardTarget = shardTarget;
    this.engineSearcher = engineSearcher;
    this.scriptService = scriptService;
    this.pageCacheRecycler = pageCacheRecycler;
    // SearchContexts use a BigArrays that can circuit break
    this.bigArrays = bigArrays.withCircuitBreaking();
    this.dfsResult = new DfsSearchResult(id, shardTarget);
    this.queryResult = new QuerySearchResult(id, shardTarget);
    this.fetchResult = new FetchSearchResult(id, shardTarget);
    this.indexShard = indexShard;
    this.indexService = indexService;
    this.searcher = new ContextIndexSearcher(engineSearcher, indexService.cache().query(), indexShard.getQueryCachingPolicy());
    this.timeEstimateCounter = timeEstimateCounter;
    this.timeoutInMillis = timeout.millis();
}
 
Example 13  Project: Elasticsearch  File: ShadowIndexShard.java
@Inject
public ShadowIndexShard(ShardId shardId, IndexSettingsService indexSettingsService,
                        IndicesLifecycle indicesLifecycle, Store store, StoreRecoveryService storeRecoveryService,
                        ThreadPool threadPool, MapperService mapperService,
                        IndexQueryParserService queryParserService, IndexCache indexCache,
                        IndexAliasesService indexAliasesService, IndicesQueryCache indicesQueryCache,
                        ShardPercolateService shardPercolateService, CodecService codecService,
                        ShardTermVectorsService termVectorsService, IndexFieldDataService indexFieldDataService,
                        IndexService indexService, @Nullable IndicesWarmer warmer,
                        SnapshotDeletionPolicy deletionPolicy, SimilarityService similarityService,
                        EngineFactory factory, ClusterService clusterService,
                        ShardPath path, BigArrays bigArrays, IndexSearcherWrappingService wrappingService,
                        IndexingMemoryController indexingMemoryController, SearchService shardSearchService) throws IOException {
    super(shardId, indexSettingsService, indicesLifecycle, store, storeRecoveryService,
          threadPool, mapperService, queryParserService, indexCache, indexAliasesService,
          indicesQueryCache, shardPercolateService, codecService,
          termVectorsService, indexFieldDataService, indexService,
          warmer, deletionPolicy, similarityService,
          factory, clusterService, path, bigArrays, wrappingService, indexingMemoryController, shardSearchService);
}
 
Example 14  Project: Elasticsearch  File: TranslogConfig.java
/**
 * Creates a new TranslogConfig instance.
 * @param shardId the shard ID this translog belongs to
 * @param translogPath the path to use for the transaction log files
 * @param indexSettings the index settings used to set internal variables
 * @param durabilty the default durability setting for the translog
 * @param bigArrays a BigArrays instance used to temporarily allocate write operations
 * @param threadPool a {@link ThreadPool} used to schedule syncs when durability is async
 */
public TranslogConfig(ShardId shardId, Path translogPath, Settings indexSettings, Translog.Durabilty durabilty, BigArrays bigArrays, @Nullable ThreadPool threadPool) {
    this.indexSettings = indexSettings;
    this.shardId = shardId;
    this.translogPath = translogPath;
    this.durabilty = durabilty;
    this.threadPool = threadPool;
    this.bigArrays = bigArrays;
    this.type = TranslogWriter.Type.fromString(indexSettings.get(INDEX_TRANSLOG_FS_TYPE, TranslogWriter.Type.BUFFERED.name()));
    this.bufferSize = (int) indexSettings.getAsBytesSize(INDEX_TRANSLOG_BUFFER_SIZE, IndexingMemoryController.INACTIVE_SHARD_TRANSLOG_BUFFER).bytes(); // Not really interesting, updated by IndexingMemoryController...

    syncInterval = indexSettings.getAsTime(INDEX_TRANSLOG_SYNC_INTERVAL, TimeValue.timeValueSeconds(5));
    if (syncInterval.millis() > 0 && threadPool != null) {
        syncOnEachOperation = false;
    } else if (syncInterval.millis() == 0) {
        syncOnEachOperation = true;
    } else {
        syncOnEachOperation = false;
    }
}
 
Example 15  Project: Elasticsearch  File: Translog.java
/**
 * Writes all operations in the given iterable to the given output stream, including the size of the array.
 * Use {@link #readOperations(StreamInput)} to read it back.
 */
public static void writeOperations(StreamOutput outStream, List<Operation> toWrite) throws IOException {
    final ReleasableBytesStreamOutput out = new ReleasableBytesStreamOutput(BigArrays.NON_RECYCLING_INSTANCE);
    try {
        outStream.writeInt(toWrite.size());
        final BufferedChecksumStreamOutput checksumStreamOutput = new BufferedChecksumStreamOutput(out);
        for (Operation op : toWrite) {
            out.reset();
            final long start = out.position();
            out.skip(RamUsageEstimator.NUM_BYTES_INT);
            writeOperationNoSize(checksumStreamOutput, op);
            long end = out.position();
            int operationSize = (int) (out.position() - RamUsageEstimator.NUM_BYTES_INT - start);
            out.seek(start);
            out.writeInt(operationSize);
            out.seek(end);
            ReleasablePagedBytesReference bytes = out.bytes();
            bytes.writeTo(outStream);
        }
    } finally {
        Releasables.close(out.bytes());
    }

}
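
A minimal, hedged usage sketch of the round trip the Javadoc describes, using the readOperations(StreamInput) counterpart it references and an in-memory BytesStreamOutput:

import java.io.IOException;
import java.util.List;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.index.translog.Translog;

class TranslogRoundTrip {
    static List<Translog.Operation> roundTrip(List<Translog.Operation> ops) throws IOException {
        BytesStreamOutput out = new BytesStreamOutput();
        Translog.writeOperations(out, ops);     // writes the count, then each size-prefixed, checksummed op
        return Translog.readOperations(out.bytes().streamInput());
    }
}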
 
Example 16  Project: crate  File: MockTcpTransport.java
public MockTcpTransport(Settings settings, ThreadPool threadPool, BigArrays bigArrays,
                        CircuitBreakerService circuitBreakerService, NamedWriteableRegistry namedWriteableRegistry,
                        NetworkService networkService, Version mockVersion) {
    super("mock-tcp-transport",
          settings,
          threadPool,
          bigArrays,
          circuitBreakerService,
          namedWriteableRegistry,
          networkService);
    // We have our own cached thread pool here, and it is not bounded at all.
    // Using the ES thread factory is crucial for tests; otherwise disruption tests won't block that thread.
    executor = Executors.newCachedThreadPool(EsExecutors.daemonThreadFactory(settings,
                                                                             Transports.TEST_MOCK_TRANSPORT_THREAD_PREFIX));
    this.mockVersion = mockVersion;
}
 
Example 17  Project: Elasticsearch  File: NodeModule.java
@Override
protected void configure() {
    if (pageCacheRecyclerImpl == PageCacheRecycler.class) {
        bind(PageCacheRecycler.class).asEagerSingleton();
    } else {
        bind(PageCacheRecycler.class).to(pageCacheRecyclerImpl).asEagerSingleton();
    }
    if (bigArraysImpl == BigArrays.class) {
        bind(BigArrays.class).asEagerSingleton();
    } else {
        bind(BigArrays.class).to(bigArraysImpl).asEagerSingleton();
    }

    bind(Node.class).toInstance(node);
    bind(NodeSettingsService.class).asEagerSingleton();
    bind(NodeService.class).asEagerSingleton();
}
 
Example 18  Project: siren-join  File: TransportTermsByQueryAction.java
/**
 * Constructor
 */
@Inject
public TransportTermsByQueryAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
                                   TransportService transportService, IndicesService indicesService,
                                   CircuitBreakerService breakerService,
                                   ScriptService scriptService, PageCacheRecycler pageCacheRecycler,
                                   BigArrays bigArrays, ActionFilters actionFilters,
                                   IndexNameExpressionResolver indexNameExpressionResolver, Client client) {
  super(settings, TermsByQueryAction.NAME, threadPool, clusterService, transportService, actionFilters,
          indexNameExpressionResolver, TermsByQueryRequest.class, TermsByQueryShardRequest.class,
          // Use the generic threadpool which is cached, as we can end up with deadlock with the SEARCH threadpool
          ThreadPool.Names.GENERIC);
  this.indicesService = indicesService;
  this.scriptService = scriptService;
  this.pageCacheRecycler = pageCacheRecycler;
  this.bigArrays = bigArrays;
  this.breakerService = breakerService;
  this.client = client;
}
 
Example 19  Project: crate  File: Translog.java
/**
 * Writes all operations in the given iterable to the given output stream, including the size of the array.
 * Use {@link #readOperations(StreamInput, String)} to read it back.
 */
public static void writeOperations(StreamOutput outStream, List<Operation> toWrite) throws IOException {
    final ReleasableBytesStreamOutput out = new ReleasableBytesStreamOutput(BigArrays.NON_RECYCLING_INSTANCE);
    try {
        outStream.writeInt(toWrite.size());
        final BufferedChecksumStreamOutput checksumStreamOutput = new BufferedChecksumStreamOutput(out);
        for (Operation op : toWrite) {
            out.reset();
            final long start = out.position();
            out.skip(Integer.BYTES);
            writeOperationNoSize(checksumStreamOutput, op);
            long end = out.position();
            int operationSize = (int) (out.position() - Integer.BYTES - start);
            out.seek(start);
            out.writeInt(operationSize);
            out.seek(end);
            ReleasablePagedBytesReference bytes = out.bytes();
            bytes.writeTo(outStream);
        }
    } finally {
        Releasables.close(out);
    }

}
 
Example 20  Project: crate  File: Netty4Plugin.java
@Override
public Map<String, Supplier<Transport>> getTransports(Settings settings,
                                                      ThreadPool threadPool,
                                                      BigArrays bigArrays,
                                                      PageCacheRecycler pageCacheRecycler,
                                                      CircuitBreakerService circuitBreakerService,
                                                      NamedWriteableRegistry namedWriteableRegistry,
                                                      NetworkService networkService) {
    return Collections.singletonMap(
        NETTY_TRANSPORT_NAME,
        () -> new Netty4Transport(
            settings,
            threadPool,
            networkService,
            bigArrays,
            namedWriteableRegistry,
            circuitBreakerService
        )
    );
}
 
Example 21  Project: crate  File: Netty4Plugin.java
@Override
public Map<String, Supplier<HttpServerTransport>> getHttpTransports(Settings settings, ThreadPool threadPool, BigArrays bigArrays,
                                                                    CircuitBreakerService circuitBreakerService,
                                                                    NamedWriteableRegistry namedWriteableRegistry,
                                                                    NamedXContentRegistry xContentRegistry,
                                                                    NetworkService networkService,
                                                                    NodeClient nodeClient) {
    return Collections.singletonMap(
        NETTY_HTTP_TRANSPORT_NAME,
        () -> new Netty4HttpServerTransport(
            settings,
            networkService,
            bigArrays,
            threadPool,
            xContentRegistry,
            pipelineRegistry,
            nodeClient
        )
    );
}
 
Example 22  Project: crate  File: ShardCollectorProviderFactory.java
ShardCollectorProviderFactory(ClusterService clusterService,
                              Settings settings,
                              Schemas schemas,
                              ThreadPool threadPool,
                              TransportActionProvider transportActionProvider,
                              BlobIndicesService blobIndicesService,
                              Functions functions,
                              LuceneQueryBuilder luceneQueryBuilder,
                              NodeJobsCounter nodeJobsCounter,
                              BigArrays bigArrays) {
    this.settings = settings;
    this.schemas = schemas;
    this.clusterService = clusterService;
    this.threadPool = threadPool;
    this.transportActionProvider = transportActionProvider;
    this.blobIndicesService = blobIndicesService;
    this.functions = functions;
    this.luceneQueryBuilder = luceneQueryBuilder;
    this.nodeJobsCounter = nodeJobsCounter;
    this.bigArrays = bigArrays;
}
 
Example 23  File: OpenDistroSecuritySSLNettyHttpServerTransport.java
public OpenDistroSecuritySSLNettyHttpServerTransport(final Settings settings, final NetworkService networkService, final BigArrays bigArrays,
        final ThreadPool threadPool, final OpenDistroSecurityKeyStore odks, final NamedXContentRegistry namedXContentRegistry, final ValidatingDispatcher dispatcher,
        final SslExceptionHandler errorHandler) {
    super(settings, networkService, bigArrays, threadPool, namedXContentRegistry, dispatcher);
    this.odks = odks;
    this.threadContext = threadPool.getThreadContext();
    this.errorHandler = errorHandler;
}
 
Example 24  Project: Elasticsearch  File: DLBasedIndexShard.java
@Inject
public DLBasedIndexShard(ShardId shardId,
        IndexSettingsService indexSettingsService,
        IndicesLifecycle indicesLifecycle, Store store,
        StoreRecoveryService storeRecoveryService, ThreadPool threadPool,
        MapperService mapperService,
        IndexQueryParserService queryParserService, IndexCache indexCache,
        IndexAliasesService indexAliasesService,
        IndicesQueryCache indicesQueryCache,
        ShardPercolateService shardPercolateService,
        CodecService codecService,
        ShardTermVectorsService termVectorsService,
        IndexFieldDataService indexFieldDataService,
        IndexService indexService, IndicesWarmer warmer,
        SnapshotDeletionPolicy deletionPolicy,
        SimilarityService similarityService, EngineFactory factory,
        ClusterService clusterService, ShardPath path, BigArrays bigArrays,
        IndexSearcherWrappingService wrappingService,
        IndexingMemoryController indexingMemoryController,
        TransportGetOrChangePrimaryShardLeaseAction checkLeaseAction,
        TransportIndexShardStatsAction indexShardStatsAction,
        SearchService shardSearchService) {
    super(shardId, indexSettingsService, indicesLifecycle, store,
            storeRecoveryService, threadPool, mapperService, queryParserService,
            indexCache, indexAliasesService, indicesQueryCache,
            shardPercolateService, codecService, termVectorsService,
            indexFieldDataService, indexService, warmer, deletionPolicy,
            similarityService, factory, clusterService, path, bigArrays,
            wrappingService, indexingMemoryController, shardSearchService);
    this.localNodeId = clusterService.state().nodes().localNodeId();
    this.checkLeaseAction = checkLeaseAction;
    this.indexShardStatsAction = indexShardStatsAction;
}
 
Example 25  Project: Elasticsearch  File: SearchContextFactory.java
@Inject
public SearchContextFactory(LuceneQueryBuilder luceneQueryBuilder,
                            ClusterService clusterService,
                            ScriptService scriptService,
                            PageCacheRecycler pageCacheRecycler,
                            BigArrays bigArrays,
                            ThreadPool threadPool) {
    this.luceneQueryBuilder = luceneQueryBuilder;
    this.clusterService = clusterService;
    this.scriptService = scriptService;
    this.pageCacheRecycler = pageCacheRecycler;
    this.bigArrays = bigArrays;
    this.threadPool = threadPool;
}
 
Example 26  Project: crate  File: CrateNettyHttpServerTransport.java
@Inject
public CrateNettyHttpServerTransport(Settings settings,
                                     NetworkService networkService,
                                     BigArrays bigArrays,
                                     BlobService blobService,
                                     BlobIndices blobIndices,
                                     DiscoveryNodeService discoveryNodeService) {
    super(settings, networkService, bigArrays);
    this.blobService = blobService;
    this.blobIndices = blobIndices;
    this.discoveryNodeService = discoveryNodeService;
}
 
Example 27  Project: crate  File: MockTransportService.java
public static MockTcpTransport newMockTransport(Settings settings, Version version, ThreadPool threadPool) {
    // Some tests use MockTransportService for network-based testing. We run tests in multiple JVMs, so
    // concurrent tests could claim a port that another JVM just released, and if such a test simulates a
    // disconnect it might be smart enough to re-connect depending on what is tested. To reduce the risk
    // (this is very hard to debug), we use a different default port range per JVM unless the incoming
    // settings override it.
    int basePort = 10300 + (JVM_ORDINAL * 100); // use a non-default port; otherwise some cluster in this JVM might reuse the port
    settings = Settings.builder().put(TransportSettings.PORT.getKey(), basePort + "-" + (basePort + 100)).put(settings).build();
    NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(ClusterModule.getNamedWriteables());
    return new MockTcpTransport(settings, threadPool, BigArrays.NON_RECYCLING_INSTANCE,
        new NoneCircuitBreakerService(), namedWriteableRegistry, new NetworkService(Collections.emptyList()), version);
}
 
Example 28  Project: Elasticsearch  File: PercolatorService.java
@Inject
public PercolatorService(Settings settings, IndexNameExpressionResolver indexNameExpressionResolver, IndicesService indicesService,
                         PageCacheRecycler pageCacheRecycler, BigArrays bigArrays,
                         HighlightPhase highlightPhase, ClusterService clusterService,
                         AggregationPhase aggregationPhase, ScriptService scriptService,
                         MappingUpdatedAction mappingUpdatedAction) {
    super(settings);
    this.indexNameExpressionResolver = indexNameExpressionResolver;
    this.parseFieldMatcher = new ParseFieldMatcher(settings);
    this.indicesService = indicesService;
    this.pageCacheRecycler = pageCacheRecycler;
    this.bigArrays = bigArrays;
    this.clusterService = clusterService;
    this.highlightPhase = highlightPhase;
    this.aggregationPhase = aggregationPhase;
    this.scriptService = scriptService;
    this.mappingUpdatedAction = mappingUpdatedAction;
    this.sortParseElement = new SortParseElement();

    final long maxReuseBytes = settings.getAsBytesSize("indices.memory.memory_index.size_per_thread", new ByteSizeValue(1, ByteSizeUnit.MB)).bytes();
    cache = new CloseableThreadLocal<MemoryIndex>() {
        @Override
        protected MemoryIndex initialValue() {
            // TODO: should we expose payloads as an option? should offsets be turned on always?
            return new ExtendedMemoryIndex(true, false, maxReuseBytes);
        }
    };
    single = new SingleDocumentPercolatorIndex(cache);
    multi = new MultiDocumentPercolatorIndex(cache);

    percolatorTypes = new IntObjectHashMap<>(6);
    percolatorTypes.put(countPercolator.id(), countPercolator);
    percolatorTypes.put(queryCountPercolator.id(), queryCountPercolator);
    percolatorTypes.put(matchPercolator.id(), matchPercolator);
    percolatorTypes.put(queryPercolator.id(), queryPercolator);
    percolatorTypes.put(scoringPercolator.id(), scoringPercolator);
    percolatorTypes.put(topMatchingPercolator.id(), topMatchingPercolator);
}
 
Example 29  Project: Elasticsearch
@Override
public LeafBucketCollector getLeafCollector(LeafReaderContext ctx,
        final LeafBucketCollector sub) throws IOException {
    if (valuesSource == null) {
        return LeafBucketCollector.NO_OP_COLLECTOR;
    }
    final BigArrays bigArrays = context.bigArrays();
    final SortedNumericDoubleValues values = valuesSource.doubleValues(ctx);
    return new LeafBucketCollectorBase(sub, values) {
        @Override
        public void collect(int doc, long bucket) throws IOException {
            states = bigArrays.grow(states, bucket + 1);

            TDigestState state = states.get(bucket);
            if (state == null) {
                state = new TDigestState(compression);
                states.set(bucket, state);
            }

            values.setDocument(doc);
            final int valueCount = values.count();
            for (int i = 0; i < valueCount; i++) {
                state.add(values.valueAt(i));
            }
        }
    };
}
 
Example 30  Project: Elasticsearch  File: InternalCardinality.java
@Override
protected void doReadFrom(StreamInput in) throws IOException {
    valueFormatter = ValueFormatterStreams.readOptional(in);
    if (in.readBoolean()) {
        counts = HyperLogLogPlusPlus.readFrom(in, BigArrays.NON_RECYCLING_INSTANCE);
    } else {
        counts = null;
    }
}