Listed below are example usages of org.elasticsearch.search.aggregations.InternalAggregation. You can click through to GitHub to view the full source code, or leave a comment on the right.
@Override
public InternalAggregation doReduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
    InternalCardinality reduced = null;
    for (InternalAggregation aggregation : aggregations) {
        final InternalCardinality cardinality = (InternalCardinality) aggregation;
        if (cardinality.counts != null) {
            if (reduced == null) {
                reduced = new InternalCardinality(name, new HyperLogLogPlusPlus(cardinality.counts.precision(),
                        BigArrays.NON_RECYCLING_INSTANCE, 1), this.valueFormatter, pipelineAggregators(), getMetaData());
            }
            reduced.merge(cardinality);
        }
    }
    if (reduced == null) { // all empty
        return aggregations.get(0);
    } else {
        return reduced;
    }
}
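This is the canonical reduce shape for metric aggregations: the first non-empty partial triggers creation of a fresh accumulator, every partial is merged into it, and if all partials are empty any one of them serves as the empty result. A minimal hedged sketch of that shape, where Sketch is a made-up stand-in for HyperLogLogPlusPlus:

import java.util.List;
import java.util.function.Supplier;

// Hypothetical stand-in for HyperLogLogPlusPlus: only merge() matters here.
interface Sketch {
    void merge(Sketch other);
}

class ReduceDemo {
    // Mirrors doReduce above: a fresh accumulator is created on the first
    // non-null partial, then every partial is folded in; null models a shard
    // that saw no values.
    static Sketch reducePartials(List<Sketch> partials, Supplier<Sketch> fresh) {
        Sketch reduced = null;
        for (Sketch partial : partials) {
            if (partial == null) {
                continue;              // shard with no data
            }
            if (reduced == null) {
                reduced = fresh.get(); // like the new HyperLogLogPlusPlus above
            }
            reduced.merge(partial);
        }
        return reduced;                // null when every shard was empty
    }
}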
/**
 * Wrap the provided aggregator so that it behaves (almost) as if it had
 * been collected directly.
 */
@Override
public Aggregator wrap(final Aggregator in) {
    return new WrappedAggregator(in) {
        @Override
        public InternalAggregation buildAggregation(long bucket) throws IOException {
            if (selectedBuckets == null) {
                throw new IllegalStateException("Collection has not been replayed yet.");
            }
            final long rebasedBucket = selectedBuckets.find(bucket);
            if (rebasedBucket == -1) {
                throw new IllegalStateException("Cannot build for a bucket which has not been collected");
            }
            return in.buildAggregation(rebasedBucket);
        }
    };
}
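In this deferring collector, selectedBuckets remaps original bucket ordinals onto the dense 0..n-1 range used during replay, with -1 flagging a bucket that was never selected. A hedged sketch of that rebasing idea with a plain map (the LongHash-style find-returns-minus-one behavior is the only thing assumed):

import java.util.HashMap;
import java.util.Map;

// Hypothetical stand-in for the ordinal rebasing behind selectedBuckets.find(bucket).
class OrdinalRebaser {
    private final Map<Long, Long> dense = new HashMap<>();

    long select(long originalOrd) {   // called once per surviving bucket
        Long existing = dense.get(originalOrd);
        if (existing != null) {
            return existing;
        }
        long next = dense.size();     // dense ordinals are assigned 0, 1, 2, ... in selection order
        dense.put(originalOrd, next);
        return next;
    }

    long find(long originalOrd) {     // called at build time; -1 means "never collected"
        return dense.getOrDefault(originalOrd, -1L);
    }
}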
@Override
public InternalAggregation doReduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
    List<B> reducedBuckets = reduceBuckets(aggregations, reduceContext);

    // adding empty buckets if needed
    if (minDocCount == 0) {
        addEmptyBuckets(reducedBuckets, reduceContext);
    }

    if (order == InternalOrder.KEY_ASC) {
        // nothing to do, data are already sorted since shards return
        // sorted buckets and the merge-sort performed by reduceBuckets
        // maintains order
    } else if (order == InternalOrder.KEY_DESC) {
        // we just need to reverse here...
        List<B> reverse = new ArrayList<>(reducedBuckets);
        Collections.reverse(reverse);
        reducedBuckets = reverse;
    } else {
        // sorted by sub-aggregation, need to fall back to a costly n*log(n) sort
        CollectionUtil.introSort(reducedBuckets, order.comparator());
    }

    return getFactory().create(getName(), reducedBuckets, order, minDocCount, emptyBucketInfo, formatter, keyed,
            pipelineAggregators(), getMetaData());
}
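Because every shard returns key-ascending buckets and reduceBuckets merge-sorts them, KEY_ASC costs nothing, KEY_DESC is a single O(n) reverse, and only sub-aggregation orders pay the full comparator sort. A small hedged sketch of that three-way dispatch (the String order and Integer buckets are stand-ins for the real types):

import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;

class OrderDemo {
    // Miniature of the dispatch above: input arrives key-ascending from the merge-sort.
    static List<Integer> applyOrder(List<Integer> ascending, String order, Comparator<Integer> subAggComparator) {
        if ("KEY_ASC".equals(order)) {
            return ascending;                  // already sorted: nothing to do
        } else if ("KEY_DESC".equals(order)) {
            List<Integer> reversed = new ArrayList<>(ascending);
            Collections.reverse(reversed);     // O(n)
            return reversed;
        } else {
            List<Integer> sorted = new ArrayList<>(ascending);
            sorted.sort(subAggComparator);     // O(n log n) fallback for sub-agg orders
            return sorted;
        }
    }
}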
@Override
public InternalAggregation reduce(InternalAggregation aggregation, ReduceContext reduceContext) {
    InternalHistogram histo = (InternalHistogram) aggregation;
    List<? extends InternalHistogram.Bucket> buckets = histo.getBuckets();
    InternalHistogram.Factory<? extends InternalHistogram.Bucket> factory = histo.getFactory();
    List<InternalHistogram.Bucket> newBuckets = new ArrayList<>();
    double sum = 0;
    for (InternalHistogram.Bucket bucket : buckets) {
        Double thisBucketValue = resolveBucketValue(histo, bucket, bucketsPaths()[0], GapPolicy.INSERT_ZEROS);
        sum += thisBucketValue;
        List<InternalAggregation> aggs = new ArrayList<>(eagerTransform(bucket.getAggregations().asList(),
                AGGREGATION_TRANFORM_FUNCTION));
        aggs.add(new InternalSimpleValue(name(), sum, formatter, new ArrayList<PipelineAggregator>(), metaData()));
        InternalHistogram.Bucket newBucket = factory.createBucket(bucket.getKey(), bucket.getDocCount(),
                new InternalAggregations(aggs), bucket.getKeyed(), bucket.getFormatter());
        newBuckets.add(newBucket);
    }
    return factory.create(newBuckets, histo);
}
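The cumulative sum is just a running total over the gap-filled bucket values; INSERT_ZEROS makes an empty bucket contribute 0 rather than breaking the chain. A standalone miniature of that accumulation:

class CumulativeSumDemo {
    // Hedged miniature of the running total above; null models an empty bucket
    // that GapPolicy.INSERT_ZEROS turns into 0.0 before accumulation.
    static double[] cumulativeSum(Double[] bucketValues) {
        double[] out = new double[bucketValues.length];
        double sum = 0;
        for (int i = 0; i < bucketValues.length; i++) {
            sum += bucketValues[i] == null ? 0.0 : bucketValues[i];
            out[i] = sum;
        }
        return out; // cumulativeSum({1.0, null, 2.0}) -> {1.0, 1.0, 3.0}
    }
}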
@Override
protected InternalAggregation buildAggregation(List<PipelineAggregator> pipelineAggregators, Map<String, Object> metadata) {
    // Perform the sorting and percentile collection now that all the data
    // has been collected.
    Collections.sort(data);

    double[] percentiles = new double[percents.length];
    if (data.isEmpty()) {
        for (int i = 0; i < percents.length; i++) {
            percentiles[i] = Double.NaN;
        }
    } else {
        for (int i = 0; i < percents.length; i++) {
            // nearest-rank percentile; clamp so that percent == 100 cannot index one past the end
            int index = Math.min((int) ((percents[i] / 100.0) * data.size()), data.size() - 1);
            percentiles[i] = data.get(index);
        }
    }

    // todo need postCollection() to clean up temp sorted data?
    return new InternalPercentilesBucket(name(), percents, percentiles, formatter, pipelineAggregators, metadata);
}
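The index arithmetic is the nearest-rank method: for a sorted list of n values, percent p maps to index floor((p / 100) * n). For data = [1, 3, 5, 7] (n = 4), p = 50 gives index 2 and hence the value 5, while p = 100 gives index 4, one past the end, which is why the clamp to n - 1 above matters.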
@Override
public Object getProperty(List<String> path) {
    if (path.isEmpty()) {
        return this;
    } else {
        String aggName = path.get(0);
        if (aggName.equals("_count")) {
            if (path.size() > 1) {
                throw new IllegalArgumentException("_count must be the last element in the path");
            }
            return getDocCount();
        }
        InternalAggregation aggregation = aggregations.get(aggName);
        if (aggregation == null) {
            throw new IllegalArgumentException("Cannot find an aggregation named [" + aggName + "] in [" + getName() + "]");
        }
        return aggregation.getProperty(path.subList(1, path.size()));
    }
}
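getProperty walks the path one segment at a time, consuming the head and recursing into the named sub-aggregation with the tail. A hedged usage sketch (the aggregation and path names are invented for illustration):

import java.util.Arrays;

class GetPropertyDemo {
    // Hypothetical usage: "agg" is a single-bucket aggregation whose sub-aggregation
    // name ("avg_price") and its "value" leaf are assumptions for this sketch.
    static void demo(InternalAggregation agg) {
        Object docCount = agg.getProperty(Arrays.asList("_count"));              // this bucket's doc count
        Object avgValue = agg.getProperty(Arrays.asList("avg_price", "value"));  // recurses into the sub-agg
    }
}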
@Override
public final InternalAggregation doReduce(Aggregations aggregations, ReduceContext context) {
    preCollection();
    List<String> bucketsPath = AggregationPath.parse(bucketsPaths()[0]).getPathElementsAsStringList();
    for (Aggregation aggregation : aggregations) {
        if (aggregation.getName().equals(bucketsPath.get(0))) {
            bucketsPath = bucketsPath.subList(1, bucketsPath.size());
            InternalMultiBucketAggregation multiBucketsAgg = (InternalMultiBucketAggregation) aggregation;
            List<? extends Bucket> buckets = multiBucketsAgg.getBuckets();
            for (int i = 0; i < buckets.size(); i++) {
                Bucket bucket = buckets.get(i);
                Double bucketValue = BucketHelpers.resolveBucketValue(multiBucketsAgg, bucket, bucketsPath, gapPolicy);
                if (bucketValue != null && !Double.isNaN(bucketValue)) {
                    collectBucketValue(bucket.getKeyAsString(), bucketValue);
                }
            }
        }
    }
    return buildAggregation(Collections.<PipelineAggregator>emptyList(), metaData());
}
@Override
protected Aggregator createUnmapped(SearchContext searchContext, Aggregator parent,
        List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData) throws IOException {
    final InternalAggregation aggregation = new InternalPathHierarchy(name, new ArrayList<>(), order, minDocCount,
            bucketCountThresholds.getRequiredSize(), bucketCountThresholds.getShardSize(), 0, separator,
            pipelineAggregators, metaData);
    return new NonCollectingAggregator(name, searchContext, parent, factories, pipelineAggregators, metaData) {
        {
            // even in the case of an unmapped aggregator, validate the order
            InternalOrder.validate(order, this);
        }

        @Override
        public InternalAggregation buildEmptyAggregation() {
            return aggregation;
        }
    };
}
@Override
public InternalAggregation buildAggregation(long owningBucketOrdinal) {
    Object aggregation;
    if (combineScript != null) {
        aggregation = combineScript.run();
    } else {
        aggregation = params.get("_agg");
    }
    return new InternalScriptedMetric(name, aggregation, reduceScript, pipelineAggregators(), metaData());
}
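When no combine_script is supplied, the shard ships its raw "_agg" state instead, which is exactly the else-branch above. A hedged sketch of the scripted_metric contract as it looked in this era (Groovy-style script bodies; the field name "amount" is an assumption):

// Each shard: init_script seeds the shared "_agg" state, map_script folds in documents,
// and combine_script optionally shrinks the state before it leaves the shard. The
// coordinating node then runs reduce_script over the per-shard results in "_aggs".
//
//   "init_script":    "_agg.sum = 0",
//   "map_script":     "_agg.sum += doc['amount'].value",
//   "combine_script": "return _agg.sum",
//   "reduce_script":  "double total = 0; for (s in _aggs) { total += s }; return total"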
@Override
public InternalAggregation buildAggregation(long owningBucketOrdinal) {
    TopDocsAndLeafCollector topDocsCollector = topDocsCollectors.get(owningBucketOrdinal);
    final InternalTopHits topHits;
    if (topDocsCollector == null) {
        topHits = buildEmptyAggregation();
    } else {
        final TopDocs topDocs = topDocsCollector.topLevelCollector.topDocs();
        subSearchContext.queryResult().topDocs(topDocs);

        int[] docIdsToLoad = new int[topDocs.scoreDocs.length];
        for (int i = 0; i < topDocs.scoreDocs.length; i++) {
            docIdsToLoad[i] = topDocs.scoreDocs[i].doc;
        }
        subSearchContext.docIdsToLoad(docIdsToLoad, 0, docIdsToLoad.length);
        fetchPhase.execute(subSearchContext);

        FetchSearchResult fetchResult = subSearchContext.fetchResult();
        InternalSearchHit[] internalHits = fetchResult.fetchResult().hits().internalHits();
        for (int i = 0; i < internalHits.length; i++) {
            ScoreDoc scoreDoc = topDocs.scoreDocs[i];
            InternalSearchHit searchHitFields = internalHits[i];
            searchHitFields.shard(subSearchContext.shardTarget());
            searchHitFields.score(scoreDoc.score);
            if (scoreDoc instanceof FieldDoc) {
                FieldDoc fieldDoc = (FieldDoc) scoreDoc;
                searchHitFields.sortValues(fieldDoc.fields);
            }
        }
        topHits = new InternalTopHits(name, subSearchContext.from(), subSearchContext.size(), topDocs,
                fetchResult.hits(), pipelineAggregators(), metaData());
    }
    return topHits;
}
@Override
public InternalAggregation buildAggregation(long owningBucketOrdinal) {
    TDigestState state = getState(owningBucketOrdinal);
    if (state == null) {
        return buildEmptyAggregation();
    } else {
        return new InternalTDigestPercentiles(name, keys, state, keyed, formatter, pipelineAggregators(), metaData());
    }
}
@Override
public InternalAggregation buildAggregation(long owningBucketOrdinal) {
    StreamSummary<Term> summary = summaries == null || owningBucketOrdinal >= summaries.size()
            ? null : summaries.get(owningBucketOrdinal);
    InternalTopK topk = new InternalTopK(name, size, summary);
    for (TopK.Bucket bucket : topk.getBuckets()) {
        bucket.aggregations = bucketAggregations(bucket.bucketOrd);
    }
    return topk;
}
@Override
public InternalAggregation buildAggregation(long owningBucketOrdinal) {
    TDigestState state = getState(owningBucketOrdinal);
    if (state == null) {
        return buildEmptyAggregation();
    } else {
        return new InternalTDigestPercentileRanks(name, keys, state, keyed, formatter, pipelineAggregators(), metaData());
    }
}
@Override
public AbstractInternalTDigestPercentiles doReduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
    TDigestState merged = null;
    for (InternalAggregation aggregation : aggregations) {
        final AbstractInternalTDigestPercentiles percentiles = (AbstractInternalTDigestPercentiles) aggregation;
        if (merged == null) {
            merged = new TDigestState(percentiles.state.compression());
        }
        merged.add(percentiles.state);
    }
    return createReduced(getName(), keys, merged, keyed, pipelineAggregators(), getMetaData());
}
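TDigestState wraps a t-digest sketch, and add(other) merges whole digests, so the reduce is a straight fold over the partials. A hedged sketch reusing only the calls visible above (quantile() is assumed from the underlying t-digest implementation):

import java.util.List;

class DigestMergeDemo {
    // Mirrors doReduce above: one fresh digest, every shard's state folded in.
    static TDigestState mergeShards(List<TDigestState> shardStates, double compression) {
        TDigestState merged = new TDigestState(compression);
        for (TDigestState state : shardStates) {
            merged.add(state);   // same merge call as in doReduce
        }
        return merged;           // merged.quantile(0.5) would approximate the median of the union
    }
}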
@Override
public InternalAggregation buildAggregation(long owningBucketOrdinal) {
    if (counts == null || owningBucketOrdinal >= counts.maxBucket() || counts.cardinality(owningBucketOrdinal) == 0) {
        return buildEmptyAggregation();
    }
    // We need to build a copy because the returned Aggregation needs to remain usable after
    // this Aggregator (and its HLL++ counters) is released.
    HyperLogLogPlusPlus copy = new HyperLogLogPlusPlus(precision, BigArrays.NON_RECYCLING_INSTANCE, 1);
    copy.merge(0, counts, owningBucketOrdinal);
    return new InternalCardinality(name, copy, formatter, pipelineAggregators(), metaData());
}
@Override
public InternalAggregation buildAggregation(long owningBucketOrdinal) throws IOException {
    assert owningBucketOrdinal == 0;
    List<InternalHistogram.Bucket> buckets = new ArrayList<>((int) bucketOrds.size());
    for (long i = 0; i < bucketOrds.size(); i++) {
        buckets.add(histogramFactory.createBucket(rounding.valueForKey(bucketOrds.get(i)), bucketDocCount(i),
                bucketAggregations(i), keyed, formatter));
    }

    // the contract of the histogram aggregation is that shards must return buckets ordered by key in ascending order
    CollectionUtil.introSort(buckets, InternalOrder.KEY_ASC.comparator());

    // value source will be null for unmapped fields
    InternalHistogram.EmptyBucketInfo emptyBucketInfo = minDocCount == 0
            ? new InternalHistogram.EmptyBucketInfo(rounding, buildEmptySubAggregations(), extendedBounds)
            : null;
    return histogramFactory.create(name, buckets, order, minDocCount, emptyBucketInfo, formatter, keyed,
            pipelineAggregators(), metaData());
}
@Override
public InternalAggregation buildAggregation(long bucket) {
    if (valuesSource == null || bucket >= sums.size()) {
        return buildEmptyAggregation();
    }
    return new InternalAvg(name, sums.get(bucket), counts.get(bucket), formatter, pipelineAggregators(), metaData());
}
@Override
protected Aggregator createUnmapped(SearchContext searchContext, Aggregator parent,
        List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData) throws IOException {
    final InternalAggregation aggregation = new InternalGeoShape(name, new ArrayList<>(), output_format,
            bucketCountThresholds.getRequiredSize(), bucketCountThresholds.getShardSize(),
            pipelineAggregators, metaData);
    return new NonCollectingAggregator(name, searchContext, parent, factories, pipelineAggregators, metaData) {
        @Override
        public InternalAggregation buildEmptyAggregation() {
            return aggregation;
        }
    };
}
@Override
public InternalAggregation buildAggregation(long bucket) {
    if (valuesSource == null || bucket >= sums.size()) {
        return buildEmptyAggregation();
    }
    return new InternalStats(name, counts.get(bucket), sums.get(bucket), mins.get(bucket),
            maxes.get(bucket), formatter, pipelineAggregators(), metaData());
}
@Override
public InternalStats doReduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
    long count = 0;
    double min = Double.POSITIVE_INFINITY;
    double max = Double.NEGATIVE_INFINITY;
    double sum = 0;
    for (InternalAggregation aggregation : aggregations) {
        InternalStats stats = (InternalStats) aggregation;
        count += stats.getCount();
        min = Math.min(min, stats.getMin());
        max = Math.max(max, stats.getMax());
        sum += stats.getSum();
    }
    return new InternalStats(name, count, sum, min, max, valueFormatter, pipelineAggregators(), getMetaData());
}
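Count, sum, min, and max are all associative, which is what makes the shard-level partials exact to combine: shard A reporting (count 3, sum 12, min 1, max 7) and shard B reporting (count 2, sum 9, min 4, max 5) reduce to (count 5, sum 21, min 1, max 7), from which the average falls out as 21 / 5 = 4.2. Note the identity seeds, +Infinity for min and -Infinity for max, which let the loop run without a special first-iteration case.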
@Override
public InternalAggregation buildAggregation(long bucket) {
    if (valuesSource == null || bucket >= counts.size()) {
        return buildEmptyAggregation();
    }
    return new InternalExtendedStats(name, counts.get(bucket), sums.get(bucket),
            mins.get(bucket), maxes.get(bucket), sumOfSqrs.get(bucket), sigma, formatter,
            pipelineAggregators(), metaData());
}
@Override
public InternalAggregation buildAggregation(long bucket) {
    if (valuesSource == null || bucket >= sums.size()) {
        return buildEmptyAggregation();
    }
    return new InternalSum(name, sums.get(bucket), formatter, pipelineAggregators(), metaData());
}
@Override
public InternalSum doReduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
    double sum = 0;
    for (InternalAggregation aggregation : aggregations) {
        sum += ((InternalSum) aggregation).sum;
    }
    return new InternalSum(name, sum, valueFormatter, pipelineAggregators(), getMetaData());
}
@Override
public InternalAggregation buildAggregation(long bucket) {
    if (valuesSource == null || bucket >= maxes.size()) {
        return buildEmptyAggregation();
    }
    return new InternalMax(name, maxes.get(bucket), formatter, pipelineAggregators(), metaData());
}
@Override
public InternalAggregation buildAggregation(long bucket) {
    if (valuesSource == null || bucket >= centroids.size()) {
        return buildEmptyAggregation();
    }
    final long bucketCount = counts.get(bucket);
    final GeoPoint bucketCentroid = (bucketCount > 0) ? GeoPoint.fromIndexLong(centroids.get(bucket))
            : new GeoPoint(Double.NaN, Double.NaN);
    return new InternalGeoCentroid(name, bucketCentroid, bucketCount, pipelineAggregators(), metaData());
}
@Override
public InternalAggregation buildAggregation(long bucket) {
    if (valuesSource == null || bucket >= counts.size()) {
        return buildEmptyAggregation();
    }
    return new InternalValueCount(name, counts.get(bucket), formatter, pipelineAggregators(), metaData());
}
@Override
public InternalAggregation doReduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
    long valueCount = 0;
    for (InternalAggregation aggregation : aggregations) {
        valueCount += ((InternalValueCount) aggregation).value;
    }
    return new InternalValueCount(name, valueCount, valueFormatter, pipelineAggregators(), getMetaData());
}
private List<InternalAggregation> toInternal(List<Aggregation> list) {
    List<InternalAggregation> t = new ArrayList<>(list.size());
    for (Aggregation a : list) {
        t.add((InternalAggregation) a);
    }
    return t;
}
@Override
protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent,
        List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData) throws IOException {
    return new NonCollectingAggregator(name, aggregationContext, parent, pipelineAggregators, metaData) {
        @Override
        public InternalAggregation buildEmptyAggregation() {
            return new InternalChildren(name, 0, buildEmptySubAggregations(), pipelineAggregators(), metaData());
        }
    };
}
@Override
public InternalAggregation reduce(InternalAggregation aggregation, ReduceContext reduceContext) {
    InternalMultiBucketAggregation<InternalMultiBucketAggregation, InternalMultiBucketAggregation.InternalBucket> originalAgg =
            (InternalMultiBucketAggregation<InternalMultiBucketAggregation, InternalMultiBucketAggregation.InternalBucket>) aggregation;
    List<? extends Bucket> buckets = originalAgg.getBuckets();

    CompiledScript compiledScript = reduceContext.scriptService().compile(script, ScriptContext.Standard.AGGS,
            reduceContext, Collections.<String, String>emptyMap());
    List<InternalMultiBucketAggregation.InternalBucket> newBuckets = new ArrayList<>();
    for (Bucket bucket : buckets) {
        Map<String, Object> vars = new HashMap<>();
        if (script.getParams() != null) {
            vars.putAll(script.getParams());
        }
        for (Map.Entry<String, String> entry : bucketsPathsMap.entrySet()) {
            String varName = entry.getKey();
            String bucketsPath = entry.getValue();
            Double value = resolveBucketValue(originalAgg, bucket, bucketsPath, gapPolicy);
            vars.put(varName, value);
        }
        ExecutableScript executableScript = reduceContext.scriptService().executable(compiledScript, vars);
        Object scriptReturnValue = executableScript.run();
        final boolean keepBucket;
        // expression scripts cannot return booleans, so they encode the decision as 1.0 (keep) / 0.0 (drop)
        if ("expression".equals(script.getLang())) {
            double scriptDoubleValue = (double) scriptReturnValue;
            keepBucket = scriptDoubleValue == 1.0;
        } else {
            keepBucket = (boolean) scriptReturnValue;
        }
        if (keepBucket) {
            newBuckets.add((InternalMultiBucketAggregation.InternalBucket) bucket);
        }
    }
    return originalAgg.create(newBuckets);
}
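The only subtle part of this bucket_selector reduce is the return-value coercion: Lucene expression scripts can only produce doubles, so a "boolean" arrives as 1.0/0.0, while other script languages hand back a real Boolean. That decision, extracted into a hypothetical standalone helper:

class KeepBucketDemo {
    // Mirrors the coercion in the reduce above; both parameter names are invented.
    static boolean keepBucket(Object scriptReturnValue, boolean isExpressionLang) {
        if (isExpressionLang) {
            return ((Number) scriptReturnValue).doubleValue() == 1.0; // 1.0 means "keep"
        }
        return (Boolean) scriptReturnValue;
    }
}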