Source code examples for the class org.apache.lucene.index.LeafReaderContext

The examples below show how the org.apache.lucene.index.LeafReaderContext API is used in practice. Each snippet is taken from an open-source project, and the full source can be viewed on GitHub.
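Before the individual examples, a minimal sketch of the canonical access pattern may help: an IndexReader is a list of leaves, and each LeafReaderContext wraps one segment's LeafReader together with its docBase offset into the composite doc-id space. The index path and field name below are placeholder assumptions, not part of any example on this page.

import java.io.IOException;
import java.nio.file.Paths;

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.store.FSDirectory;

public class LeafReaderContextDemo {
  public static void main(String[] args) throws IOException {
    // Open a composite reader over an existing index (the path is a placeholder).
    try (DirectoryReader reader = DirectoryReader.open(FSDirectory.open(Paths.get("/path/to/index")))) {
      // A composite reader exposes one LeafReaderContext per segment.
      for (LeafReaderContext ctx : reader.leaves()) {
        // Doc values are read per segment; the doc ids below are segment-local.
        NumericDocValues values = DocValues.getNumeric(ctx.reader(), "price");
        for (int doc = values.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = values.nextDoc()) {
          // ctx.docBase maps a segment-local id back to a top-level id.
          System.out.println("doc " + (ctx.docBase + doc) + " -> " + values.longValue());
        }
      }
    }
  }
}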

Example 1  Project: lucene-solr  File: ToParentBlockJoinSortField.java
private FieldComparator<?> getFloatComparator(int numHits) {
  return new FieldComparator.FloatComparator(numHits, getField(), (Float) missingValue) {
    @Override
    protected NumericDocValues getNumericDocValues(LeafReaderContext context, String field) throws IOException {
      SortedNumericDocValues sortedNumeric = DocValues.getSortedNumeric(context.reader(), field);
      final BlockJoinSelector.Type type = order
          ? BlockJoinSelector.Type.MAX
          : BlockJoinSelector.Type.MIN;
      final BitSet parents = parentFilter.getBitSet(context);
      final BitSet children = childFilter.getBitSet(context);
      if (children == null) {
        return DocValues.emptyNumeric();
      }
      return new FilterNumericDocValues(BlockJoinSelector.wrap(sortedNumeric, type, parents, toIter(children))) {
        @Override
        public long longValue() throws IOException {
          // undo the numericutils sortability
          return NumericUtils.sortableFloatBits((int) super.longValue());
        }
      };
    }
  };
}
 
Example 2  Project: onedev  File: DefaultIndexManager.java
private String getCommitIndexVersion(final IndexSearcher searcher, AnyObjectId commitId) throws IOException {
	final AtomicReference<String> indexVersion = new AtomicReference<>(null);
	
	searcher.search(COMMIT_HASH.query(commitId.getName()), new SimpleCollector() {

		private int docBase;
		
		@Override
		public void collect(int doc) throws IOException {
			indexVersion.set(searcher.doc(docBase+doc).get(COMMIT_INDEX_VERSION.name()));
		}

		@Override
		protected void doSetNextReader(LeafReaderContext context) throws IOException {
			docBase = context.docBase;
		}

		@Override
		public boolean needsScores() {
			return false;
		}

	});
	return indexVersion.get();
}
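The docBase bookkeeping above is the standard collector pattern: collect(int doc) receives segment-local ids, and adding the context.docBase captured in doSetNextReader converts them into the top-level ids that searcher.doc expects.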
 
Example 3  Project: lucene-solr  File: LatLonPointSpatialField.java
@Override
public DoubleValues getValues(LeafReaderContext ctx, DoubleValues scores) throws IOException {
  return new DoubleValues() {

    @SuppressWarnings("unchecked")
    final FieldComparator<Double> comparator =
        (FieldComparator<Double>) getSortField(false).getComparator(1, 1);
    final LeafFieldComparator leafComparator = comparator.getLeafComparator(ctx);
    final double mult = multiplier; // so it's a local field

    double value = Double.POSITIVE_INFINITY;

    @Override
    public double doubleValue() throws IOException {
      return value;
    }

    @Override
    public boolean advanceExact(int doc) throws IOException {
      leafComparator.copy(0, doc);
      value = comparator.value(0) * mult;
      return true;
    }
  };
}
 
Example 4  Project: Elasticsearch  File: CrateDocCollector.java
private Result collectLeaves(SimpleCollector collector,
                             Weight weight,
                             Iterator<LeafReaderContext> leaves,
                             @Nullable BulkScorer bulkScorer,
                             @Nullable LeafReaderContext leaf) throws IOException {
    if (bulkScorer != null) {
        assert leaf != null : "leaf must not be null if bulkScorer isn't null";
        if (processScorer(collector, leaf, bulkScorer)) return Result.PAUSED;
    }
    try {
        while (leaves.hasNext()) {
            leaf = leaves.next();
            LeafCollector leafCollector = collector.getLeafCollector(leaf);
            Scorer scorer = weight.scorer(leaf);
            if (scorer == null) {
                continue;
            }
            bulkScorer = new DefaultBulkScorer(scorer);
            if (processScorer(leafCollector, leaf, bulkScorer)) return Result.PAUSED;
        }
    } finally {
        searchContext.clearReleasables(SearchContext.Lifetime.COLLECTION);
    }
    return Result.FINISHED;
}
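Handing the current bulkScorer and leaf back through Result.PAUSED lets a later invocation resume collection at the exact segment where it stopped, instead of re-scoring every leaf from the start.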
 
Example 5  Project: lucene-solr  File: PayloadFilteredTermIntervalsSource.java
@Override
public IntervalMatchesIterator matches(String field, LeafReaderContext ctx, int doc) throws IOException {
  Terms terms = ctx.reader().terms(field);
  if (terms == null)
    return null;
  if (terms.hasPositions() == false) {
    throw new IllegalArgumentException("Cannot create an IntervalIterator over field " + field + " because it has no indexed positions");
  }
  if (terms.hasPayloads() == false) {
    throw new IllegalArgumentException("Cannot create a payload-filtered iterator over field " + field + " because it has no indexed payloads");
  }
  TermsEnum te = terms.iterator();
  if (te.seekExact(term) == false) {
    return null;
  }
  return matches(te, doc);
}
 
Example 6  Project: lucene-solr  File: TestLRUQueryCache.java
@Override
public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
  return new ConstantScoreWeight(this, 1) {

    @Override
    public Scorer scorer(LeafReaderContext context) throws IOException {
      scorerCreatedCount.incrementAndGet();
      return new ConstantScoreScorer(this, 1, scoreMode, DocIdSetIterator.all(context.reader().maxDoc()));
    }

    @Override
    public boolean isCacheable(LeafReaderContext ctx) {
      return DocValues.isCacheable(ctx, field);
    }

  };
}
 
Example 7  Project: Elasticsearch  File: IncludeNestedDocsQuery.java
@Override
public Scorer scorer(LeafReaderContext context) throws IOException {
    final Scorer parentScorer = parentWeight.scorer(context);

    // no matches
    if (parentScorer == null) {
        return null;
    }

    BitSet parents = parentsFilter.getBitSet(context);
    if (parents == null) {
        // No matches
        return null;
    }

    int firstParentDoc = parentScorer.iterator().nextDoc();
    if (firstParentDoc == DocIdSetIterator.NO_MORE_DOCS) {
        // No matches
        return null;
    }
    return new IncludeNestedDocsScorer(this, parentScorer, parents, firstParentDoc);
}
 
Example 8  Project: lucene-solr  File: MinimumShouldMatchIntervalsSource.java
@Override
public IntervalMatchesIterator matches(String field, LeafReaderContext ctx, int doc) throws IOException {
  Map<IntervalIterator, CachingMatchesIterator> lookup = new IdentityHashMap<>();
  for (IntervalsSource source : sources) {
    IntervalMatchesIterator mi = source.matches(field, ctx, doc);
    if (mi != null) {
      CachingMatchesIterator cmi = new CachingMatchesIterator(mi);
      lookup.put(IntervalMatches.wrapMatches(cmi, doc), cmi);
    }
  }
  if (lookup.size() < minShouldMatch) {
    return null;
  }
  MinimumShouldMatchIntervalIterator it = new MinimumShouldMatchIntervalIterator(lookup.keySet(), minShouldMatch);
  if (it.advance(doc) != doc) {
    return null;
  }
  if (it.nextInterval() == IntervalIterator.NO_MORE_INTERVALS) {
    return null;
  }
  return new MinimumMatchesIterator(it, lookup);
}
 
Example 9  Project: Elasticsearch  File: NativeScriptEngineService.java
@Override
public SearchScript search(CompiledScript compiledScript, final SearchLookup lookup, @Nullable final Map<String, Object> vars) {
    final NativeScriptFactory scriptFactory = (NativeScriptFactory) compiledScript.compiled();
    return new SearchScript() {
        @Override
        public LeafSearchScript getLeafSearchScript(LeafReaderContext context) throws IOException {
            AbstractSearchScript script = (AbstractSearchScript) scriptFactory.newScript(vars);
            script.setLookup(lookup.getLeafSearchLookup(context));
            return script;
        }
        @Override
        public boolean needsScores() {
            return scriptFactory.needsScores();
        }
    };
}
 
Example 10  Project: lucene-solr  File: MultiCollector.java
@Override
public LeafCollector getLeafCollector(LeafReaderContext context) throws IOException {
  final List<LeafCollector> leafCollectors = new ArrayList<>(collectors.length);
  for (Collector collector : collectors) {
    final LeafCollector leafCollector;
    try {
      leafCollector = collector.getLeafCollector(context);
    } catch (CollectionTerminatedException e) {
      // this leaf collector does not need this segment
      continue;
    }
    leafCollectors.add(leafCollector);
  }
  switch (leafCollectors.size()) {
    case 0:
      throw new CollectionTerminatedException();
    case 1:
      return leafCollectors.get(0);
    default:
      return new MultiLeafCollector(leafCollectors, cacheScores, scoreMode() == ScoreMode.TOP_SCORES);
  }
}
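Note how CollectionTerminatedException is used for control flow: a sub-collector that needs nothing from the current segment throws it from getLeafCollector, and if every sub-collector opts out, MultiCollector rethrows it so the searcher can skip the segment entirely.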
 
Example 11  Project: lucene-solr  File: TestBlockJoinValidation.java
public void testAdvanceValidationForToChildBjq() throws Exception {
  Query parentQuery = new MatchAllDocsQuery();
  ToChildBlockJoinQuery blockJoinQuery = new ToChildBlockJoinQuery(parentQuery, parentsFilter);

  final LeafReaderContext context = indexSearcher.getIndexReader().leaves().get(0);
  Weight weight = indexSearcher.createWeight(indexSearcher.rewrite(blockJoinQuery), org.apache.lucene.search.ScoreMode.COMPLETE, 1);
  Scorer scorer = weight.scorer(context);
  final Bits parentDocs = parentsFilter.getBitSet(context);

  int target;
  do {
    // make the parent scorer advance to a doc ID which is not a parent
    target = TestUtil.nextInt(random(), 0, context.reader().maxDoc() - 2);
  } while (parentDocs.get(target + 1));

  final int illegalTarget = target;
  IllegalStateException expected = expectThrows(IllegalStateException.class, () -> {
    scorer.iterator().advance(illegalTarget);
  });
  assertTrue(expected.getMessage() != null && expected.getMessage().contains(ToChildBlockJoinQuery.INVALID_QUERY_MESSAGE));
}
 
Example 12  Project: Elasticsearch  File: FunctionScoreQuery.java
@Override
public Explanation explain(LeafReaderContext context, int doc) throws IOException {
    Explanation subQueryExpl = subQueryWeight.explain(context, doc);
    if (!subQueryExpl.isMatch()) {
        return subQueryExpl;
    }
    Explanation expl;
    if (function != null) {
        Explanation functionExplanation = function.getLeafScoreFunction(context).explainScore(doc, subQueryExpl);
        expl = combineFunction.explain(subQueryExpl, functionExplanation, maxBoost);
    } else {
        expl = subQueryExpl;
    }
    if (minScore != null && minScore > expl.getValue()) {
        expl = Explanation.noMatch("Score value is too low, expected at least " + minScore + " but got " + expl.getValue(), expl);
    }
    return expl;
}
 
Example 13  Project: Elasticsearch  File: ValueCountAggregator.java
@Override
public LeafBucketCollector getLeafCollector(LeafReaderContext ctx,
        final LeafBucketCollector sub) throws IOException {
    if (valuesSource == null) {
        return LeafBucketCollector.NO_OP_COLLECTOR;
    }
    final BigArrays bigArrays = context.bigArrays();
    final SortedBinaryDocValues values = valuesSource.bytesValues(ctx);
    return new LeafBucketCollectorBase(sub, values) {

        @Override
        public void collect(int doc, long bucket) throws IOException {
            counts = bigArrays.grow(counts, bucket + 1);
            values.setDocument(doc);
            counts.increment(bucket, values.count());
        }

    };
}
 
Example 14  Project: Elasticsearch  File: MinAggregator.java
@Override
public LeafBucketCollector getLeafCollector(LeafReaderContext ctx,
        final LeafBucketCollector sub) throws IOException {
    if (valuesSource == null) {
        return LeafBucketCollector.NO_OP_COLLECTOR;
    }
    final BigArrays bigArrays = context.bigArrays();
    final SortedNumericDoubleValues allValues = valuesSource.doubleValues(ctx);
    final NumericDoubleValues values = MultiValueMode.MIN.select(allValues, Double.POSITIVE_INFINITY);
    return new LeafBucketCollectorBase(sub, allValues) {

        @Override
        public void collect(int doc, long bucket) throws IOException {
            if (bucket >= mins.size()) {
                long from = mins.size();
                mins = bigArrays.grow(mins, bucket + 1);
                mins.fill(from, mins.size(), Double.POSITIVE_INFINITY);
            }
            final double value = values.get(doc);
            double min = mins.get(bucket);
            min = Math.min(min, value);
            mins.set(bucket, min);
        }

    };
}
 
Example 15  Project: lucene-solr  File: TermAutomatonQuery.java
@Override
public Scorer scorer(LeafReaderContext context) throws IOException {

  // Initialize the enums; null for a given slot means that term didn't appear in this reader
  EnumAndScorer[] enums = new EnumAndScorer[idToTerm.size()];

  boolean any = false;
  for(Map.Entry<Integer,TermStates> ent : termStates.entrySet()) {
    TermStates termStates = ent.getValue();
    assert termStates.wasBuiltFor(ReaderUtil.getTopLevelContext(context)) : "The top-reader used to create Weight is not the same as the current reader's top-reader (" + ReaderUtil.getTopLevelContext(context);
    BytesRef term = idToTerm.get(ent.getKey());
    TermState state = termStates.get(context);
    if (state != null) {
      TermsEnum termsEnum = context.reader().terms(field).iterator();
      termsEnum.seekExact(term, state);
      enums[ent.getKey()] = new EnumAndScorer(ent.getKey(), termsEnum.postings(null, PostingsEnum.POSITIONS));
      any = true;
    }
  }

  if (any) {
    return new TermAutomatonScorer(this, enums, anyTermID, idToTerm, new LeafSimScorer(stats, context.reader(), field, true));
  } else {
    return null;
  }
}
 
Example 16  Project: lucene-solr  File: BlockGroupingCollector.java
@Override
protected void doSetNextReader(LeafReaderContext readerContext) throws IOException {
  if (subDocUpto != 0) {
    processGroup();
  }
  subDocUpto = 0;
  docBase = readerContext.docBase;
  //System.out.println("setNextReader base=" + docBase + " r=" + readerContext.reader);
  Scorer s = lastDocPerGroup.scorer(readerContext);
  if (s == null) {
    lastDocPerGroupBits = null;
  } else {
    lastDocPerGroupBits = s.iterator();
  }
  groupEndDocID = -1;

  currentReaderContext = readerContext;
  for (int i=0; i<comparators.length; i++) {
    leafComparators[i] = comparators[i].getLeafComparator(readerContext);
  }
}
 
Example 17  Project: lucene-solr  File: SpanPayloadCheckQuery.java
@Override
public SpanScorer scorer(LeafReaderContext context) throws IOException {
  if (field == null)
    return null;

  Terms terms = context.reader().terms(field);
  if (terms != null && terms.hasPositions() == false) {
    throw new IllegalStateException("field \"" + field + "\" was indexed without position data; cannot run SpanQuery (query=" + parentQuery + ")");
  }

  final Spans spans = getSpans(context, Postings.PAYLOADS);
  if (spans == null) {
    return null;
  }
  final LeafSimScorer docScorer = getSimScorer(context);
  return new SpanScorer(this, spans, docScorer);
}
 
Example 18  Project: lucene-solr  File: ExportQParserPlugin.java
@Override
public LeafCollector getLeafCollector(LeafReaderContext context) throws IOException {
  final FixedBitSet set = new FixedBitSet(context.reader().maxDoc());
  this.sets[context.ord] = set;
  return new LeafCollector() {
    
    @Override
    public void setScorer(Scorable scorer) throws IOException {}
    
    @Override
    public void collect(int docId) throws IOException{
      ++totalHits;
      set.set(docId);
    }
  };
}
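context.ord is the position of this leaf in the top-level leaves() list, which is why it can directly index the pre-sized per-segment sets array.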
 
Example 19  Project: lucene-solr  File: ExportWriter.java
protected void addDocsToItemWriter(List<LeafReaderContext> leaves, IteratorWriter.ItemWriter writer, SortDoc[] docsToExport, int outDocsIndex) throws IOException {
  try {
    for (int i = outDocsIndex; i >= 0; --i) {
      SortDoc s = docsToExport[i];
      writer.add((MapWriter) ew -> {
        writeDoc(s, leaves, ew);
        s.reset();
      });
    }
  } catch (Throwable e) {
    Throwable ex = e;
    while (ex != null) {
      String m = ex.getMessage();
      if (m != null && m.contains("Broken pipe")) {
        throw new IgnoreException();
      }
      ex = ex.getCause();
    }

    if (e instanceof IOException) {
      throw ((IOException) e);
    } else {
      throw new IOException(e);
    }
  }
}
 
Example 20  Project: lucene-solr  File: GlobalOrdinalsWithScoreQuery.java
@Override
public boolean isCacheable(LeafReaderContext ctx) {
  // disable caching because this query relies on a top reader context
  // and holds a bitset of matching ordinals that cannot be accounted in
  // the memory used by the cache
  return false;
}
 
Example 21  Project: lucene-solr  File: AllGroupHeadsCollector.java
protected SortingGroupHead(Sort sort, T groupValue, int doc, LeafReaderContext context, Scorable scorer) throws IOException {
  super(groupValue, doc, context.docBase);
  final SortField[] sortFields = sort.getSort();
  comparators = new FieldComparator[sortFields.length];
  leafComparators = new LeafFieldComparator[sortFields.length];
  for (int i = 0; i < sortFields.length; i++) {
    comparators[i] = sortFields[i].getComparator(1, i);
    leafComparators[i] = comparators[i].getLeafComparator(context);
    leafComparators[i].setScorer(scorer);
    leafComparators[i].copy(0, doc);
    leafComparators[i].setBottom(0);
  }
}
 
Example 22  Project: mtas  File: CodecCollector.java
/**
 * Computes the basic termvector numbers for the current term over the given
 * document set: the number of matching documents and the summed frequency.
 *
 * @param docSet
 *          the set of top-level document ids to count
 * @param termDocId
 *          the doc id the postings enum is currently positioned on
 * @param termsEnum
 *          the terms enum, positioned on the term
 * @param r
 *          the leaf reader
 * @param lrc
 *          the leaf reader context of r
 * @param postingsEnum
 *          a postings enum to reuse
 * @return the termvector number basic
 * @throws IOException
 *           Signals that an I/O exception has occurred.
 */
private static TermvectorNumberBasic computeTermvectorNumberBasic(
    List<Integer> docSet, int termDocId, TermsEnum termsEnum, LeafReader r,
    LeafReaderContext lrc, PostingsEnum postingsEnum) throws IOException {
  TermvectorNumberBasic result = new TermvectorNumberBasic();
  boolean hasDeletedDocuments = (r.getLiveDocs() != null);
  if ((docSet.size() == r.numDocs()) && !hasDeletedDocuments) {
    try {
      return computeTermvectorNumberBasic(termsEnum, r);
    } catch (IOException e) {
      log.debug("problem", e);
      // problem
    }
  }
  result.docNumber = 0;
  result.valueSum[0] = 0;
  int localTermDocId = termDocId;
  Iterator<Integer> docIterator = docSet.iterator();
  postingsEnum = termsEnum.postings(postingsEnum, PostingsEnum.FREQS);
  int docId;
  while (docIterator.hasNext()) {
    docId = docIterator.next() - lrc.docBase;
    if (docId >= localTermDocId && ((docId == localTermDocId)
        || ((localTermDocId = postingsEnum.advance(docId)) == docId))) {
      result.docNumber++;
      result.valueSum[0] += postingsEnum.freq();
    }
    if (localTermDocId == DocIdSetIterator.NO_MORE_DOCS) {
      break;
    }
  }
  return result;
}
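This example converts in the opposite direction: docSet holds top-level ids, so subtracting lrc.docBase yields the segment-local ids that postingsEnum.advance expects.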
 
Example 23  Project: lucene-solr  File: TestDisjunctionMaxQuery.java
public void testSkipToFirsttimeMiss() throws IOException {
  final DisjunctionMaxQuery dq = new DisjunctionMaxQuery(
      Arrays.asList(tq("id", "d1"), tq("dek", "DOES_NOT_EXIST")), 0.0f);

  QueryUtils.check(random(), dq, s);
  assertTrue(s.getTopReaderContext() instanceof LeafReaderContext);
  final Weight dw = s.createWeight(s.rewrite(dq), ScoreMode.COMPLETE, 1);
  LeafReaderContext context = (LeafReaderContext)s.getTopReaderContext();
  final Scorer ds = dw.scorer(context);
  final boolean skipOk = ds.iterator().advance(3) != DocIdSetIterator.NO_MORE_DOCS;
  if (skipOk) {
    fail("firsttime skipTo found a match? ... "
        + r.document(ds.docID()).get("id"));
  }
}
 
Example 24  Project: lucene-solr  File: LRUQueryCache.java
private DocIdSet cache(LeafReaderContext context) throws IOException {
  final BulkScorer scorer = in.bulkScorer(context);
  if (scorer == null) {
    return DocIdSet.EMPTY;
  } else {
    return cacheImpl(scorer, context.reader().maxDoc());
  }
}
 
Example 25  Project: lucene-solr  File: IntervalQuery.java
@Override
public Scorer scorer(LeafReaderContext context) throws IOException {
  IntervalIterator intervals = intervalsSource.intervals(field, context);
  if (intervals == null)
    return null;
  return new IntervalScorer(this, intervals, intervalsSource.minExtent(), boost, scoreFunction);
}
 
Example 26  Project: mtas  File: MtasSpanFollowedByQuery.java
@Override
public MtasSpans getSpans(LeafReaderContext context,
    Postings requiredPostings) throws IOException {
  Terms terms = context.reader().terms(field);
  if (terms == null) {
    return null; // field does not exist
  }
  MtasSpanFollowedByQuerySpans s1 = new MtasSpanFollowedByQuerySpans(
      MtasSpanFollowedByQuery.this,
      w1.spanWeight.getSpans(context, requiredPostings));
  MtasSpanFollowedByQuerySpans s2 = new MtasSpanFollowedByQuerySpans(
      MtasSpanFollowedByQuery.this,
      w2.spanWeight.getSpans(context, requiredPostings));
  return new MtasSpanFollowedBySpans(MtasSpanFollowedByQuery.this, s1, s2);
}
 
Example 27  Project: lucene-solr  File: TestFieldCacheSortRandom.java
@Override
public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
  return new ConstantScoreWeight(this, boost) {
    @Override
    public Scorer scorer(LeafReaderContext context) throws IOException {
      Random random = new Random(seed ^ context.docBase);
      final int maxDoc = context.reader().maxDoc();
      final NumericDocValues idSource = DocValues.getNumeric(context.reader(), "id");
      assertNotNull(idSource);
      final FixedBitSet bits = new FixedBitSet(maxDoc);
      for(int docID=0;docID<maxDoc;docID++) {
        if (random.nextFloat() <= density) {
          bits.set(docID);
          //System.out.println("  acc id=" + idSource.getInt(docID) + " docID=" + docID);
          assertEquals(docID, idSource.advance(docID));
          matchValues.add(docValues.get((int) idSource.longValue()));
        }
      }

      return new ConstantScoreScorer(this, score(), scoreMode, new BitSetIterator(bits, bits.approximateCardinality()));
    }

    @Override
    public boolean isCacheable(LeafReaderContext ctx) {
      return true;
    }
  };
}
 
Example 28  Project: lucene-solr  File: BooleanWeight.java
@Override
public boolean isCacheable(LeafReaderContext ctx) {
  if (query.clauses().size() > TermInSetQuery.BOOLEAN_REWRITE_TERM_COUNT_THRESHOLD) {
    // Disallow caching large boolean queries to not encourage users
    // to build large boolean queries as a workaround to the fact that
    // we disallow caching large TermInSetQueries.
    return false;
  }
  for (WeightedBooleanClause wc : weightedClauses) {
    Weight w = wc.weight;
    if (w.isCacheable(ctx) == false)
      return false;
  }
  return true;
}
 
Example 29  Project: Elasticsearch  File: BitsetFilterCache.java
private BitSet getAndLoadIfNotPresent(final Query query, final LeafReaderContext context) throws IOException, ExecutionException {
    final Object coreCacheReader = context.reader().getCoreCacheKey();
    final ShardId shardId = ShardUtils.extractShardId(context.reader());
    if (shardId != null // can't require it because of the percolator
            && index.getName().equals(shardId.getIndex()) == false) {
        // insanity
        throw new IllegalStateException("Trying to load bit set for index [" + shardId.getIndex()
                + "] with cache of index [" + index.getName() + "]");
    }
    Cache<Query, Value> filterToFbs = loadedFilters.get(coreCacheReader, new Callable<Cache<Query, Value>>() {
        @Override
        public Cache<Query, Value> call() throws Exception {
            context.reader().addCoreClosedListener(BitsetFilterCache.this);
            return CacheBuilder.newBuilder().build();
        }
    });
    return filterToFbs.get(query,new Callable<Value>() {
        @Override
        public Value call() throws Exception {
            final IndexReaderContext topLevelContext = ReaderUtil.getTopLevelContext(context);
            final IndexSearcher searcher = new IndexSearcher(topLevelContext);
            searcher.setQueryCache(null);
            final Weight weight = searcher.createNormalizedWeight(query, false);
            final Scorer s = weight.scorer(context);
            final BitSet bitSet;
            if (s == null) {
                bitSet = null;
            } else {
                bitSet = BitSet.of(s.iterator(), context.reader().maxDoc());
            }

            Value value = new Value(bitSet, shardId);
            listener.onCache(shardId, value.bitset);
            return value;
        }
    }).bitset;
}
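Two cache levels are involved here: the outer cache is keyed on the segment's core cache key, so entries are evicted by the core-closed listener when the segment goes away, while the inner cache is keyed on the query. The bitset itself is built by scoring the filter against this single leaf with query caching disabled.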
 
Example 30  Project: lucene-solr  File: DisjunctionMaxQuery.java
@Override
public boolean isCacheable(LeafReaderContext ctx) {
  if (weights.size() > TermInSetQuery.BOOLEAN_REWRITE_TERM_COUNT_THRESHOLD) {
    // Disallow caching large dismax queries to not encourage users
    // to build large dismax queries as a workaround to the fact that
    // we disallow caching large TermInSetQueries.
    return false;
  }
  for (Weight w : weights) {
    if (w.isCacheable(ctx) == false)
      return false;
  }
  return true;
}
 