Source code examples for class org.apache.lucene.index.IndexReaderContext

The following examples show how to use the org.apache.lucene.index.IndexReaderContext API, or you can follow the links through to GitHub to read the full source.
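Before the examples, a minimal sketch of the core API, assuming an already-open DirectoryReader named reader: an IndexReaderContext is a node in the reader's context tree, and nearly all search-time code only walks its leaves.

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReaderContext;
import org.apache.lucene.index.LeafReaderContext;

IndexReaderContext top = reader.getContext();
assert top.isTopLevel; // the context obtained from the reader itself is always top-level
for (LeafReaderContext leaf : top.leaves()) {
  // ord is the leaf's position in leaves(); docBase maps its
  // segment-local doc IDs into the composite reader's doc ID space
  System.out.println("leaf " + leaf.ord + " docBase=" + leaf.docBase
      + " maxDoc=" + leaf.reader().maxDoc());
}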

Example 1  Project: Elasticsearch   File: BlendedTermQuery.java
@Override
public Query rewrite(IndexReader reader) throws IOException {
    IndexReaderContext context = reader.getContext();
    TermContext[] ctx = new TermContext[terms.length];
    int[] docFreqs = new int[ctx.length];
    for (int i = 0; i < terms.length; i++) {
        ctx[i] = TermContext.build(context, terms[i]);
        docFreqs[i] = ctx[i].docFreq();
    }

    final int maxDoc = reader.maxDoc();
    blend(ctx, maxDoc, reader);
    Query query = topLevelQuery(terms, ctx, docFreqs, maxDoc);
    query.setBoost(getBoost());
    return query;
}
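Example 1 is written against an older Lucene API: TermContext was renamed to TermStates in Lucene 8, and Query.setBoost was later removed in favor of wrapping with BoostQuery (Example 5 below shows a full TermStates-based rewrite). A hedged sketch of the boost handling under the current API, assuming the boost value is held in a float named boost:

Query query = topLevelQuery(terms, ctx, docFreqs, maxDoc);
// Query.setBoost no longer exists; boosts are applied by wrapping instead.
return boost == 1f ? query : new BoostQuery(query, boost);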
 
Example 2  Project: lucene-solr   File: BlendedTermQuery.java
private static TermStates adjustFrequencies(IndexReaderContext readerContext,
                                            TermStates ctx, int artificialDf, long artificialTtf) throws IOException {
  List<LeafReaderContext> leaves = readerContext.leaves();
  final int len;
  if (leaves == null) {
    len = 1;
  } else {
    len = leaves.size();
  }
  TermStates newCtx = new TermStates(readerContext);
  for (int i = 0; i < len; ++i) {
    TermState termState = ctx.get(leaves.get(i));
    if (termState == null) {
      continue;
    }
    newCtx.register(termState, i);
  }
  newCtx.accumulateStatistics(artificialDf, artificialTtf);
  return newCtx;
}
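adjustFrequencies is a private helper of Lucene's own org.apache.lucene.search.BlendedTermQuery; callers reach it through the public Builder. A hedged usage sketch, assuming an open IndexSearcher named searcher: the two spelling variants below end up scored against shared, blended document-frequency statistics.

BlendedTermQuery blended = new BlendedTermQuery.Builder()
    .add(new Term("body", "colour"))
    .add(new Term("body", "color"))
    .setRewriteMethod(BlendedTermQuery.BOOLEAN_REWRITE)
    .build();
TopDocs hits = searcher.search(blended, 10);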
 
Example 3  Project: lucene-solr   File: HashQParserPlugin.java
@Override
public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {

  SolrIndexSearcher solrIndexSearcher = (SolrIndexSearcher)searcher;
  IndexReaderContext context = solrIndexSearcher.getTopReaderContext();

  List<LeafReaderContext> leaves =  context.leaves();
  FixedBitSet[] fixedBitSets = new FixedBitSet[leaves.size()];

  for(LeafReaderContext leaf : leaves) {
    try {
      SegmentPartitioner segmentPartitioner = new SegmentPartitioner(leaf,worker,workers, keys, solrIndexSearcher);
      segmentPartitioner.run();
      fixedBitSets[segmentPartitioner.context.ord] = segmentPartitioner.docs;
    } catch(Exception e) {
      throw new IOException(e);
    }
  }

  ConstantScoreQuery constantScoreQuery = new ConstantScoreQuery(new BitsFilter(fixedBitSets));
  return searcher.rewrite(constantScoreQuery).createWeight(searcher, ScoreMode.COMPLETE_NO_SCORES, boost);
}
 
Example 4
public SecureIndexSearcher(IndexReaderContext context, ExecutorService executor,
    AccessControlFactory accessControlFactory, Collection<String> readAuthorizations,
    Collection<String> discoverAuthorizations, Set<String> discoverableFields, String defaultReadMaskMessage)
    throws IOException {
  super(context, executor);
  _accessControlFactory = accessControlFactory;
  _readAuthorizations = readAuthorizations;
  _discoverAuthorizations = discoverAuthorizations;
  _discoverableFields = discoverableFields;
  _defaultReadMaskMessage = defaultReadMaskMessage;
  _accessControlReader = _accessControlFactory.getReader(readAuthorizations, discoverAuthorizations,
      discoverableFields, _defaultReadMaskMessage);
  _secureIndexReader = getSecureIndexReader(context);
  List<AtomicReaderContext> leaves = _secureIndexReader.leaves();
  _leaveMap = new HashMap<Object, AtomicReaderContext>();
  for (AtomicReaderContext atomicReaderContext : leaves) {
    AtomicReader atomicReader = atomicReaderContext.reader();
    SecureAtomicReader secureAtomicReader = (SecureAtomicReader) atomicReader;
    AtomicReader originalReader = secureAtomicReader.getOriginalReader();
    Object coreCacheKey = originalReader.getCoreCacheKey();
    _leaveMap.put(coreCacheKey, atomicReaderContext);
  }
}
 
Example 5  Project: crate   File: BlendedTermQuery.java
@Override
public Query rewrite(IndexReader reader) throws IOException {
    Query rewritten = super.rewrite(reader);
    if (rewritten != this) {
        return rewritten;
    }
    IndexReaderContext context = reader.getContext();
    TermStates[] ctx = new TermStates[terms.length];
    int[] docFreqs = new int[ctx.length];
    for (int i = 0; i < terms.length; i++) {
        ctx[i] = TermStates.build(context, terms[i], true);
        docFreqs[i] = ctx[i].docFreq();
    }

    final int maxDoc = reader.maxDoc();
    blend(ctx, maxDoc, reader);
    return topLevelQuery(terms, ctx, docFreqs, maxDoc);
}
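The early return after super.rewrite(reader) follows Lucene's rewrite contract: a query must return itself (not a copy) when no further rewriting is possible, because the searcher keeps rewriting until it reaches that fixpoint. A simplified sketch of the loop IndexSearcher runs:

// Simplified sketch of IndexSearcher.rewrite(): call rewrite()
// repeatedly until the query returns itself unchanged.
Query rewritten = query;
for (Query r = rewritten.rewrite(reader); r != rewritten; r = rewritten.rewrite(reader)) {
  rewritten = r;
}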
 
Example 6  Project: ltr4l   File: DefaultLTRQParserPlugin.java
@Override
public Query parse() throws SyntaxError {
  IndexReaderContext context = req.getSearcher().getTopReaderContext();
  for(FieldFeatureExtractorFactory factory: featuresSpec){
    String fieldName = factory.getFieldName();
    FieldType fieldType = req.getSchema().getFieldType(fieldName);
    Analyzer analyzer = fieldType.getQueryAnalyzer();
    factory.init(context, FieldFeatureExtractorFactory.terms(fieldName, qstr, analyzer));
  }

  return new DefaultLTRQuery(featuresSpec, ranker);
}
 
Example 7  Project: ltr4l   File: AbstractLTRQueryTestCase.java
protected FieldFeatureExtractorFactory getTF(String featureName, String fieldName, IndexReaderContext context, Term... terms){
  FieldFeatureExtractorFactory factory = new FieldFeatureTFExtractorFactory(featureName, fieldName);
  if(context != null){
    factory.init(context, terms);
  }
  return factory;
}
 
Example 8  Project: ltr4l   File: AbstractLTRQueryTestCase.java
protected FieldFeatureExtractorFactory getIDF(String featureName, String fieldName, IndexReaderContext context, Term... terms){
  FieldFeatureExtractorFactory factory = new FieldFeatureIDFExtractorFactory(featureName, fieldName);
  if(context != null){
    factory.init(context, terms);
  }
  return factory;
}
 
Example 9  Project: ltr4l   File: AbstractLTRQueryTestCase.java
protected FieldFeatureExtractorFactory getTFIDF(String featureName, String fieldName, IndexReaderContext context, Term... terms){
  FieldFeatureExtractorFactory factory = new FieldFeatureTFIDFExtractorFactory(featureName, fieldName);
  if(context != null){
    factory.init(context, terms);
  }
  return factory;
}
 
Example 10  Project: ltr4l   File: AbstractLTRQueryTestCase.java
protected FieldFeatureExtractorFactory getSV(String featureName, String fieldName, IndexReaderContext context, Term... terms){
  FieldFeatureExtractorFactory factory = new FieldFeatureStoredValueExtractorFactory(featureName, fieldName);
  if(context != null){
    factory.init(context, terms);
  }
  return factory;
}
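A hedged usage sketch of the four test helpers above, assuming a test case with an open IndexSearcher named searcher and an indexed field named body:

IndexReaderContext context = searcher.getTopReaderContext();
Term term = new Term("body", "lucene");
// All factories are initialized against the same top-level context,
// so per-segment feature extractors can be created from it later.
FieldFeatureExtractorFactory tf    = getTF("TF", "body", context, term);
FieldFeatureExtractorFactory idf   = getIDF("IDF", "body", context, term);
FieldFeatureExtractorFactory tfidf = getTFIDF("TFIDF", "body", context, term);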
 
Example 11  Project: Elasticsearch   File: BitsetFilterCache.java
private BitSet getAndLoadIfNotPresent(final Query query, final LeafReaderContext context) throws IOException, ExecutionException {
    final Object coreCacheReader = context.reader().getCoreCacheKey();
    final ShardId shardId = ShardUtils.extractShardId(context.reader());
    if (shardId != null // can't require it because of the percolator
            && index.getName().equals(shardId.getIndex()) == false) {
        // insanity
        throw new IllegalStateException("Trying to load bit set for index [" + shardId.getIndex()
                + "] with cache of index [" + index.getName() + "]");
    }
    Cache<Query, Value> filterToFbs = loadedFilters.get(coreCacheReader, new Callable<Cache<Query, Value>>() {
        @Override
        public Cache<Query, Value> call() throws Exception {
            context.reader().addCoreClosedListener(BitsetFilterCache.this);
            return CacheBuilder.newBuilder().build();
        }
    });
    return filterToFbs.get(query,new Callable<Value>() {
        @Override
        public Value call() throws Exception {
            final IndexReaderContext topLevelContext = ReaderUtil.getTopLevelContext(context);
            final IndexSearcher searcher = new IndexSearcher(topLevelContext);
            searcher.setQueryCache(null);
            final Weight weight = searcher.createNormalizedWeight(query, false);
            final Scorer s = weight.scorer(context);
            final BitSet bitSet;
            if (s == null) {
                bitSet = null;
            } else {
                bitSet = BitSet.of(s.iterator(), context.reader().maxDoc());
            }

            Value value = new Value(bitSet, shardId);
            listener.onCache(shardId, value.bitset);
            return value;
        }
    }).bitset;
}
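Example 11 targets Lucene 5.x; IndexSearcher.createNormalizedWeight was removed in later releases. A hedged sketch of the same bitset materialization against the current API, assuming a Query named query and a LeafReaderContext named context:

IndexReaderContext topLevelContext = ReaderUtil.getTopLevelContext(context);
IndexSearcher searcher = new IndexSearcher(topLevelContext);
searcher.setQueryCache(null); // the cache itself must not go through a query cache
Weight weight = searcher.createWeight(searcher.rewrite(query),
    ScoreMode.COMPLETE_NO_SCORES, 1f);
Scorer s = weight.scorer(context);
BitSet bitSet = (s == null) ? null
    : BitSet.of(s.iterator(), context.reader().maxDoc());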
 
Example 12
public AbstractAuthorityQueryWeight(SolrIndexSearcher searcher, boolean needsScores, Query query, String authTermName, String authTermText) throws IOException
{
    super(query);
    this.searcher = searcher;
    searcher.collectionStatistics(authTermName);
    final IndexReaderContext context = searcher.getTopReaderContext();
    final Term term = new Term(authTermName, authTermText);
    final TermContext termContext = TermContext.build(context, term);
    searcher.termStatistics(term, termContext);
    this.needsScores = needsScores;
}
 
Example 13  Project: linden   File: LindenFieldCacheImpl.java
public UIDMaps getUIDMaps(IndexReaderContext topReaderContext, String uidField) throws IOException {
  PerReaderUIDMaps[] uidMapsArray = new PerReaderUIDMaps[topReaderContext.leaves().size()];
  for (int i = 0; i < topReaderContext.leaves().size(); ++i) {
    uidMapsArray[i] = (PerReaderUIDMaps) caches.get(UIDCache.class)
        .get(topReaderContext.leaves().get(i).reader(), new CacheKey(uidField, null), false);
  }
  return new UIDMaps(uidMapsArray);
}
 
Example 14
/** Calculates facets between {@code start} and {@code end} to a detail level one greater than that provided by the
 * arguments. For example providing March to October of 2014 would return facets to the day level of those months.
 * This is just a convenience method.
 * @see #calcFacets(IndexReaderContext, Bits, Shape, int)
 */
public Facets calcFacets(IndexReaderContext context, Bits topAcceptDocs, UnitNRShape start, UnitNRShape end)
    throws IOException {
  Shape facetRange = getGrid().toRangeShape(start, end);
  int detailLevel = Math.max(start.getLevel(), end.getLevel()) + 1;
  return calcFacets(context, topAcceptDocs, facetRange, detailLevel);
}
 
Example 15  Project: lucene-solr   File: TestGrouping.java
public ShardState(IndexSearcher s) {
  final IndexReaderContext ctx = s.getTopReaderContext();
  final List<LeafReaderContext> leaves = ctx.leaves();
  subSearchers = new ShardSearcher[leaves.size()];
  for(int searcherIDX=0;searcherIDX<subSearchers.length;searcherIDX++) {
    subSearchers[searcherIDX] = new ShardSearcher(leaves.get(searcherIDX), ctx);
  }

  docStarts = new int[subSearchers.length];
  for(int subIDX=0;subIDX<docStarts.length;subIDX++) {
    docStarts[subIDX] = leaves.get(subIDX).docBase;
    //System.out.println("docStarts[" + subIDX + "]=" + docStarts[subIDX]);
  }
}
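docStarts records each leaf's docBase so top-level doc IDs can be translated to shard-local ones; ReaderUtil.subIndex provides the reverse lookup (Example 24 uses it the same way). A brief sketch, assuming a top-level doc ID named globalDoc:

List<LeafReaderContext> leaves = ctx.leaves();
int idx = ReaderUtil.subIndex(globalDoc, leaves); // which leaf holds the doc
LeafReaderContext leaf = leaves.get(idx);
int localDoc = globalDoc - leaf.docBase;          // segment-local doc ID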
 
Example 16  Project: lucene-solr   File: TermAutomatonQuery.java
@Override
public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
  IndexReaderContext context = searcher.getTopReaderContext();
  Map<Integer,TermStates> termStates = new HashMap<>();

  for (Map.Entry<BytesRef,Integer> ent : termToID.entrySet()) {
    if (ent.getKey() != null) {
      termStates.put(ent.getValue(), TermStates.build(context, new Term(field, ent.getKey()), scoreMode.needsScores()));
    }
  }

  return new TermAutomatonWeight(det, searcher, termStates, boost);
}
 
Example 17  Project: lucene-solr   File: TermQuery.java
@Override
public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
  final IndexReaderContext context = searcher.getTopReaderContext();
  final TermStates termState;
  if (perReaderTermState == null
      || perReaderTermState.wasBuiltFor(context) == false) {
    termState = TermStates.build(context, term, scoreMode.needsScores());
  } else {
    // PRTS was pre-built for this IndexSearcher
    termState = this.perReaderTermState;
  }

  return new TermWeight(searcher, scoreMode, boost, termState);
}
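The wasBuiltFor check matters because a TermStates instance is only valid for the exact top-level context it was built against; reopening the reader invalidates it. A hedged sketch of supplying a pre-built TermStates through TermQuery's expert constructor, assuming an open IndexSearcher named searcher:

IndexReaderContext context = searcher.getTopReaderContext();
Term term = new Term("body", "lucene");
TermStates states = TermStates.build(context, term, true); // true: also gather scoring stats
TermQuery query = new TermQuery(term, states); // expert constructor: reuses the cached stats
TopDocs hits = searcher.search(query, 10);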
 
Example 18  Project: lucene-solr   File: IndexSearcher.java
IndexSearcher(IndexReaderContext context, Executor executor, SliceExecutor sliceExecutor) {
  assert context.isTopLevel: "IndexSearcher's ReaderContext must be topLevel for reader" + context.reader();
  assert (sliceExecutor == null) == (executor==null);

  reader = context.reader();
  this.executor = executor;
  this.sliceExecutor = sliceExecutor;
  this.readerContext = context;
  leafContexts = context.leaves();
  this.leafSlices = executor == null ? null : slices(leafContexts);
}
 
Example 19  Project: lucene-solr   File: SpanTermQuery.java
@Override
public SpanWeight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
  final TermStates context;
  final IndexReaderContext topContext = searcher.getTopReaderContext();
  if (termStates == null || termStates.wasBuiltFor(topContext) == false) {
    context = TermStates.build(topContext, term, scoreMode.needsScores());
  }
  else {
    context = termStates;
  }
  return new SpanTermWeight(context, searcher, scoreMode.needsScores() ? Collections.singletonMap(term, context) : null, boost);
}
 
Example 20  Project: lucene-solr   File: LRUQueryCache.java
@Override
public boolean test(LeafReaderContext context) {
  final int maxDoc = context.reader().maxDoc();
  if (maxDoc < minSize) {
    return false;
  }
  final IndexReaderContext topLevelContext = ReaderUtil.getTopLevelContext(context);
  final float sizeRatio = (float) context.reader().maxDoc() / topLevelContext.reader().maxDoc();
  return sizeRatio >= minSizeRatio;
}
 
Example 21  Project: lucene-solr   File: TestNearSpansOrdered.java
/**
 * not a direct test of NearSpans, but a demonstration of how/when
 * this causes problems
 */
public void testSpanNearScorerSkipTo1() throws Exception {
  SpanNearQuery q = makeQuery();
  Weight w = searcher.createWeight(searcher.rewrite(q), ScoreMode.COMPLETE, 1);
  IndexReaderContext topReaderContext = searcher.getTopReaderContext();
  LeafReaderContext leave = topReaderContext.leaves().get(0);
  Scorer s = w.scorer(leave);
  assertEquals(1, s.iterator().advance(1));
}
 
Example 22  Project: lucene-solr   File: GraphTermsQParserPlugin.java
public DocSet getDocSet(IndexSearcher searcher) throws IOException {
  IndexReaderContext top = ReaderUtil.getTopLevelContext(searcher.getTopReaderContext());
  List<LeafReaderContext> segs = top.leaves();
  DocSetBuilder builder = new DocSetBuilder(top.reader().maxDoc(), Math.min(64,(top.reader().maxDoc()>>>10)+4));
  PointValues[] segPoints = new PointValues[segs.size()];
  for (int i=0; i<segPoints.length; i++) {
    segPoints[i] = segs.get(i).reader().getPointValues(field);
  }

  int maxCollect = Math.min(maxDocFreq, top.reader().maxDoc());

  PointSetQuery.CutoffPointVisitor visitor = new PointSetQuery.CutoffPointVisitor(maxCollect);
  PrefixCodedTerms.TermIterator iterator = sortedPackedPoints.iterator();
  outer: for (BytesRef point = iterator.next(); point != null; point = iterator.next()) {
    visitor.setPoint(point);
    for (int i=0; i<segs.size(); i++) {
      if (segPoints[i] == null) continue;
      visitor.setBase(segs.get(i).docBase);
      segPoints[i].intersect(visitor);
      if (visitor.getCount() > maxDocFreq) {
        continue outer;
      }
    }
    int collected = visitor.getCount();
    int[] ids = visitor.getGlobalIds();
    for (int i=0; i<collected; i++) {
      builder.add( ids[i] );
    }
  }

  FixedBitSet liveDocs = getLiveDocs(searcher);
  DocSet set = builder.build(liveDocs);
  return set;
}
 
Example 23  Project: lucene-solr   File: TermsComponent.java
private static void collectTermStates(IndexReaderContext topReaderContext, TermStates[] contextArray,
                                      Term[] queryTerms) throws IOException {
  TermsEnum termsEnum = null;
  for (LeafReaderContext context : topReaderContext.leaves()) {
    for (int i = 0; i < queryTerms.length; i++) {
      Term term = queryTerms[i];
      final Terms terms = context.reader().terms(term.field());
      if (terms == null) {
        // field does not exist
        continue;
      }
      termsEnum = terms.iterator();
      assert termsEnum != null;

      if (termsEnum == TermsEnum.EMPTY) continue;

      TermStates termStates = contextArray[i];
      if (termsEnum.seekExact(term.bytes())) {
        if (termStates == null) {
          termStates = new TermStates(topReaderContext);
          contextArray[i] = termStates;
        }
        termStates.accumulateStatistics(termsEnum.docFreq(), termsEnum.totalTermFreq());
      }
    }
  }
}
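A hedged sketch of how a caller drives collectTermStates, assuming an open IndexSearcher named searcher: the array starts out null-filled, and after the call only terms that actually occur in the index have a non-null entry.

Term[] queryTerms = { new Term("body", "lucene"), new Term("body", "solr") };
TermStates[] contextArray = new TermStates[queryTerms.length];
collectTermStates(searcher.getTopReaderContext(), contextArray, queryTerms);
for (int i = 0; i < queryTerms.length; i++) {
  if (contextArray[i] != null) {
    System.out.println(queryTerms[i] + " docFreq=" + contextArray[i].docFreq());
  }
}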
 
Example 24  Project: lucene-solr   File: TestIndexSearcher.java
@SuppressWarnings({"unchecked"})
private String getStringVal(SolrQueryRequest sqr, String field, int doc) throws IOException {
  SchemaField sf = sqr.getSchema().getField(field);
  ValueSource vs = sf.getType().getValueSource(sf, null);
  @SuppressWarnings({"rawtypes"})
  Map context = ValueSource.newContext(sqr.getSearcher());
  vs.createWeight(context, sqr.getSearcher());
  IndexReaderContext topReaderContext = sqr.getSearcher().getTopReaderContext();
  List<LeafReaderContext> leaves = topReaderContext.leaves();
  int idx = ReaderUtil.subIndex(doc, leaves);
  LeafReaderContext leaf = leaves.get(idx);
  FunctionValues vals = vs.getValues(context, leaf);
  return vals.strVal(doc-leaf.docBase);
}
 
Example 25  Project: lucene4ir   File: TermsSet.java
private Set<String> getTerms(IndexReader ir) {
    Set<String> t = new HashSet<>();
    for (int i = 0; i < ir.leaves().size(); i++) {
        Terms termsList;
        try {
            // Get all the terms at this level of the tree.
            termsList = ir.leaves().get(i).reader().terms(Lucene4IRConstants.FIELD_ALL);
            if (termsList != null && termsList.size() > 0) {
                TermsEnum te = termsList.iterator();
                BytesRef termBytes;
                while ((termBytes = te.next()) != null) {
                    t.add(termBytes.utf8ToString());
                }
            }

            // Get all the terms at the next level of the tree. (In practice
            // LeafReaderContext.children() always returns null, so this
            // branch is purely defensive and never runs for leaf contexts.)
            if (ir.leaves().get(i).children() != null && ir.leaves().get(i).children().size() > 0) {
                for (IndexReaderContext c : ir.leaves().get(i).children()) {
                    t.addAll(getTerms(c.reader()));
                }
            }

        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    return t;
}
 
Example 26  Project: mtas   File: MtasSpanMatchAllQuery.java
@Override
public void extractTermContexts(Map<Term, TermContext> contexts) {
  Term term = new Term(field);
  if (!contexts.containsKey(term)) {
    IndexReaderContext topContext = searcher.getTopReaderContext();
    try {
      contexts.put(term, TermContext.build(topContext, term));
    } catch (IOException e) {
      log.debug(e);
      // fail
    }
  }
}
 
Example 27  Project: mtas   File: MtasExtendedSpanTermQuery.java
@Override
public SpanWeight createWeight(IndexSearcher searcher, boolean needsScores, float boost)
    throws IOException {
  final TermContext context;
  final IndexReaderContext topContext = searcher.getTopReaderContext();
  if (termContext == null) {
    context = TermContext.build(topContext, localTerm);
  } else {
    context = termContext;
  }
  return new SpanTermWeight(context, searcher,
      needsScores ? Collections.singletonMap(localTerm, context) : null, boost);
}
 
Example 28  Project: querqy   File: DocumentFrequencyCorrection.java
public DocumentFrequencyAndTermContext getDocumentFrequencyAndTermContext(final int tqIndex,
                                                                          final IndexReaderContext indexReaderContext)
        throws IOException {

    TermStats ts = termStats;
    if (ts == null || ts.topReaderContext != indexReaderContext) {
        ts = calculateTermContexts(indexReaderContext);
    }

    return new DocumentFrequencyAndTermContext(ts.documentFrequencies[tqIndex], ts.termStates[tqIndex]);
}
 
Example 29  Project: querqy   File: FieldBoostTermQueryBuilder.java
@Override
public Weight createWeight(final IndexSearcher searcher, final ScoreMode scoreMode, final float boost)
        throws IOException {
    final IndexReaderContext context = searcher.getTopReaderContext();
    final TermStates termState = TermStates.build(context, term, scoreMode.needsScores());
    // TODO: set boosts to 1f if needsScores is false?
    return new FieldBoostWeight(termState, boost, fieldBoost.getBoost(term.field(), searcher.getIndexReader()));
}
 
Example 30
@Override
public long getSegmentCount() throws IOException {
  IndexSearcherCloseable indexSearcherClosable = getIndexSearcher(false);
  try {
    IndexReader indexReader = indexSearcherClosable.getIndexReader();
    IndexReaderContext context = indexReader.getContext();
    return context.leaves().size();
  } finally {
    indexSearcherClosable.close();
  }
}
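The segment count is also available without going through the context explicitly, since IndexReader.leaves() is shorthand for getContext().leaves(). A minimal sketch, assuming a Directory named dir:

try (DirectoryReader reader = DirectoryReader.open(dir)) {
  int segmentCount = reader.leaves().size(); // same as reader.getContext().leaves().size()
  System.out.println("segments: " + segmentCount);
}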
 