Code examples for org.apache.lucene.search.Weight

The following examples show how to use the org.apache.lucene.search.Weight API; each snippet is taken from the open-source project and file named in its heading.
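
Before the project examples, here is a minimal sketch of the typical Weight lifecycle against the ScoreMode-based API that most snippets below use: rewrite the query, ask the IndexSearcher for a Weight, then obtain and iterate a Scorer per leaf segment. The index path, field name, term, and class name are hypothetical placeholders, not part of any project below.

import java.nio.file.Paths;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreMode;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.Weight;
import org.apache.lucene.store.FSDirectory;

public class WeightUsageSketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical index location; point this at a real index directory.
    try (DirectoryReader reader = DirectoryReader.open(FSDirectory.open(Paths.get("/tmp/index")))) {
      IndexSearcher searcher = new IndexSearcher(reader);
      Query query = new TermQuery(new Term("body", "lucene")); // hypothetical field and term
      // Rewrite first, then create the Weight from the rewritten query.
      Weight weight = searcher.createWeight(searcher.rewrite(query), ScoreMode.COMPLETE_NO_SCORES, 1f);
      // A Weight produces one Scorer per leaf (segment); null means the leaf has no matches.
      for (LeafReaderContext leaf : reader.leaves()) {
        Scorer scorer = weight.scorer(leaf);
        if (scorer == null) {
          continue;
        }
        DocIdSetIterator it = scorer.iterator();
        for (int doc = it.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = it.nextDoc()) {
          // Segment-local doc IDs are converted to global IDs by adding the leaf's docBase.
          System.out.println("matched doc " + (leaf.docBase + doc));
        }
      }
    }
  }
}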

Example 1  Project: lucene-solr   File: CompositeVerifyQuery.java
@Override
public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
  final Weight indexQueryWeight = indexQuery.createWeight(searcher, ScoreMode.COMPLETE_NO_SCORES, boost); // scores aren't needed here

  return new ConstantScoreWeight(this, boost) {

    @Override
    public Scorer scorer(LeafReaderContext context) throws IOException {

      final Scorer indexQueryScorer = indexQueryWeight.scorer(context);
      if (indexQueryScorer == null) {
        return null;
      }

      final TwoPhaseIterator predFuncValues = predicateValueSource.iterator(context, indexQueryScorer.iterator());
      return new ConstantScoreScorer(this, score(), scoreMode, predFuncValues);
    }

    @Override
    public boolean isCacheable(LeafReaderContext ctx) {
      return predicateValueSource.isCacheable(ctx);
    }

  };
}
 
private void addMatchedQueries(HitContext hitContext, ImmutableMap<String, Query> namedQueries, List<String> matchedQueries) throws IOException {
    for (Map.Entry<String, Query> entry : namedQueries.entrySet()) {
        String name = entry.getKey();
        Query filter = entry.getValue();

        final Weight weight = hitContext.topLevelSearcher().createNormalizedWeight(filter, false);
        final Scorer scorer = weight.scorer(hitContext.readerContext());
        if (scorer == null) {
            continue;
        }
        final TwoPhaseIterator twoPhase = scorer.twoPhaseIterator();
        if (twoPhase == null) {
            if (scorer.iterator().advance(hitContext.docId()) == hitContext.docId()) {
                matchedQueries.add(name);
            }
        } else {
            if (twoPhase.approximation().advance(hitContext.docId()) == hitContext.docId() && twoPhase.matches()) {
                matchedQueries.add(name);
            }
        }
    }
}
 
@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException {
    return new RandomAccessWeight(this) {
        @Override
        protected Bits getMatchingDocs(LeafReaderContext context) throws IOException {
            final int maxDoc = context.reader().maxDoc();
            final MultiGeoPointValues values = indexFieldData.load(context).getGeoPointValues();
            // checks to see if bounding box crosses 180 degrees
            if (topLeft.lon() > bottomRight.lon()) {
                return new Meridian180GeoBoundingBoxBits(maxDoc, values, topLeft, bottomRight);
            } else {
                return new GeoBoundingBoxBits(maxDoc, values, topLeft, bottomRight);
            }
        }
    };
}
 
Example 4  Project: lucene-solr   File: TestBlockJoinValidation.java
public void testAdvanceValidationForToChildBjq() throws Exception {
  Query parentQuery = new MatchAllDocsQuery();
  ToChildBlockJoinQuery blockJoinQuery = new ToChildBlockJoinQuery(parentQuery, parentsFilter);

  final LeafReaderContext context = indexSearcher.getIndexReader().leaves().get(0);
  Weight weight = indexSearcher.createWeight(indexSearcher.rewrite(blockJoinQuery), org.apache.lucene.search.ScoreMode.COMPLETE, 1);
  Scorer scorer = weight.scorer(context);
  final Bits parentDocs = parentsFilter.getBitSet(context);

  int target;
  do {
    // make the parent scorer advance to a doc ID which is not a parent
    target = TestUtil.nextInt(random(), 0, context.reader().maxDoc() - 2);
  } while (parentDocs.get(target + 1));

  final int illegalTarget = target;
  IllegalStateException expected = expectThrows(IllegalStateException.class, () -> {
    scorer.iterator().advance(illegalTarget);
  });
  assertTrue(expected.getMessage() != null && expected.getMessage().contains(ToChildBlockJoinQuery.INVALID_QUERY_MESSAGE));
}
 
Example 5  Project: lucene-solr   File: PKIndexSplitter.java
private void createIndex(IndexWriterConfig config, Directory target, DirectoryReader reader, Query preserveFilter, boolean negateFilter) throws IOException {
  boolean success = false;
  final IndexWriter w = new IndexWriter(target, config);
  try {
    final IndexSearcher searcher = new IndexSearcher(reader);
    searcher.setQueryCache(null);
    preserveFilter = searcher.rewrite(preserveFilter);
    final Weight preserveWeight = searcher.createWeight(preserveFilter, ScoreMode.COMPLETE_NO_SCORES, 1);
    final List<LeafReaderContext> leaves = reader.leaves();
    final CodecReader[] subReaders = new CodecReader[leaves.size()];
    int i = 0;
    for (final LeafReaderContext ctx : leaves) {
      subReaders[i++] = new DocumentFilteredLeafIndexReader(ctx, preserveWeight, negateFilter);
    }
    w.addIndexes(subReaders);
    success = true;
  } finally {
    if (success) {
      w.close();
    } else {
      IOUtils.closeWhileHandlingException(w);
    }
  }
}
 
Example 6  Project: lucene-solr   File: ToParentBlockJoinQuery.java
public BlockJoinScorer(Weight weight, Scorer childScorer, BitSet parentBits, ScoreMode scoreMode) {
  super(weight);
  //System.out.println("Q.init firstChildDoc=" + firstChildDoc);
  this.parentBits = parentBits;
  this.childScorer = childScorer;
  this.scoreMode = scoreMode;
  childTwoPhase = childScorer.twoPhaseIterator();
  if (childTwoPhase == null) {
    childApproximation = childScorer.iterator();
    parentApproximation = new ParentApproximation(childApproximation, parentBits);
    parentTwoPhase = null;
  } else {
    childApproximation = childTwoPhase.approximation();
    parentApproximation = new ParentApproximation(childTwoPhase.approximation(), parentBits);
    parentTwoPhase = new ParentTwoPhase(parentApproximation, childTwoPhase);
  }
}
 
Example 7  Project: lucene-solr   File: ValueSourceScorer.java
protected ValueSourceScorer(Weight weight, LeafReaderContext readerContext, FunctionValues values) {
  super(weight);
  this.values = values;
  final DocIdSetIterator approximation = DocIdSetIterator.all(readerContext.reader().maxDoc()); // no approximation!
  this.twoPhaseIterator = new TwoPhaseIterator(approximation) {
    @Override
    public boolean matches() throws IOException {
      return ValueSourceScorer.this.matches(approximation.docID());
    }

    @Override
    public float matchCost() {
      return ValueSourceScorer.this.matchCost();
    }
  };
  this.disi = TwoPhaseIterator.asDocIdSetIterator(twoPhaseIterator);
}
 
Example 8  Project: SearchServices   File: SolrOwnerScorer.java
public static SolrOwnerScorer createOwnerScorer(Weight weight, LeafReaderContext context, SolrIndexSearcher searcher, String authority) throws IOException
{
    if (AuthorityType.getAuthorityType(authority) == AuthorityType.USER)
    {
        DocSet ownedDocs = (DocSet) searcher.cacheLookup(CacheConstants.ALFRESCO_OWNERLOOKUP_CACHE, authority);

        if (ownedDocs == null)
        {
            // Cache miss: query the index for docs where the owner matches the authority. 
            ownedDocs = searcher.getDocSet(new TermQuery(new Term(QueryConstants.FIELD_OWNER, authority)));
            searcher.cacheInsert(CacheConstants.ALFRESCO_OWNERLOOKUP_CACHE, authority, ownedDocs);
        }
        return new SolrOwnerScorer(weight, ownedDocs, context, searcher);
    }
    
    // Return an empty doc set, as the authority isn't a user.
    return new SolrOwnerScorer(weight, new BitDocSet(new FixedBitSet(0)), context, searcher);
}
 
/**
 * @param doc SolrDocument to check
 * @param idField field where the id is stored
 * @param fieldType type of id field
 * @param filterQuery Query to filter by
 * @param searcher SolrIndexSearcher on which to apply the filter query
 * @return the internal docid, or -1 if doc is not found or doesn't match filter
 */
private static int getFilteredInternalDocId(SolrDocument doc, SchemaField idField, FieldType fieldType,
      Query filterQuery, SolrIndexSearcher searcher) throws IOException {
  int docid = -1;
  Field f = (Field)doc.getFieldValue(idField.getName());
  String idStr = f.stringValue();
  BytesRef idBytes = new BytesRef();
  fieldType.readableToIndexed(idStr, idBytes);
  // get the internal document id
  long segAndId = searcher.lookupId(idBytes);

  // if docid is valid, run it through the filter
  if (segAndId >= 0) {
    int segid = (int) segAndId;
    AtomicReaderContext ctx = searcher.getTopReaderContext().leaves().get((int) (segAndId >> 32));
    docid = segid + ctx.docBase;
    Weight weight = filterQuery.createWeight(searcher);
    Scorer scorer = weight.scorer(ctx, null);
    if (scorer == null || segid != scorer.advance(segid)) {
      // filter doesn't match.
      docid = -1;
    }
  }
  return docid;
}
 
Example 10  Project: lucene-solr   File: AbstractPrefixTreeQuery.java
@Override
public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
  return new ConstantScoreWeight(this, boost) {
    @Override
    public Scorer scorer(LeafReaderContext context) throws IOException {
      DocIdSet docSet = getDocIdSet(context);
      if (docSet == null) {
        return null;
      }
      DocIdSetIterator disi = docSet.iterator();
      if (disi == null) {
        return null;
      }
      return new ConstantScoreScorer(this, score(), scoreMode, disi);
    }

    @Override
    public boolean isCacheable(LeafReaderContext ctx) {
      return true;
    }
  };
}
 
Example 11  Project: crate   File: GroupingLongCollectorBenchmark.java
@Benchmark
public LongObjectHashMap<Long> measureGroupingOnNumericDocValues() throws Exception {
    Weight weight = searcher.createWeight(new MatchAllDocsQuery(), ScoreMode.COMPLETE_NO_SCORES, 1.0f);
    LeafReaderContext leaf = searcher.getTopReaderContext().leaves().get(0);
    Scorer scorer = weight.scorer(leaf);
    NumericDocValues docValues = DocValues.getNumeric(leaf.reader(), "x");
    DocIdSetIterator docIt = scorer.iterator();
    LongObjectHashMap<Long> sumByKey = new LongObjectHashMap<>();
    for (int docId = docIt.nextDoc(); docId != DocIdSetIterator.NO_MORE_DOCS; docId = docIt.nextDoc()) {
        if (docValues.advanceExact(docId)) {
            long number = docValues.longValue();
            sumByKey.compute(number, (key, oldValue) -> {
                if (oldValue == null) {
                    return number;
                } else {
                    return oldValue + number;
                }
            });
        }
    }
    return sumByKey;
}
 
Example 12  Project: ltr4l   File: AbstractLTRScorer.java
public AbstractLTRScorer(Weight luceneWeight, List<FieldFeatureExtractor[]> featuresSpec,
                                   DocIdSetIterator iterator, Ranker ranker){
  super(luceneWeight);
  this.featuresSpec = featuresSpec;
  this.iterator = iterator;
  this.ranker = ranker;
}
 
Example 13  Project: lucene-solr   File: IntDocValues.java
@Override
public ValueSourceScorer getRangeScorer(Weight weight,  LeafReaderContext readerContext, String lowerVal, String upperVal, boolean includeLower, boolean includeUpper) {
  int lower,upper;

  // instead of using separate comparison functions, adjust the endpoints.

  if (lowerVal==null) {
    lower = Integer.MIN_VALUE;
  } else {
    lower = Integer.parseInt(lowerVal);
    if (!includeLower && lower < Integer.MAX_VALUE) lower++;
  }

  if (upperVal==null) {
    upper = Integer.MAX_VALUE;
  } else {
    upper = Integer.parseInt(upperVal);
    if (!includeUpper && upper > Integer.MIN_VALUE) upper--;
  }

  final int ll = lower;
  final int uu = upper;

  return new ValueSourceScorer(weight, readerContext, this) {
    @Override
    public boolean matches(int doc) throws IOException {
      if (!exists(doc)) return false;
      int val = intVal(doc);
      return val >= ll && val <= uu;
    }
  };
}
 
Example 14  Project: lucene-solr   File: RegexCompletionQuery.java
@Override
public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
  // If an empty regex is provided, we return an automaton that matches nothing. This ensures
  // consistency with PrefixCompletionQuery, which returns no results for an empty term.
  Automaton automaton = getTerm().text().isEmpty()
      ? Automata.makeEmpty()
      : new RegExp(getTerm().text(), flags).toAutomaton(maxDeterminizedStates);
  return new CompletionWeight(this, automaton);
}
 
Example 15  Project: Elasticsearch   File: ChildrenQuery.java
protected ParentWeight(Query query, Weight childWeight, Filter parentFilter, long remaining, ParentCollector collector, int minChildren, int maxChildren) {
    super(query);
    this.childWeight = childWeight;
    this.parentFilter = parentFilter;
    this.remaining = remaining;
    this.collector = collector;
    this.minChildren = minChildren;
    this.maxChildren = maxChildren;
}
 
Example 16  Project: lucene-solr   File: FunctionRangeQuery.java
@Override
public DelegatingCollector getFilterCollector(IndexSearcher searcher) {
  @SuppressWarnings({"rawtypes"})
  Map fcontext = ValueSource.newContext(searcher);
  Weight weight = rangeFilt.createWeight(searcher, ScoreMode.COMPLETE, 1);
  return new FunctionRangeCollector(fcontext, weight);
}
 
Example 17  Project: lucene-solr   File: FilterQuery.java
@Override
public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
  // SolrRequestInfo reqInfo = SolrRequestInfo.getRequestInfo();

  if (!(searcher instanceof SolrIndexSearcher)) {
    // delete-by-query won't have SolrIndexSearcher
    return new BoostQuery(new ConstantScoreQuery(q), 0).createWeight(searcher, scoreMode, 1f);
  }

  SolrIndexSearcher solrSearcher = (SolrIndexSearcher)searcher;
  DocSet docs = solrSearcher.getDocSet(q);
  // reqInfo.addCloseHook(docs);  // needed for off-heap refcounting

  return new BoostQuery(new SolrConstantScoreQuery(docs.getTopFilter()), 0).createWeight(searcher, scoreMode, 1f);
}
 
Example 18  Project: lucene-solr   File: LTRRescorer.java
@Override
public Explanation explain(IndexSearcher searcher,
    Explanation firstPassExplanation, int docID) throws IOException {

  final List<LeafReaderContext> leafContexts = searcher.getTopReaderContext()
      .leaves();
  final int n = ReaderUtil.subIndex(docID, leafContexts);
  final LeafReaderContext context = leafContexts.get(n);
  final int deBasedDoc = docID - context.docBase;
  final Weight modelWeight = searcher.createWeight(searcher.rewrite(scoringQuery),
      ScoreMode.COMPLETE, 1);
  return modelWeight.explain(context, deBasedDoc);
}
 
Example 19  Project: Elasticsearch   File: DocumentMapper.java
/**
 * Returns the best nested {@link ObjectMapper} instance that is in the scope of the specified nested docId.
 */
public ObjectMapper findNestedObjectMapper(int nestedDocId, SearchContext sc, LeafReaderContext context) throws IOException {
    ObjectMapper nestedObjectMapper = null;
    for (ObjectMapper objectMapper : objectMappers().values()) {
        if (!objectMapper.nested().isNested()) {
            continue;
        }

        Query filter = objectMapper.nestedTypeFilter();
        if (filter == null) {
            continue;
        }
        // We can pass down 'null' as acceptedDocs, because nestedDocId is a doc to be fetched and
        // therefore is guaranteed to be a live doc.
        final Weight nestedWeight = filter.createWeight(sc.searcher(), false);
        Scorer scorer = nestedWeight.scorer(context);
        if (scorer == null) {
            continue;
        }

        if (scorer.iterator().advance(nestedDocId) == nestedDocId) {
            if (nestedObjectMapper == null) {
                nestedObjectMapper = objectMapper;
            } else {
                if (nestedObjectMapper.fullPath().length() < objectMapper.fullPath().length()) {
                    nestedObjectMapper = objectMapper;
                }
            }
        }
    }
    return nestedObjectMapper;
}
 
Example 20  Project: Elasticsearch   File: IndexCacheableQuery.java
@Override
public final Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException {
    if (readerCacheKey == null) {
        throw new IllegalStateException("Rewrite first");
    }
    if (readerCacheKey != searcher.getIndexReader().getCoreCacheKey()) {
        throw new IllegalStateException("Must create weight on the same reader which has been used for rewriting");
    }
    return doCreateWeight(searcher, needsScores);
}
 
Example 21  Project: Elasticsearch   File: MinScoreScorer.java
MinScoreScorer(Weight weight, Scorer scorer, float minScore) {
    super(weight);
    if (scorer instanceof ScoreCachingWrappingScorer == false) {
        // when minScore is set, scores might be requested twice: once
        // to verify the match, and once by the collector
        scorer = new ScoreCachingWrappingScorer(scorer);
    }
    this.in = scorer;
    this.minScore = minScore;
}
 
Example 22  Project: lucene-solr   File: FunctionScoreQuery.java
@Override
public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
  ScoreMode sm;
  if (scoreMode.needsScores() && source.needsScores()) {
    sm = ScoreMode.COMPLETE;
  } else {
    sm = ScoreMode.COMPLETE_NO_SCORES;
  }
  Weight inner = in.createWeight(searcher, sm, 1f);
  if (scoreMode.needsScores() == false)
    return inner;
  return new FunctionScoreWeight(this, inner, source.rewrite(searcher), boost);
}
 
Example 23  Project: lucene-solr   File: LTRScoringQuery.java
public ModelScorer(Weight weight, List<Feature.FeatureWeight.FeatureScorer> featureScorers) {
  super(weight);
  docInfo = new DocInfo();
  for (final Feature.FeatureWeight.FeatureScorer subScorer : featureScorers) {
    subScorer.setDocInfo(docInfo);
  }
  if (featureScorers.size() <= 1) {
    // future enhancement: allow the use of dense features in other cases
    featureTraversalScorer = new DenseModelScorer(weight, featureScorers);
  } else {
    featureTraversalScorer = new SparseModelScorer(weight, featureScorers);
  }
}
 
Example 24  Project: incubator-retired-blur   File: FacetQuery.java
private Weight[] getWeights(IndexSearcher searcher) throws IOException {
  Weight[] weights = new Weight[_facets.length];
  for (int i = 0; i < weights.length; i++) {
    weights[i] = _facets[i].createWeight(searcher);
  }
  return weights;
}
 
Example 25  Project: lucene-solr   File: ShapeQuery.java
protected Scorer getScorer(final LeafReader reader, final Weight weight, final float boost, final ScoreMode scoreMode) throws IOException {
  switch (query.getQueryRelation()) {
    case INTERSECTS: return getSparseScorer(reader, weight, boost, scoreMode);
    case WITHIN:
    case DISJOINT: return getDenseScorer(reader, weight, boost, scoreMode);
    case CONTAINS: return getContainsDenseScorer(reader, weight, boost, scoreMode);
    default: throw new IllegalArgumentException("Unsupported query type :[" + query.getQueryRelation() + "]");
  }
}
 
Example 26  Project: lucene-solr   File: SpanOrQuery.java
@Override
public boolean isCacheable(LeafReaderContext ctx) {
  for (Weight w : subWeights) {
    if (w.isCacheable(ctx) == false)
      return false;
  }
  return true;
}
 
Example 27  Project: lucene-solr   File: LongDocValues.java
@Override
public ValueSourceScorer getRangeScorer(Weight weight,  LeafReaderContext readerContext, String lowerVal, String upperVal, boolean includeLower, boolean includeUpper) {
  long lower,upper;

  // instead of using separate comparison functions, adjust the endpoints.

  if (lowerVal==null) {
    lower = Long.MIN_VALUE;
  } else {
    lower = externalToLong(lowerVal);
    if (!includeLower && lower < Long.MAX_VALUE) lower++;
  }

  if (upperVal==null) {
    upper = Long.MAX_VALUE;
  } else {
    upper = externalToLong(upperVal);
    if (!includeUpper && upper > Long.MIN_VALUE) upper--;
  }

  final long ll = lower;
  final long uu = upper;

  return new ValueSourceScorer(weight, readerContext, this) {
    @Override
    public boolean matches(int doc) throws IOException {
      if (!exists(doc)) return false;
      long val = longVal(doc);
      return val >= ll && val <= uu;
    }
  };
}
 
Example 28  Project: SearchServices   File: SolrReaderSetQuery.java
@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScore) throws IOException
{
    if(!(searcher instanceof SolrIndexSearcher))
    {
        throw new IllegalStateException("Must have a SolrIndexSearcher");
    }
    return new SolrReaderSetQueryWeight((SolrIndexSearcher)searcher, this, authorities);
}
 
Example 29  Project: lucene-solr   File: ContextQuery.java
@Override
public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
  final CompletionWeight innerWeight = ((CompletionWeight) innerQuery.createWeight(searcher, scoreMode, boost));
  final Automaton innerAutomaton = innerWeight.getAutomaton();

  // If the inner automaton matches nothing, then we return an empty weight to avoid
  // traversing all contexts during scoring.
  if (innerAutomaton.getNumStates() == 0) {
    return new CompletionWeight(this, innerAutomaton);
  }

  // if separators are preserved the fst contains a SEP_LABEL
  // behind each gap. To have a matching automaton, we need to
  // include the SEP_LABEL in the query as well
  Automaton optionalSepLabel = Operations.optional(Automata.makeChar(ConcatenateGraphFilter.SEP_LABEL));
  Automaton prefixAutomaton = Operations.concatenate(optionalSepLabel, innerAutomaton);
  Automaton contextsAutomaton = Operations.concatenate(toContextAutomaton(contexts, matchAllContexts), prefixAutomaton);
  contextsAutomaton = Operations.determinize(contextsAutomaton, Operations.DEFAULT_MAX_DETERMINIZED_STATES);

  final Map<IntsRef, Float> contextMap = new HashMap<>(contexts.size());
  final TreeSet<Integer> contextLengths = new TreeSet<>();
  for (Map.Entry<IntsRef, ContextMetaData> entry : contexts.entrySet()) {
    ContextMetaData contextMetaData = entry.getValue();
    contextMap.put(entry.getKey(), contextMetaData.boost);
    contextLengths.add(entry.getKey().length);
  }
  int[] contextLengthArray = new int[contextLengths.size()];
  final Iterator<Integer> iterator = contextLengths.descendingIterator();
  for (int i = 0; iterator.hasNext(); i++) {
    contextLengthArray[i] = iterator.next();
  }
  return new ContextCompletionWeight(this, contextsAutomaton, innerWeight, contextMap, contextLengthArray);
}
 
Example 30  Project: lucene-solr   File: ScoreJoinQParserPlugin.java
@Override
public Weight createWeight(IndexSearcher searcher, org.apache.lucene.search.ScoreMode scoreMode, float boost) throws IOException {
  SolrRequestInfo info = SolrRequestInfo.getRequestInfo();
  final Query jq = JoinUtil.createJoinQuery(fromField, true,
      toField, fromQuery, info.getReq().getSearcher(), this.scoreMode);
  return jq.rewrite(searcher.getIndexReader()).createWeight(searcher, scoreMode, boost);
}
 