org.apache.lucene.index.Terms#hasPositions() source code examples

Listed below is example code showing how org.apache.lucene.index.Terms#hasPositions() is used in open-source projects such as Elasticsearch and lucene-solr; the full source files can be found in the respective projects on GitHub.
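
Before the project examples, here is a minimal standalone sketch (not taken from any project below; the reader, field, and term arguments are assumed placeholders) of the usual pattern: check Terms#hasPositions() before asking a PostingsEnum for positions, since nextPosition() is only meaningful for fields indexed with positions.

import java.io.IOException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.util.BytesRef;

// Minimal sketch: guard position access with hasPositions() on every segment.
static void printPositions(IndexReader reader, String field, BytesRef term) throws IOException {
    for (LeafReaderContext ctx : reader.leaves()) {
        Terms terms = ctx.reader().terms(field);
        if (terms == null || !terms.hasPositions()) {
            continue; // field absent in this segment, or indexed without positions
        }
        TermsEnum te = terms.iterator();
        if (!te.seekExact(term)) {
            continue; // term not present in this segment
        }
        PostingsEnum postings = te.postings(null, PostingsEnum.POSITIONS);
        while (postings.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
            int freq = postings.freq();
            for (int i = 0; i < freq; i++) {
                System.out.println("doc=" + postings.docID() + " position=" + postings.nextPosition());
            }
        }
    }
}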

Example 1   Project: Elasticsearch   File: TermVectorsResponse.java
private void buildValues(XContentBuilder builder, Terms curTerms, int termFreq) throws IOException {
    if (!(curTerms.hasPayloads() || curTerms.hasOffsets() || curTerms.hasPositions())) {
        return;
    }

    builder.startArray(FieldStrings.TOKENS);
    for (int i = 0; i < termFreq; i++) {
        builder.startObject();
        if (curTerms.hasPositions()) {
            builder.field(FieldStrings.POS, currentPositions[i]);
        }
        if (curTerms.hasOffsets()) {
            builder.field(FieldStrings.START_OFFSET, currentStartOffset[i]);
            builder.field(FieldStrings.END_OFFSET, currentEndOffset[i]);
        }
        if (curTerms.hasPayloads() && (currentPayloads[i].length() > 0)) {
            builder.field(FieldStrings.PAYLOAD, currentPayloads[i]);
        }
        builder.endObject();
    }
    builder.endArray();
}
 
Example 2   Project: Elasticsearch   File: TermVectorsResponse.java
private void initValues(Terms curTerms, PostingsEnum posEnum, int termFreq) throws IOException {
    for (int j = 0; j < termFreq; j++) {
        int nextPos = posEnum.nextPosition();
        if (curTerms.hasPositions()) {
            currentPositions[j] = nextPos;
        }
        if (curTerms.hasOffsets()) {
            currentStartOffset[j] = posEnum.startOffset();
            currentEndOffset[j] = posEnum.endOffset();
        }
        if (curTerms.hasPayloads()) {
            BytesRef curPayload = posEnum.getPayload();
            if (curPayload != null) {
                currentPayloads[j] = new BytesArray(curPayload.bytes, 0, curPayload.length);
            } else {
                currentPayloads[j] = null;
            }
        }
    }
}
 
Example 3
@Override
public IntervalIterator intervals(String field, LeafReaderContext ctx) throws IOException {
  Terms terms = ctx.reader().terms(field);
  if (terms == null)
    return null;
  if (terms.hasPositions() == false) {
    throw new IllegalArgumentException("Cannot create an IntervalIterator over field " + field + " because it has no indexed positions");
  }
  if (terms.hasPayloads() == false) {
    throw new IllegalArgumentException("Cannot create a payload-filtered iterator over field " + field + " because it has no indexed payloads");
  }
  TermsEnum te = terms.iterator();
  if (te.seekExact(term) == false) {
    return null;
  }
  return intervals(te);
}
 
Example 4
@Override
public IntervalMatchesIterator matches(String field, LeafReaderContext ctx, int doc) throws IOException {
  Terms terms = ctx.reader().terms(field);
  if (terms == null)
    return null;
  if (terms.hasPositions() == false) {
    throw new IllegalArgumentException("Cannot create an IntervalIterator over field " + field + " because it has no indexed positions");
  }
  if (terms.hasPayloads() == false) {
    throw new IllegalArgumentException("Cannot create a payload-filtered iterator over field " + field + " because it has no indexed payloads");
  }
  TermsEnum te = terms.iterator();
  if (te.seekExact(term) == false) {
    return null;
  }
  return matches(te, doc);
}
 
Example 5   Project: lucene-solr   File: SpanPayloadCheckQuery.java
@Override
public SpanScorer scorer(LeafReaderContext context) throws IOException {
  if (field == null)
    return null;

  Terms terms = context.reader().terms(field);
  if (terms != null && terms.hasPositions() == false) {
    throw new IllegalStateException("field \"" + field + "\" was indexed without position data; cannot run SpanQuery (query=" + parentQuery + ")");
  }

  final Spans spans = getSpans(context, Postings.PAYLOADS);
  if (spans == null) {
    return null;
  }
  final LeafSimScorer docScorer = getSimScorer(context);
  return new SpanScorer(this, spans, docScorer);
}
 
Example 6
@Override
public SpanScorer scorer(LeafReaderContext context) throws IOException {
    if (field == null)
        return null;

    Terms terms = context.reader().terms(field);
    if (terms != null && !terms.hasPositions()) {
        throw new IllegalStateException("field \"" + field +
                "\" was indexed without position data; cannot run SpanQuery (query=" + parentQuery + ")");
    }

    final Spans spans = getSpans(context, Postings.PAYLOADS);
    if (spans == null) {
        return null;
    }
    final Similarity.SimScorer docScorer = getSimScorer(context);
    return new SpanScorer(this, spans, docScorer);
}
 
Example 7   Project: Elasticsearch   File: TermVectorsResponse.java
private void initMemory(Terms curTerms, int termFreq) {
    // init memory for performance reasons
    if (curTerms.hasPositions()) {
        currentPositions = ArrayUtil.grow(currentPositions, termFreq);
    }
    if (curTerms.hasOffsets()) {
        currentStartOffset = ArrayUtil.grow(currentStartOffset, termFreq);
        currentEndOffset = ArrayUtil.grow(currentEndOffset, termFreq);
    }
    if (curTerms.hasPayloads()) {
        currentPayloads = new BytesArray[termFreq];
    }
}
 
Example 8   Project: lucene-solr   File: TermVectorLeafReader.java
public TermVectorLeafReader(String field, Terms terms) {
  fields = new Fields() {
    @Override
    public Iterator<String> iterator() {
      return Collections.singletonList(field).iterator();
    }

    @Override
    public Terms terms(String fld) throws IOException {
      if (!field.equals(fld)) {
        return null;
      }
      return terms;
    }

    @Override
    public int size() {
      return 1;
    }
  };

  IndexOptions indexOptions;
  if (!terms.hasFreqs()) {
    indexOptions = IndexOptions.DOCS;
  } else if (!terms.hasPositions()) {
    indexOptions = IndexOptions.DOCS_AND_FREQS;
  } else if (!terms.hasOffsets()) {
    indexOptions = IndexOptions.DOCS_AND_FREQS_AND_POSITIONS;
  } else {
    indexOptions = IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS;
  }
  FieldInfo fieldInfo = new FieldInfo(field, 0,
                                      true, true, terms.hasPayloads(),
                                      indexOptions, DocValuesType.NONE, -1, Collections.emptyMap(), 0, 0, 0, false);
  fieldInfos = new FieldInfos(new FieldInfo[]{fieldInfo});
}
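
Example 8 above derives IndexOptions from the hasFreqs/hasPositions/hasOffsets flags of a term-vector Terms instance. For such a Terms to report hasPositions() == true, the field must have been indexed with term-vector positions. A minimal indexing-side sketch follows (the field name "body" and the helper method are placeholders, not part of Example 8):

import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.TextField;

// Sketch: store term vectors with positions (and optionally offsets/payloads), so that the
// Terms returned by IndexReader.getTermVector(docId, "body") reports hasPositions() == true.
static Document docWithPositionedTermVectors(String text) {
    FieldType vectorType = new FieldType(TextField.TYPE_NOT_STORED);
    vectorType.setStoreTermVectors(true);
    vectorType.setStoreTermVectorPositions(true);
    vectorType.setStoreTermVectorOffsets(true);   // optional: makes hasOffsets() true as well
    vectorType.setStoreTermVectorPayloads(true);  // optional: makes hasPayloads() true as well
    vectorType.freeze();

    Document doc = new Document();
    doc.add(new Field("body", text, vectorType));
    return doc;
}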
 
Example 9   Project: lucene-solr   File: TermIntervalsSource.java
@Override
public IntervalIterator intervals(String field, LeafReaderContext ctx) throws IOException {
  Terms terms = ctx.reader().terms(field);
  if (terms == null)
    return null;
  if (terms.hasPositions() == false) {
    throw new IllegalArgumentException("Cannot create an IntervalIterator over field " + field + " because it has no indexed positions");
  }
  TermsEnum te = terms.iterator();
  if (te.seekExact(term) == false) {
    return null;
  }
  return intervals(term, te);
}
 
Example 10   Project: lucene-solr   File: TermIntervalsSource.java
@Override
public IntervalMatchesIterator matches(String field, LeafReaderContext ctx, int doc) throws IOException {
  Terms terms = ctx.reader().terms(field);
  if (terms == null)
    return null;
  if (terms.hasPositions() == false) {
    throw new IllegalArgumentException("Cannot create an IntervalIterator over field " + field + " because it has no indexed positions");
  }
  TermsEnum te = terms.iterator();
  if (te.seekExact(term) == false) {
    return null;
  }
  return matches(te, doc, field);
}
 
Example 11   Project: lucene-solr   File: SynonymQuery.java
@Override
public Matches matches(LeafReaderContext context, int doc) throws IOException {
  String field = terms[0].term.field();
  Terms indexTerms = context.reader().terms(field);
  if (indexTerms == null || indexTerms.hasPositions() == false) {
    return super.matches(context, doc);
  }
  List<Term> termList = Arrays.stream(terms)
      .map(TermAndBoost::getTerm)
      .collect(Collectors.toList());
  return MatchesUtils.forField(field, () -> DisjunctionMatchesIterator.fromTerms(context, doc, getQuery(), field, termList));
}
 
Example 12   Project: lucene-solr   File: TestBlockPostingsFormat3.java
public void assertTerms(Terms leftTerms, Terms rightTerms, boolean deep) throws Exception {
  if (leftTerms == null || rightTerms == null) {
    assertNull(leftTerms);
    assertNull(rightTerms);
    return;
  }
  assertTermsStatistics(leftTerms, rightTerms);
  
  // NOTE: we don't assert hasOffsets/hasPositions/hasPayloads because they are allowed to be different

  boolean bothHaveFreqs = leftTerms.hasFreqs() && rightTerms.hasFreqs();
  boolean bothHavePositions = leftTerms.hasPositions() && rightTerms.hasPositions();
  TermsEnum leftTermsEnum = leftTerms.iterator();
  TermsEnum rightTermsEnum = rightTerms.iterator();
  assertTermsEnum(leftTermsEnum, rightTermsEnum, true, bothHaveFreqs, bothHavePositions);
  
  assertTermsSeeking(leftTerms, rightTerms);
  
  if (deep) {
    int numIntersections = atLeast(3);
    for (int i = 0; i < numIntersections; i++) {
      String re = AutomatonTestUtil.randomRegexp(random());
      CompiledAutomaton automaton = new CompiledAutomaton(new RegExp(re, RegExp.NONE).toAutomaton());
      if (automaton.type == CompiledAutomaton.AUTOMATON_TYPE.NORMAL) {
        // TODO: test start term too
        TermsEnum leftIntersection = leftTerms.intersect(automaton, null);
        TermsEnum rightIntersection = rightTerms.intersect(automaton, null);
        assertTermsEnum(leftIntersection, rightIntersection, rarely(), bothHaveFreqs, bothHavePositions);
      }
    }
  }
}
 
Example 13   Project: lucene-solr   File: SolrRangeQuery.java
@Override
public Matches matches(LeafReaderContext context, int doc) throws IOException {
  SolrRangeQuery query = SolrRangeQuery.this;
  final Terms terms = context.reader().terms(query.field);
  if (terms == null) {
    return null;
  }
  if (terms.hasPositions() == false) {
    return super.matches(context, doc);
  }
  return MatchesUtils.forField(query.field, () -> MatchesUtils.disjunction(context, doc, query, query.field, query.getTermsEnum(context)));
}
 
Example 14   Project: lucene-solr   File: PhraseWildcardQuery.java
protected void checkTermsHavePositions(Terms terms) {
  if (!terms.hasPositions()) {
    throw new IllegalStateException("field \"" + field + "\" was indexed without position data;" +
        " cannot run " + PhraseWildcardQuery.class.getSimpleName());
  }
}
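
Examples 5, 6, and 14 all reject fields that were "indexed without position data". Whether hasPositions() returns true is decided at indexing time by the field's IndexOptions. The sketch below (field names and the helper method are placeholders, not from any example above) shows one way to index a field so that position-dependent queries can run:

import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexOptions;

// Sketch: positions are recorded when the field's IndexOptions include them. TextField already
// defaults to DOCS_AND_FREQS_AND_POSITIONS; the explicit FieldType below also records offsets.
static Document docWithPositions(String text) {
    FieldType withPositions = new FieldType(TextField.TYPE_NOT_STORED);
    withPositions.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
    withPositions.freeze();

    Document doc = new Document();
    doc.add(new Field("body", text, withPositions));        // hasPositions() will be true for "body"
    doc.add(new TextField("title", text, Field.Store.NO));  // TextField also indexes positions by default
    return doc;
}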