下面列出了 org.apache.lucene.index.Terms#hasPositions() 的实例代码;也可以点击链接到 GitHub 查看源代码,或在右侧发表评论。
/**
 * Serializes the per-occurrence token data (position, offsets, payload) of the
 * current term into {@code builder} as a {@code tokens} array.
 *
 * @param builder  destination content builder; one object is emitted per occurrence
 * @param curTerms terms metadata used to decide which attributes are present
 * @param termFreq number of occurrences of the current term (entries to emit)
 * @throws IOException if the builder fails to write
 */
private void buildValues(XContentBuilder builder, Terms curTerms, int termFreq) throws IOException {
    // Nothing to emit when the field stores no per-position attributes at all.
    if (!(curTerms.hasPayloads() || curTerms.hasOffsets() || curTerms.hasPositions())) {
        return;
    }
    builder.startArray(FieldStrings.TOKENS);
    for (int i = 0; i < termFreq; i++) {
        builder.startObject();
        if (curTerms.hasPositions()) {
            builder.field(FieldStrings.POS, currentPositions[i]);
        }
        if (curTerms.hasOffsets()) {
            builder.field(FieldStrings.START_OFFSET, currentStartOffset[i]);
            builder.field(FieldStrings.END_OFFSET, currentEndOffset[i]);
        }
        // BUGFIX: currentPayloads[i] can be null — initValues stores null when the
        // postings enum returns no payload for an occurrence — so guard against an
        // NPE before checking its length.
        if (curTerms.hasPayloads() && currentPayloads[i] != null && currentPayloads[i].length() > 0) {
            builder.field(FieldStrings.PAYLOAD, currentPayloads[i]);
        }
        builder.endObject();
    }
    builder.endArray();
}
/**
 * Copies position, offset and payload data for each occurrence of the current
 * term from the postings enum into the reusable per-term buffers.
 *
 * @param curTerms terms metadata used to decide which attributes to read
 * @param posEnum  postings enum positioned on the current document
 * @param termFreq number of occurrences to read
 * @throws IOException on underlying index read failure
 */
private void initValues(Terms curTerms, PostingsEnum posEnum, int termFreq) throws IOException {
    for (int occ = 0; occ < termFreq; occ++) {
        // nextPosition() must be invoked once per occurrence to advance the enum,
        // even when positions themselves are not stored.
        final int position = posEnum.nextPosition();
        if (curTerms.hasPositions()) {
            currentPositions[occ] = position;
        }
        if (curTerms.hasOffsets()) {
            currentStartOffset[occ] = posEnum.startOffset();
            currentEndOffset[occ] = posEnum.endOffset();
        }
        if (curTerms.hasPayloads()) {
            final BytesRef payload = posEnum.getPayload();
            // NOTE(review): copies from index 0 and ignores payload.offset —
            // assumes the returned BytesRef always starts at offset 0; verify.
            currentPayloads[occ] = (payload == null)
                    ? null
                    : new BytesArray(payload.bytes, 0, payload.length);
        }
    }
}
/**
 * Builds an {@link IntervalIterator} for {@code term} on the given segment.
 * Requires both positions and payloads to be indexed for the field.
 *
 * @return the iterator, or {@code null} if the field or term is absent
 * @throws IllegalArgumentException if positions or payloads are not indexed
 */
@Override
public IntervalIterator intervals(String field, LeafReaderContext ctx) throws IOException {
    final Terms terms = ctx.reader().terms(field);
    if (terms == null) {
        return null; // field not present in this segment
    }
    if (!terms.hasPositions()) {
        throw new IllegalArgumentException("Cannot create an IntervalIterator over field " + field + " because it has no indexed positions");
    }
    if (!terms.hasPayloads()) {
        throw new IllegalArgumentException("Cannot create a payload-filtered iterator over field " + field + " because it has no indexed payloads");
    }
    final TermsEnum termsEnum = terms.iterator();
    if (!termsEnum.seekExact(term)) {
        return null; // term not present in this segment
    }
    return intervals(termsEnum);
}
/**
 * Builds an {@link IntervalMatchesIterator} for {@code term} on the given
 * segment and document. Requires positions and payloads to be indexed.
 *
 * @return the matches iterator, or {@code null} if the field or term is absent
 * @throws IllegalArgumentException if positions or payloads are not indexed
 */
@Override
public IntervalMatchesIterator matches(String field, LeafReaderContext ctx, int doc) throws IOException {
    final Terms terms = ctx.reader().terms(field);
    if (terms == null) {
        return null; // field not present in this segment
    }
    if (!terms.hasPositions()) {
        throw new IllegalArgumentException("Cannot create an IntervalIterator over field " + field + " because it has no indexed positions");
    }
    if (!terms.hasPayloads()) {
        throw new IllegalArgumentException("Cannot create a payload-filtered iterator over field " + field + " because it has no indexed payloads");
    }
    final TermsEnum termsEnum = terms.iterator();
    if (!termsEnum.seekExact(term)) {
        return null; // term not present in this segment
    }
    return matches(termsEnum, doc);
}
/**
 * Creates a {@link SpanScorer} over this segment, or {@code null} when the
 * field is unset or no spans match.
 *
 * @throws IllegalStateException if the field exists but was indexed without positions
 */
@Override
public SpanScorer scorer(LeafReaderContext context) throws IOException {
    // No field means this weight can never match anything.
    if (field == null) {
        return null;
    }
    final Terms terms = context.reader().terms(field);
    if (terms != null && !terms.hasPositions()) {
        throw new IllegalStateException("field \"" + field + "\" was indexed without position data; cannot run SpanQuery (query=" + parentQuery + ")");
    }
    final Spans spans = getSpans(context, Postings.PAYLOADS);
    if (spans == null) {
        return null; // no matching spans in this segment
    }
    final LeafSimScorer docScorer = getSimScorer(context);
    return new SpanScorer(this, spans, docScorer);
}
/**
 * Creates a {@link SpanScorer} over this segment, or {@code null} when the
 * field is unset or no spans match.
 *
 * @throws IllegalStateException if the field exists but was indexed without positions
 */
@Override
public SpanScorer scorer(LeafReaderContext context) throws IOException {
    // No field means this weight can never match anything.
    if (field == null) {
        return null;
    }
    final Terms terms = context.reader().terms(field);
    if (terms != null && terms.hasPositions() == false) {
        throw new IllegalStateException("field \"" + field +
                "\" was indexed without position data; cannot run SpanQuery (query=" + parentQuery + ")");
    }
    final Spans spans = getSpans(context, Postings.PAYLOADS);
    if (spans == null) {
        return null; // no matching spans in this segment
    }
    return new SpanScorer(this, spans, getSimScorer(context));
}
/**
 * Ensures the reusable per-term buffers can hold {@code freq} entries,
 * sizing only the buffers the field actually stores data for.
 *
 * @param terms terms metadata used to decide which buffers are needed
 * @param freq  required capacity (term frequency)
 */
private void initMemory(Terms terms, int freq) {
    // Grow the reusable buffers up front for performance.
    if (terms.hasPositions()) {
        currentPositions = ArrayUtil.grow(currentPositions, freq);
    }
    if (terms.hasOffsets()) {
        currentStartOffset = ArrayUtil.grow(currentStartOffset, freq);
        currentEndOffset = ArrayUtil.grow(currentEndOffset, freq);
    }
    if (terms.hasPayloads()) {
        // Object buffer is freshly allocated rather than grown in place.
        currentPayloads = new BytesArray[freq];
    }
}
/**
 * Wraps a single field's term vector as a one-field leaf reader view.
 *
 * @param field the field name the supplied terms belong to
 * @param terms the term vector for that field
 */
public TermVectorLeafReader(String field, Terms terms) {
    // Single-entry Fields view that only answers for the wrapped field.
    fields = new Fields() {
        @Override
        public Iterator<String> iterator() {
            return Collections.singletonList(field).iterator();
        }

        @Override
        public Terms terms(String fld) throws IOException {
            return field.equals(fld) ? terms : null;
        }

        @Override
        public int size() {
            return 1;
        }
    };
    // Derive the richest IndexOptions the term vector's stored data supports.
    final IndexOptions indexOptions;
    if (!terms.hasFreqs()) {
        indexOptions = IndexOptions.DOCS;
    } else if (!terms.hasPositions()) {
        indexOptions = IndexOptions.DOCS_AND_FREQS;
    } else if (!terms.hasOffsets()) {
        indexOptions = IndexOptions.DOCS_AND_FREQS_AND_POSITIONS;
    } else {
        indexOptions = IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS;
    }
    final FieldInfo fieldInfo = new FieldInfo(field, 0,
            true, true, terms.hasPayloads(),
            indexOptions, DocValuesType.NONE, -1, Collections.emptyMap(), 0, 0, 0, false);
    fieldInfos = new FieldInfos(new FieldInfo[]{fieldInfo});
}
/**
 * Builds an {@link IntervalIterator} for {@code term} on the given segment.
 *
 * @return the iterator, or {@code null} if the field or term is absent
 * @throws IllegalArgumentException if the field has no indexed positions
 */
@Override
public IntervalIterator intervals(String field, LeafReaderContext ctx) throws IOException {
    final Terms terms = ctx.reader().terms(field);
    if (terms == null) {
        return null; // field not present in this segment
    }
    if (!terms.hasPositions()) {
        throw new IllegalArgumentException("Cannot create an IntervalIterator over field " + field + " because it has no indexed positions");
    }
    final TermsEnum termsEnum = terms.iterator();
    if (!termsEnum.seekExact(term)) {
        return null; // term not present in this segment
    }
    return intervals(term, termsEnum);
}
/**
 * Builds an {@link IntervalMatchesIterator} for {@code term} on the given
 * segment and document.
 *
 * @return the matches iterator, or {@code null} if the field or term is absent
 * @throws IllegalArgumentException if the field has no indexed positions
 */
@Override
public IntervalMatchesIterator matches(String field, LeafReaderContext ctx, int doc) throws IOException {
    final Terms terms = ctx.reader().terms(field);
    if (terms == null) {
        return null; // field not present in this segment
    }
    if (!terms.hasPositions()) {
        throw new IllegalArgumentException("Cannot create an IntervalIterator over field " + field + " because it has no indexed positions");
    }
    final TermsEnum termsEnum = terms.iterator();
    if (!termsEnum.seekExact(term)) {
        return null; // term not present in this segment
    }
    return matches(termsEnum, doc, field);
}
/**
 * Reports match positions for this query's terms in the given document,
 * falling back to the superclass when positions are unavailable.
 */
@Override
public Matches matches(LeafReaderContext context, int doc) throws IOException {
    final String field = terms[0].term.field();
    final Terms indexTerms = context.reader().terms(field);
    // Without indexed positions, delegate to the default (position-less) impl.
    if (indexTerms == null || !indexTerms.hasPositions()) {
        return super.matches(context, doc);
    }
    final List<Term> termList = new ArrayList<>(terms.length);
    for (TermAndBoost termAndBoost : terms) {
        termList.add(termAndBoost.getTerm());
    }
    return MatchesUtils.forField(field, () -> DisjunctionMatchesIterator.fromTerms(context, doc, getQuery(), field, termList));
}
/**
 * Asserts that two {@link Terms} instances are equivalent: statistics, the
 * full terms enum, seeking behavior, and (when {@code deep}) a few random
 * automaton intersections.
 *
 * @param deep when true, also compares random regexp intersections
 */
public void assertTerms(Terms leftTerms, Terms rightTerms, boolean deep) throws Exception {
    if (leftTerms == null || rightTerms == null) {
        assertNull(leftTerms);
        assertNull(rightTerms);
        return;
    }
    assertTermsStatistics(leftTerms, rightTerms);
    // NOTE: hasOffsets/hasPositions/hasPayloads are allowed to differ between the two,
    // so only compare freqs/positions when both sides store them.
    final boolean compareFreqs = leftTerms.hasFreqs() && rightTerms.hasFreqs();
    final boolean comparePositions = leftTerms.hasPositions() && rightTerms.hasPositions();
    assertTermsEnum(leftTerms.iterator(), rightTerms.iterator(), true, compareFreqs, comparePositions);
    assertTermsSeeking(leftTerms, rightTerms);
    if (!deep) {
        return;
    }
    final int numIntersections = atLeast(3);
    for (int i = 0; i < numIntersections; i++) {
        final String regexp = AutomatonTestUtil.randomRegexp(random());
        final CompiledAutomaton automaton = new CompiledAutomaton(new RegExp(regexp, RegExp.NONE).toAutomaton());
        if (automaton.type != CompiledAutomaton.AUTOMATON_TYPE.NORMAL) {
            continue; // only NORMAL automata support intersect()
        }
        // TODO: test start term too
        assertTermsEnum(leftTerms.intersect(automaton, null),
                rightTerms.intersect(automaton, null),
                rarely(), compareFreqs, comparePositions);
    }
}
/**
 * Reports match positions for the range query's terms in the given document.
 * Returns {@code null} when the field is absent, and delegates to the
 * superclass when positions are not indexed.
 */
@Override
public Matches matches(LeafReaderContext context, int doc) throws IOException {
    final SolrRangeQuery query = SolrRangeQuery.this;
    final Terms terms = context.reader().terms(query.field);
    if (terms == null) {
        return null; // field not present in this segment
    }
    if (!terms.hasPositions()) {
        return super.matches(context, doc); // no positions: use the default impl
    }
    return MatchesUtils.forField(query.field, () -> MatchesUtils.disjunction(context, doc, query, query.field, query.getTermsEnum(context)));
}
/**
 * Verifies the field's terms were indexed with positions; phrase matching
 * is impossible without them.
 *
 * @throws IllegalStateException if positions are not indexed for the field
 */
protected void checkTermsHavePositions(Terms terms) {
    if (terms.hasPositions() == false) {
        throw new IllegalStateException("field \"" + field + "\" was indexed without position data;" +
                " cannot run " + PhraseWildcardQuery.class.getSimpleName());
    }
}