Class org.apache.lucene.search.spans.SpanQuery: source code examples

The examples below show how to use the org.apache.lucene.search.spans.SpanQuery API in practice. Each snippet is taken from an open-source project; the original source files can be viewed on GitHub.
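Before the project snippets, here is a minimal, self-contained sketch of the core idea: SpanTermQuery clauses composed into a SpanNearQuery and run against an in-memory index. It assumes Lucene 8.x (the `totalHits.value` calls in the snippets below suggest that version); the class name SpanQueryDemo, the field name "body", and the sample text are illustrative only and do not come from the listed projects.

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.spans.SpanNearQuery;
import org.apache.lucene.search.spans.SpanQuery;
import org.apache.lucene.search.spans.SpanTermQuery;
import org.apache.lucene.store.ByteBuffersDirectory;
import org.apache.lucene.store.Directory;

public class SpanQueryDemo {
  public static void main(String[] args) throws Exception {
    Directory dir = new ByteBuffersDirectory();
    // Index a single illustrative document.
    try (IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()))) {
      Document doc = new Document();
      doc.add(new TextField("body", "the quick brown fox jumps over the lazy dog", Field.Store.NO));
      writer.addDocument(doc);
    }
    try (DirectoryReader reader = DirectoryReader.open(dir)) {
      IndexSearcher searcher = new IndexSearcher(reader);
      // "quick" followed by "fox" with at most one intervening position, in order.
      SpanQuery near = new SpanNearQuery(new SpanQuery[] {
          new SpanTermQuery(new Term("body", "quick")),
          new SpanTermQuery(new Term("body", "fox"))
      }, 1, true);
      TopDocs hits = searcher.search(near, 10);
      System.out.println("matching docs: " + hits.totalHits.value); // expected: 1
    }
  }
}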

Example 1 (project: lucene-solr, file: HighlighterTest.java)
public void testSpanHighlighting() throws Exception {
  Query query1 = new SpanNearQuery(new SpanQuery[] {
      new SpanTermQuery(new Term(FIELD_NAME, "wordx")),
      new SpanTermQuery(new Term(FIELD_NAME, "wordy")) }, 1, false);
  Query query2 = new SpanNearQuery(new SpanQuery[] {
      new SpanTermQuery(new Term(FIELD_NAME, "wordy")),
      new SpanTermQuery(new Term(FIELD_NAME, "wordc")) }, 1, false);
  BooleanQuery.Builder bquery = new BooleanQuery.Builder();
  bquery.add(query1, Occur.SHOULD);
  bquery.add(query2, Occur.SHOULD);
  doSearching(bquery.build());
  TestHighlightRunner helper = new TestHighlightRunner() {

    @Override
    public void run() throws Exception {
      mode = QUERY;
      doStandardHighlights(analyzer, searcher, hits, query, HighlighterTest.this);
    }
  };

  helper.run();
  assertTrue("Failed to find correct number of highlights " + numHighlights + " found",
      numHighlights == 7);
}
 
Example 2 (project: lucene-solr, file: HighlighterTest.java)
public void testNotSpanSimpleQuery() throws Exception {
  doSearching(new SpanNotQuery(new SpanNearQuery(new SpanQuery[] {
      new SpanTermQuery(new Term(FIELD_NAME, "shot")),
      new SpanTermQuery(new Term(FIELD_NAME, "kennedy")) }, 3, false), new SpanTermQuery(
      new Term(FIELD_NAME, "john"))));
  TestHighlightRunner helper = new TestHighlightRunner() {

    @Override
    public void run() throws Exception {
      mode = QUERY;
      doStandardHighlights(analyzer, searcher, hits, query, HighlighterTest.this);
    }
  };

  helper.run();
  assertTrue("Failed to find correct number of highlights " + numHighlights + " found",
      numHighlights == 4);
}
 
Example 3 (project: lucene-solr, file: SpanNearBuilder.java)
@Override
public SpanQuery getSpanQuery(Element e) throws ParserException {
  String slopString = DOMUtils.getAttributeOrFail(e, "slop");
  int slop = Integer.parseInt(slopString);
  boolean inOrder = DOMUtils.getAttribute(e, "inOrder", false);
  List<SpanQuery> spans = new ArrayList<>();
  for (Node kid = e.getFirstChild(); kid != null; kid = kid.getNextSibling()) {
    if (kid.getNodeType() == Node.ELEMENT_NODE) {
      spans.add(factory.getSpanQuery((Element) kid));
    }
  }
  SpanQuery[] spanQueries = spans.toArray(new SpanQuery[spans.size()]);
  SpanQuery snq = new SpanNearQuery(spanQueries, slop, inOrder);
  float boost = DOMUtils.getAttribute(e, "boost", 1.0f);
  return new SpanBoostQuery(snq, boost);
}
 
public void testFilteredOutSpan() throws IOException {
  indexWriter.addDocument(newDoc("freezing cold stuff like stuff freedom of speech"));
  initReaderSearcherHighlighter();

  WildcardQuery wildcardQuery = new WildcardQuery(new Term("body", "free*"));
  SpanMultiTermQueryWrapper<WildcardQuery> wildcardSpanQuery = new SpanMultiTermQueryWrapper<>(wildcardQuery);
  SpanTermQuery termQuery = new SpanTermQuery(new Term("body", "speech"));
  SpanQuery spanQuery = new SpanNearQuery(new SpanQuery[]{wildcardSpanQuery, termQuery}, 3, false);

  BooleanQuery query = new BooleanQuery.Builder()
      .add(spanQuery, BooleanClause.Occur.MUST)
      .build();

  TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER);
  String[] snippets = highlighter.highlight("body", query, topDocs);
  // spans' MatchesIterator exposes each underlying term; thus doesn't enclose intermediate "of"
  assertArrayEquals(new String[]{"freezing cold stuff like stuff <b>freedom</b> of <b>speech</b>"}, snippets);
}
 
public void testReverseOrderSpanCollection() throws IOException {
  // Processing order may depend on various optimizations or other weird factors.
  indexWriter.addDocument(newDoc("alpha bravo - alpha charlie"));
  indexWriter.addDocument(newDoc("alpha charlie - alpha bravo"));
  initReaderSearcherHighlighter();

  SpanNearQuery query = new SpanNearQuery(new SpanQuery[]{
      new SpanNearQuery(new SpanQuery[]{
          new SpanTermQuery(new Term("body", "alpha")),
          new SpanTermQuery(new Term("body", "bravo"))
      }, 0, true),
      new SpanNearQuery(new SpanQuery[]{
          new SpanTermQuery(new Term("body", "alpha")),
          new SpanTermQuery(new Term("body", "charlie"))
      }, 0, true)
  }, 10, false);

  TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER);
  String[] snippets = highlighter.highlight("body", query, topDocs);

  assertArrayEquals(new String[]{
          "<b>alpha</b> <b>bravo</b> - <b>alpha</b> <b>charlie</b>",
          "<b>alpha</b> <b>charlie</b> - <b>alpha</b> <b>bravo</b>",
      },
      snippets);
}
 
Example 6 (project: lucene-solr, file: DistanceQuery.java)
public Query getSpanNearQuery(
        IndexReader reader,
        String fieldName,
        BasicQueryFactory qf) throws IOException {
  SpanQuery[] spanClauses = new SpanQuery[getNrSubQueries()];
  Iterator<?> sqi = getSubQueriesIterator();
  int qi = 0;
  while (sqi.hasNext()) {
    SpanNearClauseFactory sncf = new SpanNearClauseFactory(reader, fieldName, qf);
    
    ((DistanceSubQuery)sqi.next()).addSpanQueries(sncf);
    if (sncf.size() == 0) { /* distance operator requires all sub queries */
      while (sqi.hasNext()) { /* produce evt. error messages but ignore results */
        ((DistanceSubQuery)sqi.next()).addSpanQueries(sncf);
        sncf.clear();
      }
      return new MatchNoDocsQuery();
    }
    
    spanClauses[qi] = sncf.makeSpanClause();
    qi++;
  }

  return new SpanNearQuery(spanClauses, getOpDistance() - 1, subQueriesOrdered());
}
 
Example 7 (project: lucene-solr, file: TestPayloadScoreQuery.java)
private static void checkQuery(SpanQuery query, PayloadFunction function, boolean includeSpanScore, int[] expectedDocs, float[] expectedScores) throws IOException {

    assertTrue("Expected docs and scores arrays must be the same length!", expectedDocs.length == expectedScores.length);

    PayloadScoreQuery psq = new PayloadScoreQuery(query, function, PayloadDecoder.FLOAT_DECODER, includeSpanScore);
    TopDocs hits = searcher.search(psq, expectedDocs.length);

    for (int i = 0; i < hits.scoreDocs.length; i++) {
      if (i > expectedDocs.length - 1)
        fail("Unexpected hit in document " + hits.scoreDocs[i].doc);
      if (hits.scoreDocs[i].doc != expectedDocs[i])
        fail("Unexpected hit in document " + hits.scoreDocs[i].doc);
      assertEquals("Bad score in document " + expectedDocs[i], expectedScores[i], hits.scoreDocs[i].score, 0.000001);
    }

    if (hits.scoreDocs.length > expectedDocs.length)
      fail("Unexpected hit in document " + hits.scoreDocs[expectedDocs.length]);

    QueryUtils.check(random(), psq, searcher);
  }
 
Example 8 (project: lucene-solr, file: TestPayloadScoreQuery.java)
@Test
public void testNearQuery() throws IOException {

  //   2     4
  // twenty two
  //  2     4      4     4
  // one hundred twenty two

  SpanNearQuery q = new SpanNearQuery(new SpanQuery[]{
                      new SpanTermQuery(new Term("field", "twenty")),
                      new SpanTermQuery(new Term("field", "two"))
                    }, 0, true);

  checkQuery(q, new MaxPayloadFunction(), new int[]{ 22, 122, 222 }, new float[]{ 4.0f, 4.0f, 4.0f });
  checkQuery(q, new MinPayloadFunction(), new int[]{ 122, 222, 22 }, new float[]{ 4.0f, 4.0f, 2.0f });
  checkQuery(q, new AveragePayloadFunction(), new int[] { 122, 222, 22 }, new float[] { 4.0f, 4.0f, 3.0f });

}
 
Example 9 (project: lucene-solr, file: TestPayloadScoreQuery.java)
@Test
public void testEquality() {
  SpanQuery sq1 = new SpanTermQuery(new Term("field", "one"));
  SpanQuery sq2 = new SpanTermQuery(new Term("field", "two"));
  PayloadFunction minFunc = new MinPayloadFunction();
  PayloadFunction maxFunc = new MaxPayloadFunction();
  PayloadScoreQuery query1 = new PayloadScoreQuery(sq1, minFunc, PayloadDecoder.FLOAT_DECODER, true);
  PayloadScoreQuery query2 = new PayloadScoreQuery(sq2, minFunc, PayloadDecoder.FLOAT_DECODER, true);
  PayloadScoreQuery query3 = new PayloadScoreQuery(sq2, maxFunc, PayloadDecoder.FLOAT_DECODER, true);
  PayloadScoreQuery query4 = new PayloadScoreQuery(sq2, maxFunc, PayloadDecoder.FLOAT_DECODER, false);
  PayloadScoreQuery query5 = new PayloadScoreQuery(sq1, minFunc, PayloadDecoder.FLOAT_DECODER);

  assertEquals(query1, query5);
  assertFalse(query1.equals(query2));
  assertFalse(query1.equals(query3));
  assertFalse(query1.equals(query4));
  assertFalse(query2.equals(query3));
  assertFalse(query2.equals(query4));
  assertFalse(query3.equals(query4));
}
 
Example 10 (project: lucene-solr, file: TestPayloadTermQuery.java)
public void test() throws IOException {
  SpanQuery query = new PayloadScoreQuery(new SpanTermQuery(new Term("field", "seventy")),
          new MaxPayloadFunction(), PayloadDecoder.FLOAT_DECODER);
  TopDocs hits = searcher.search(query, 100);
  assertTrue("hits is null and it shouldn't be", hits != null);
  assertTrue("hits Size: " + hits.totalHits.value + " is not: " + 100, hits.totalHits.value == 100);

  //they should all have the exact same score, because they all contain seventy once, and we set
  //all the other similarity factors to be 1

  for (int i = 0; i < hits.scoreDocs.length; i++) {
    ScoreDoc doc = hits.scoreDocs[i];
    assertTrue(doc.score + " does not equal: " + 1, doc.score == 1);
  }
  CheckHits.checkExplanations(query, PayloadHelper.FIELD, searcher, true);
  Spans spans = query.createWeight(searcher, ScoreMode.COMPLETE_NO_SCORES, 1f).getSpans(searcher.getIndexReader().leaves().get(0), SpanWeight.Postings.POSITIONS);
  assertTrue("spans is null and it shouldn't be", spans != null);
  /*float score = hits.score(0);
  for (int i =1; i < hits.length(); i++)
  {
    assertTrue("scores are not equal and they should be", score == hits.score(i));
  }*/

}
 
Example 11 (project: lucene-solr, file: TestPayloadCheckQuery.java)
public void testUnorderedPayloadChecks() throws Exception {

    SpanTermQuery term5 = new SpanTermQuery(new Term("field", "five"));
    SpanTermQuery term100 = new SpanTermQuery(new Term("field", "hundred"));
    SpanTermQuery term4 = new SpanTermQuery(new Term("field", "four"));
    SpanNearQuery nearQuery = new SpanNearQuery(new SpanQuery[]{term5, term100, term4}, 0, false);

    List<BytesRef> payloads = new ArrayList<>();
    payloads.add(new BytesRef("pos: " + 2));
    payloads.add(new BytesRef("pos: " + 1));
    payloads.add(new BytesRef("pos: " + 0));

    SpanPayloadCheckQuery payloadQuery = new SpanPayloadCheckQuery(nearQuery, payloads);
    checkHits(payloadQuery, new int[]{ 405 });

    payloads.clear();
    payloads.add(new BytesRef("pos: " + 0));
    payloads.add(new BytesRef("pos: " + 1));
    payloads.add(new BytesRef("pos: " + 2));

    payloadQuery = new SpanPayloadCheckQuery(nearQuery, payloads);
    checkHits(payloadQuery, new int[]{ 504 });

  }
 
Example 12 (project: lucene-solr, file: TestPayloadCheckQuery.java)
public void testEquality() {
  SpanQuery sq1 = new SpanTermQuery(new Term("field", "one"));
  SpanQuery sq2 = new SpanTermQuery(new Term("field", "two"));
  BytesRef payload1 = new BytesRef("pay1");
  BytesRef payload2 = new BytesRef("pay2");
  SpanQuery query1 = new SpanPayloadCheckQuery(sq1, Collections.singletonList(payload1));
  SpanQuery query2 = new SpanPayloadCheckQuery(sq2, Collections.singletonList(payload1));
  SpanQuery query3 = new SpanPayloadCheckQuery(sq1, Collections.singletonList(payload2));
  SpanQuery query4 = new SpanPayloadCheckQuery(sq2, Collections.singletonList(payload2));
  SpanQuery query5 = new SpanPayloadCheckQuery(sq1, Collections.singletonList(payload1));

  assertEquals(query1, query5);
  assertFalse(query1.equals(query2));
  assertFalse(query1.equals(query3));
  assertFalse(query1.equals(query4));
  assertFalse(query2.equals(query3));
  assertFalse(query2.equals(query4));
  assertFalse(query3.equals(query4));
}
 
Example 13 (project: lucene-solr, file: SpanOrTermsBuilder.java)
@Override
public SpanQuery getSpanQuery(Element e) throws ParserException {
  String fieldName = DOMUtils.getAttributeWithInheritanceOrFail(e, "fieldName");
  String value = DOMUtils.getNonBlankTextOrFail(e);

  List<SpanQuery> clausesList = new ArrayList<>();

  try (TokenStream ts = analyzer.tokenStream(fieldName, value)) {
    TermToBytesRefAttribute termAtt = ts.addAttribute(TermToBytesRefAttribute.class);
    ts.reset();
    while (ts.incrementToken()) {
      SpanTermQuery stq = new SpanTermQuery(new Term(fieldName, BytesRef.deepCopyOf(termAtt.getBytesRef())));
      clausesList.add(stq);
    }
    ts.end();
    SpanOrQuery soq = new SpanOrQuery(clausesList.toArray(new SpanQuery[clausesList.size()]));
    float boost = DOMUtils.getAttribute(e, "boost", 1.0f);
    return new SpanBoostQuery(soq, boost);
  }
  catch (IOException ioe) {
    throw new ParserException("IOException parsing value:" + value);
  }
}
 
Example 14 (project: lucene-solr, file: MultiTermHighlighting.java)
@Override
public QueryVisitor getSubVisitor(BooleanClause.Occur occur, Query parent) {
  if (lookInSpan == false && parent instanceof SpanQuery) {
    return QueryVisitor.EMPTY_VISITOR;
  }
  return super.getSubVisitor(occur, parent);
}
 
Example 15 (project: lucene-solr, file: MemoryIndexOffsetStrategy.java)
/**
 * Build one {@link CharArrayMatcher} matching any term the query might match.
 */
private static CharArrayMatcher buildCombinedAutomaton(UHComponents components) {
  // We don't know enough about the query to do this confidently
  if (components.getTerms() == null || components.getAutomata() == null) {
    return null;
  }

  List<CharArrayMatcher> allAutomata = new ArrayList<>();
  if (components.getTerms().length > 0) {
    allAutomata.add(CharArrayMatcher.fromTerms(Arrays.asList(components.getTerms())));
  }
  Collections.addAll(allAutomata, components.getAutomata());
  for (SpanQuery spanQuery : components.getPhraseHelper().getSpanQueries()) {
    Collections.addAll(allAutomata,
        MultiTermHighlighting.extractAutomata(spanQuery, components.getFieldMatcher(), true));//true==lookInSpan
  }

  if (allAutomata.size() == 1) {
    return allAutomata.get(0);
  }

  //TODO it'd be nice if we could get at the underlying Automaton in CharacterRunAutomaton so that we
  //  could union them all. But it's not exposed, and sometimes the automaton is byte (not char) oriented

  // Return an aggregate CharArrayMatcher of others
  return (chars, offset, length) -> {
    for (int i = 0; i < allAutomata.size(); i++) {// don't use foreach to avoid Iterator allocation
      if (allAutomata.get(i).match(chars, offset, length)) {
        return true;
      }
    }
    return false;
  };
}
 
public SpanQuery getSpanQuery(Element e) throws ParserException {
  final String fieldName = DOMUtils.getAttributeWithInheritanceOrFail(e, "fieldName");
  final SpanQuery[] spanQueries = new SpanQuery[]{
      new SpanTermQuery(new Term(fieldName, "Apache")),
      new SpanTermQuery(new Term(fieldName, "Lucene")),
      new SpanTermQuery(new Term(fieldName, "Solr"))
  };
  final int slop = 42;
  final boolean inOrder = false;
  return new SpanNearQuery(spanQueries, slop, inOrder);
}
 
Example 17 (project: lucene-solr, file: MissesTest.java)
public void testSpanNearQuery() throws IOException, InvalidTokenOffsetsException {
  try (Analyzer analyzer = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)) {
    final Query query = new SpanNearQuery(new SpanQuery[] {
        new SpanTermQuery(new Term("test", "foo")),
        new SpanTermQuery(new Term("test", "bar"))}, 0, true);
    final Highlighter highlighter = new Highlighter(new SimpleHTMLFormatter(), new QueryScorer(query));
    assertEquals("this is a <B>foo</B> <B>bar</B> example",
        highlighter.getBestFragment(analyzer, "test", "this is a foo bar example"));
    assertNull(highlighter.getBestFragment(analyzer, "test", "this does not match"));
  }
}
 
Example 18 (project: lucene-solr, file: HighlighterPhraseTest.java)
public void testSparseSpan() throws IOException, InvalidTokenOffsetsException {
  final String TEXT = "the fox did not jump";
  final Directory directory = newDirectory();
  final IndexWriter indexWriter = new IndexWriter(directory,
      newIndexWriterConfig(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)));
  try {
    final Document document = new Document();
    FieldType customType = new FieldType(TextField.TYPE_NOT_STORED);
    customType.setStoreTermVectorOffsets(true);
    customType.setStoreTermVectorPositions(true);
    customType.setStoreTermVectors(true);
    document.add(new Field(FIELD, new TokenStreamSparse(), customType));
    indexWriter.addDocument(document);
  } finally {
    indexWriter.close();
  }
  final IndexReader indexReader = DirectoryReader.open(directory);
  try {
    assertEquals(1, indexReader.numDocs());
    final IndexSearcher indexSearcher = newSearcher(indexReader);
    final Query phraseQuery = new SpanNearQuery(new SpanQuery[] {
        new SpanTermQuery(new Term(FIELD, "did")),
        new SpanTermQuery(new Term(FIELD, "jump")) }, 0, true);

    TopDocs hits = indexSearcher.search(phraseQuery, 1);
    assertEquals(0, hits.totalHits.value);
    final Highlighter highlighter = new Highlighter(
        new SimpleHTMLFormatter(), new SimpleHTMLEncoder(),
        new QueryScorer(phraseQuery));
    final TokenStream tokenStream =
        TokenSources.getTermVectorTokenStreamOrNull(FIELD, indexReader.getTermVectors(0), -1);
    assertEquals(
        highlighter.getBestFragment(new TokenStreamSparse(), TEXT),
        highlighter.getBestFragment(tokenStream, TEXT));
  } finally {
    indexReader.close();
    directory.close();
  }
}
 
Example 19 (project: mtas, file: MtasExtendedSpanAndQuery.java)
@Override
public String toString(String field) {
  StringBuilder buffer = new StringBuilder();
  buffer.append(this.getClass().getSimpleName() + "([");
  Iterator<SpanQuery> i = localClauses.iterator();
  while (i.hasNext()) {
    SpanQuery clause = i.next();
    buffer.append(clause.toString(field));
    if (i.hasNext()) {
      buffer.append(", ");
    }
  }
  buffer.append("])");
  return buffer.toString();
}
 
/**
   * Like {@link #testRewriteAndMtq} but no freestanding MTQ
   */
  public void testRewrite() throws IOException {
    indexWriter.addDocument(newDoc("alpha bravo charlie - charlie bravo alpha"));
    initReaderSearcherHighlighter();

    SpanNearQuery snq = new SpanNearQuery(
        new SpanQuery[]{
            new SpanTermQuery(new Term("body", "bravo")),
            new SpanMultiTermQueryWrapper<>(new PrefixQuery(new Term("body", "ch")))}, // REWRITES
        0, true);
    BooleanQuery query = new BooleanQuery.Builder()
        .add(snq, BooleanClause.Occur.MUST)
//          .add(new PrefixQuery(new Term("body", "al")), BooleanClause.Occur.MUST) // MTQ
        .add(newPhraseQuery("body", "alpha bravo"), BooleanClause.Occur.MUST)
        // add queries for other fields; we shouldn't highlight these because of that.
        .add(newPhraseQuery("title", "bravo alpha"), BooleanClause.Occur.SHOULD)
        .build();

    TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER);
    String[] snippets = highlighter.highlight("body", query, topDocs);

    if (highlighter.getFlags("body").contains(HighlightFlag.WEIGHT_MATCHES)) {
      assertArrayEquals(new String[]{"<b>alpha bravo</b> <b>charlie</b> - charlie bravo alpha"}, snippets);
    } else {
      assertArrayEquals(new String[]{"<b>alpha</b> <b>bravo</b> <b>charlie</b> - charlie bravo alpha"}, snippets);
    }

    // do again, this time with MTQ disabled.  We should only find "alpha bravo".
    highlighter = new UnifiedHighlighter(searcher, indexAnalyzer);
    highlighter.setHandleMultiTermQuery(false);//disable but leave phrase processing enabled

    topDocs = searcher.search(query, 10, Sort.INDEXORDER);
    snippets = highlighter.highlight("body", query, topDocs);

    assertArrayEquals(new String[]{"<b>alpha</b> <b>bravo</b> charlie - charlie bravo alpha"},
        snippets);
  }
 
/**
 * Like {@link #testRewriteAndMtq} but no rewrite.
 */
public void testMtq() throws IOException {
  indexWriter.addDocument(newDoc("alpha bravo charlie - charlie bravo alpha"));
  initReaderSearcherHighlighter();

  SpanNearQuery snq = new SpanNearQuery(
      new SpanQuery[]{
          new SpanTermQuery(new Term("body", "bravo")),
          new SpanTermQuery(new Term("body", "charlie"))}, // does NOT rewrite
      0, true);

  BooleanQuery query = new BooleanQuery.Builder()
      .add(snq, BooleanClause.Occur.MUST)
      .add(new PrefixQuery(new Term("body", "al")), BooleanClause.Occur.MUST) // MTQ
      .add(newPhraseQuery("body", "alpha bravo"), BooleanClause.Occur.MUST)
      // add queries for other fields; we shouldn't highlight these because of that.
      .add(newPhraseQuery("title", "bravo alpha"), BooleanClause.Occur.SHOULD)
      .build();

  TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER);
  String[] snippets = highlighter.highlight("body", query, topDocs);

  if (highlighter.getFlags("body").contains(HighlightFlag.WEIGHT_MATCHES)) {
    assertArrayEquals(new String[]{"<b>alpha bravo</b> <b>charlie</b> - charlie bravo <b>alpha</b>"}, snippets);
  } else {
    assertArrayEquals(new String[]{"<b>alpha</b> <b>bravo</b> <b>charlie</b> - charlie bravo <b>alpha</b>"}, snippets);
  }

  // do again, this time with MTQ disabled.
  highlighter = new UnifiedHighlighter(searcher, indexAnalyzer);
  highlighter.setHandleMultiTermQuery(false);//disable but leave phrase processing enabled

  topDocs = searcher.search(query, 10, Sort.INDEXORDER);
  snippets = highlighter.highlight("body", query, topDocs);

  //note: without MTQ, the WEIGHT_MATCHES is disabled which affects the snippet boundaries
  assertArrayEquals(new String[]{"<b>alpha</b> <b>bravo</b> <b>charlie</b> - charlie bravo alpha"},
      snippets);
}
 
Example 22 (project: lucene-solr, file: SpanNearClauseFactory.java)
protected void addSpanQueryWeighted(SpanQuery sq, float weight) {
  Float w = weightBySpanQuery.get(sq);
  if (w != null)
    w = Float.valueOf(w.floatValue() + weight);
  else
    w = Float.valueOf(weight);
  weightBySpanQuery.put(sq, w); 
}
 
Example 23 (project: lucene-solr, file: TestUnifiedHighlighterMTQ.java)
public void testSpanNear() throws Exception {
  RandomIndexWriter iw = new RandomIndexWriter(random(), dir, indexAnalyzer);

  Field body = new Field("body", "", fieldType);
  Document doc = new Document();
  doc.add(body);

  body.setStringValue("This is a test.");
  iw.addDocument(doc);
  body.setStringValue("Test a one sentence document.");
  iw.addDocument(doc);

  IndexReader ir = iw.getReader();
  iw.close();

  IndexSearcher searcher = newSearcher(ir);
  UnifiedHighlighter highlighter = randomUnifiedHighlighter(searcher, indexAnalyzer);
  SpanQuery childQuery = new SpanMultiTermQueryWrapper<>(new WildcardQuery(new Term("body", "te*")));
  Query query = new SpanNearQuery(new SpanQuery[]{childQuery, childQuery}, 0, false);
  TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER);
  assertEquals(2, topDocs.totalHits.value);
  String snippets[] = highlighter.highlight("body", query, topDocs);
  assertEquals(2, snippets.length);
  assertEquals("This is a <b>test</b>.", snippets[0]);
  assertEquals("<b>Test</b> a one sentence document.", snippets[1]);

  ir.close();
}
 
Example 24 (project: lucene-solr, file: TestUnifiedHighlighterMTQ.java)
public void testSpanNot() throws Exception {
  RandomIndexWriter iw = new RandomIndexWriter(random(), dir, indexAnalyzer);

  Field body = new Field("body", "", fieldType);
  Document doc = new Document();
  doc.add(body);

  body.setStringValue("This is a test.");
  iw.addDocument(doc);
  body.setStringValue("Test a one sentence document.");
  iw.addDocument(doc);

  IndexReader ir = iw.getReader();
  iw.close();

  IndexSearcher searcher = newSearcher(ir);
  UnifiedHighlighter highlighter = randomUnifiedHighlighter(searcher, indexAnalyzer);
  SpanQuery include = new SpanMultiTermQueryWrapper<>(new WildcardQuery(new Term("body", "te*")));
  SpanQuery exclude = new SpanTermQuery(new Term("body", "bogus"));
  Query query = new SpanNotQuery(include, exclude);
  TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER);
  assertEquals(2, topDocs.totalHits.value);
  String snippets[] = highlighter.highlight("body", query, topDocs);
  assertEquals(2, snippets.length);
  assertEquals("This is a <b>test</b>.", snippets[0]);
  assertEquals("<b>Test</b> a one sentence document.", snippets[1]);

  ir.close();
}
 
Example 25 (project: lucene-solr, file: TestCoreParser.java)
public void testSpanPositionRangeQueryXML() throws Exception {
  Query q = parse("SpanPositionRangeQuery.xml");
  long h = searcher().search(q, 10).totalHits.value;
  assertEquals("SpanPositionRangeQuery should produce 2 result ", 2, h);
  SpanQuery sq = parseAsSpan("SpanPositionRangeQuery.xml");
  dumpResults("SpanPositionRangeQuery", sq, 5);
  assertEquals(q, sq);
}
 
Example 26 (project: lucene-solr, file: TestUnifiedHighlighterMTQ.java)
@Override
public Query rewrite(IndexReader reader) throws IOException {
  Query newOriginalQuery = originalQuery.rewrite(reader);
  if (newOriginalQuery != originalQuery) {
    return new MyWrapperSpanQuery((SpanQuery)newOriginalQuery);
  }
  return this;
}
 
Example 27 (project: lucene-solr, file: ReutersQueryMaker.java)
private static Query[] getPrebuiltQueries(String field) {
  //  be wary of unanalyzed text
  return new Query[] {
      new SpanFirstQuery(new SpanTermQuery(new Term(field, "ford")), 5),
      new SpanNearQuery(new SpanQuery[]{new SpanTermQuery(new Term(field, "night")), new SpanTermQuery(new Term(field, "trading"))}, 4, false),
      new SpanNearQuery(new SpanQuery[]{new SpanFirstQuery(new SpanTermQuery(new Term(field, "ford")), 10), new SpanTermQuery(new Term(field, "credit"))}, 10, false),
      new WildcardQuery(new Term(field, "fo*")),
  };
}
 
Example 28 (project: lucene-solr, file: EnwikiQueryMaker.java)
private static Query[] getPrebuiltQueries(String field) {
  WildcardQuery wcq = new WildcardQuery(new Term(field, "fo*"));
  wcq.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_REWRITE);
  // be wary of unanalyzed text
  return new Query[] {
      new SpanFirstQuery(new SpanTermQuery(new Term(field, "ford")), 5),
      new SpanNearQuery(new SpanQuery[] {
          new SpanTermQuery(new Term(field, "night")),
          new SpanTermQuery(new Term(field, "trading")) }, 4, false),
      new SpanNearQuery(new SpanQuery[] {
          new SpanFirstQuery(new SpanTermQuery(new Term(field, "ford")), 10),
          new SpanTermQuery(new Term(field, "credit")) }, 10, false), wcq, };
}
 
public void testNullPointerException() throws IOException {
  RegexpQuery regex = new RegexpQuery(new Term("field", "worl."));
  SpanQuery wrappedquery = new SpanMultiTermQueryWrapper<>(regex);
      
  MemoryIndex mindex = randomMemoryIndex();
  mindex.addField("field", new MockAnalyzer(random()).tokenStream("field", "hello there"));

  // This throws an NPE
  assertEquals(0, mindex.search(wrappedquery), 0.00001f);
  TestUtil.checkReader(mindex.createSearcher().getIndexReader());
}
 
public void testPassesIfWrapped() throws IOException {
  RegexpQuery regex = new RegexpQuery(new Term("field", "worl."));
  SpanQuery wrappedquery = new SpanOrQuery(new SpanMultiTermQueryWrapper<>(regex));

  MemoryIndex mindex = randomMemoryIndex();
  mindex.addField("field", new MockAnalyzer(random()).tokenStream("field", "hello there"));

  // This passes though
  assertEquals(0, mindex.search(wrappedquery), 0.00001f);
  TestUtil.checkReader(mindex.createSearcher().getIndexReader());
}
 