下面列出了怎么用org.apache.lucene.search.TermQuery的API类实例代码及写法,或者点击链接到github查看源代码。
public void testQueryImplicitDefaultParams() throws IOException {
    IndexSearcher searcher = newSearcher(reader);
    // Base drill-down query on facet "a".
    DrillDownQuery baseQuery = new DrillDownQuery(config);
    baseQuery.add("a");
    // Drilling down on facet "b" with the "a" query as base must
    // yield exactly 5 documents.
    DrillDownQuery drillB = new DrillDownQuery(config, baseQuery);
    drillB.add("b");
    TopDocs hits = searcher.search(drillB, 100);
    assertEquals(5, hits.totalHits.value);
    // content:foo yields 50% of results and facet/b yields 20%;
    // together they should intersect to 10 results (10%).
    Query contentQuery = new TermQuery(new Term("content", "foo"));
    DrillDownQuery drillFooB = new DrillDownQuery(config, contentQuery);
    drillFooB.add("b");
    hits = searcher.search(drillFooB, 100);
    assertEquals(10, hits.totalHits.value);
}
public void testSetAllGroups() throws Exception {
    // Index a single document carrying the group value "foo".
    Directory directory = newDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(
        random(),
        directory,
        newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy()));
    Document document = new Document();
    document.add(newField("group", "foo", StringField.TYPE_NOT_STORED));
    document.add(new SortedDocValuesField("group", new BytesRef("foo")));
    writer.addDocument(document);
    IndexSearcher searcher = newSearcher(writer.getReader());
    writer.close();
    // Run a grouping search with all-groups tracking enabled.
    GroupingSearch groupingSearch = new GroupingSearch("group");
    groupingSearch.setAllGroups(true);
    TopGroups<?> result = groupingSearch.search(searcher, new TermQuery(new Term("group", "foo")), 0, 10);
    assertEquals(1, result.totalHitCount);
    //assertEquals(1, result.totalGroupCount.intValue());
    assertEquals(1, result.totalGroupedHitCount);
    assertEquals(1, groupingSearch.getAllMatchingGroups().size());
    searcher.getIndexReader().close();
    directory.close();
}
/** Tests fuzzy-query parsing, including malformed and over-limit distances. */
public void testFuzzy() throws Exception {
    Query regular = new TermQuery(new Term("field", "foobar"));
    Query expected = new FuzzyQuery(new Term("field", "foobar"), 2);
    assertEquals(expected, parse("foobar~2"));
    assertEquals(expected, parse("foobar~"));
    // A malformed edit distance falls back to a plain term query.
    assertEquals(regular, parse("foobar~a"));
    assertEquals(regular, parse("foobar~1a"));
    BooleanQuery.Builder bool = new BooleanQuery.Builder();
    FuzzyQuery fuzzy = new FuzzyQuery(new Term("field", "foo"), LevenshteinAutomata.MAXIMUM_SUPPORTED_DISTANCE);
    bool.add(fuzzy, Occur.MUST);
    bool.add(new TermQuery(new Term("field", "bar")), Occur.MUST);
    // BUGFIX: parenthesize the addition so the query string is "foo~3"
    // (MAXIMUM_SUPPORTED_DISTANCE + 1). Without parentheses, string
    // concatenation produced "foo~21" (distance digits glued together).
    // An over-limit distance must be clamped to MAXIMUM_SUPPORTED_DISTANCE.
    assertEquals(bool.build(), parse("foo~" + (LevenshteinAutomata.MAXIMUM_SUPPORTED_DISTANCE + 1) + " bar"));
}
public void testPhraseNotInDoc() throws IOException {
    indexWriter.addDocument(newDoc("Whatever yin")); // matched by the query; should be highlighted
    indexWriter.addDocument(newDoc("nextdoc yin")); // only a SHOULD clause touches this doc; no match
    initReaderSearcherHighlighter();
    // The MUST clause selects doc 0; neither SHOULD phrase ever matches.
    BooleanQuery query = new BooleanQuery.Builder()
        .add(new TermQuery(new Term("body", "whatever")), BooleanClause.Occur.MUST)
        .add(newPhraseQuery("body", "nextdoc yin"), BooleanClause.Occur.SHOULD)
        .add(newPhraseQuery("body", "nonexistent yin"), BooleanClause.Occur.SHOULD)
        .build();
    TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER);
    String[] snippets = highlighter.highlight("body", query, topDocs);
    assertArrayEquals(new String[]{"<b>Whatever</b> yin"}, snippets);
}
/**
 * Builds a query matching a binary name: exact package plus a wildcard on
 * the simple name (the trailing wildcard covers the type-kind suffix).
 */
static Query binaryNameQuery (final String resourceName) {
    final BooleanQuery query = new BooleanQuery ();
    // Split into package and simple name at the last separator.
    final int sepIndex = resourceName.lastIndexOf(BinaryName.PKG_SEPARATOR); // NOI18N
    final String pkgName;
    String sName;
    if (sepIndex < 0) {
        pkgName = ""; // NOI18N
        sName = resourceName;
    } else {
        pkgName = resourceName.substring(0, sepIndex);
        sName = resourceName.substring(sepIndex + 1);
    }
    sName = sName + WILDCARD_QUERY_WILDCARD; //Type of type element (Enum, Class, Interface, Annotation)
    query.add(new TermQuery(new Term(FIELD_PACKAGE_NAME, pkgName)), BooleanClause.Occur.MUST);
    query.add(new WildcardQuery(new Term(FIELD_BINARY_NAME, sName)), BooleanClause.Occur.MUST);
    return query;
}
@Test
public void test4() throws ParseException {
    // Angle-bracket groups become SuperQueries; the second group is negated.
    Query parsed = parser.parse("<a.a:a a.d:e a.b:b> -<b.c:c b.d:d>");
    BooleanQuery firstGroup = new BooleanQuery();
    firstGroup.add(new TermQuery(new Term("a.a", "a")), Occur.SHOULD);
    firstGroup.add(new TermQuery(new Term("a.d", "e")), Occur.SHOULD);
    firstGroup.add(new TermQuery(new Term("a.b", "b")), Occur.SHOULD);
    BooleanQuery secondGroup = new BooleanQuery();
    secondGroup.add(new TermQuery(new Term("b.c", "c")), Occur.SHOULD);
    secondGroup.add(new TermQuery(new Term("b.d", "d")), Occur.SHOULD);
    SuperQuery positive = new SuperQuery(firstGroup, ScoreType.SUPER, new Term("_primedoc_"));
    SuperQuery negative = new SuperQuery(secondGroup, ScoreType.SUPER, new Term("_primedoc_"));
    BooleanQuery expected = new BooleanQuery();
    expected.add(positive, Occur.SHOULD);
    expected.add(negative, Occur.MUST_NOT);
    assertQuery(expected, parsed);
}
/**
 * Make sure highlighter returns first N sentences if
 * there were no hits.
 */
public void testEmptyHighlights() throws Exception {
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir, indexAnalyzer);
    Document document = new Document();
    document.add(new Field("body", "test this is. another sentence this test has. far away is that planet.", fieldType));
    writer.addDocument(document);
    IndexReader reader = writer.getReader();
    writer.close();
    IndexSearcher searcher = newSearcher(reader);
    UnifiedHighlighter highlighter = randomUnifiedHighlighter(searcher, indexAnalyzer);
    // The query term never occurs in the document, so there are no hits.
    Query query = new TermQuery(new Term("body", "highlighting"));
    int[] docIDs = {0};
    // Ask for up to 2 passages; with no hits we expect the first 2 sentences.
    String[] snippets = highlighter.highlightFields(new String[]{"body"}, query, docIDs, new int[]{2}).get("body");
    assertEquals(1, snippets.length);
    assertEquals("test this is. another sentence this test has. ", snippets[0]);
    reader.close();
}
public void testIdenticalMatches() throws Exception {
    // The same term appears in both a MUST and a SHOULD clause; the
    // duplicate must be counted as a single hit.
    final BooleanQuery duplicated = new BooleanQuery.Builder()
        .add(new TermQuery(new Term(FIELD, "term1")), BooleanClause.Occur.MUST)
        .add(new TermQuery(new Term(FIELD, "term1")), BooleanClause.Occur.SHOULD)
        .build();
    try (Monitor monitor = new Monitor(ANALYZER)) {
        monitor.register(new MonitorQuery("1", duplicated));
        MatchingQueries<HighlightsMatch> matches = monitor.match(buildDoc("term1 term2"), HighlightsMatch.MATCHER);
        HighlightsMatch match = matches.matches("1");
        assertNotNull(match);
        assertEquals(1, match.getHitCount());
    }
}
@Test
public void testExtractSubqueryField() {
    // Two disjuncts over distinct fields; both field names must be extracted.
    Query first = new TermQuery(new Term("field1", "value1"));
    Query second = new TermQuery(new Term("field2", "value2"));
    List<Query> disjuncts = new ArrayList<>();
    disjuncts.add(first);
    disjuncts.add(second);
    DisjunctionMaxQuery disjunctionMaxQuery = new DisjunctionMaxQuery(disjuncts, 0.0f);
    DisjunctionQueryExtractor extractor = new DisjunctionQueryExtractor();
    Set<String> fieldNames = new HashSet<>();
    extractor.extractSubQueriesFields(disjunctionMaxQuery, DEFAULT_EXTRACTORS, fieldNames);
    assertEquals(2, fieldNames.size());
    assertTrue(fieldNames.contains("field1"));
    assertTrue(fieldNames.contains("field2"));
}
@Override
// Recursively unwraps compound/wrapper query types into primitive queries
// collected in flatQueries (used downstream for highlighting).
// NOTE: the order of instanceof checks matters — specific wrappers are
// handled before falling through to the superclass default.
void flatten(Query sourceQuery, IndexReader reader, Collection<Query> flatQueries, float boost) throws IOException {
if (sourceQuery instanceof SpanTermQuery) {
// A span term is equivalent to a plain TermQuery for flattening purposes.
super.flatten(new TermQuery(((SpanTermQuery) sourceQuery).getTerm()), reader, flatQueries, boost);
} else if (sourceQuery instanceof ConstantScoreQuery) {
// Unwrap and flatten the inner query; the constant score is irrelevant here.
flatten(((ConstantScoreQuery) sourceQuery).getQuery(), reader, flatQueries, boost);
} else if (sourceQuery instanceof FunctionScoreQuery) {
flatten(((FunctionScoreQuery) sourceQuery).getSubQuery(), reader, flatQueries, boost);
} else if (sourceQuery instanceof MultiPhrasePrefixQuery) {
// Rewrite against the reader first so the prefix expands to concrete terms.
flatten(sourceQuery.rewrite(reader), reader, flatQueries, boost);
} else if (sourceQuery instanceof FiltersFunctionScoreQuery) {
flatten(((FiltersFunctionScoreQuery) sourceQuery).getSubQuery(), reader, flatQueries, boost);
} else if (sourceQuery instanceof MultiPhraseQuery) {
// Expand the term-array cross product into individual phrase combinations.
MultiPhraseQuery q = ((MultiPhraseQuery) sourceQuery);
convertMultiPhraseQuery(0, new int[q.getTermArrays().size()], q, q.getTermArrays(), q.getPositions(), reader, flatQueries);
} else if (sourceQuery instanceof BlendedTermQuery) {
// Rewriting a blended term query yields the underlying term queries.
final BlendedTermQuery blendedTermQuery = (BlendedTermQuery) sourceQuery;
flatten(blendedTermQuery.rewrite(reader), reader, flatQueries, boost);
} else {
// Anything else is handled by the base implementation.
super.flatten(sourceQuery, reader, flatQueries, boost);
}
}
public void testOffByOne() throws Exception {
    TestHighlightRunner helper = new TestHighlightRunner() {
        @Override
        public void run() throws Exception {
            // NullFragmenter keeps the whole text as one fragment, so the
            // bracketed offsets at the end must survive highlighting intact.
            TermQuery query = new TermQuery(new Term("data", "help"));
            Highlighter highlighter = new Highlighter(new SimpleHTMLFormatter(), new QueryTermScorer(query));
            highlighter.setTextFragmenter(new NullFragmenter());
            String result = highlighter.getBestFragment(analyzer, "data", "help me [54-65]");
            assertEquals("<B>help</B> me [54-65]", result);
        }
    };
    helper.start();
}
/**
 * Constructs an elevation.
 *
 * @param elevatedIds The ids of the elevated documents that should appear on top of search results, in configured order;
 * can be <code>null</code>.
 * @param excludedIds The ids of the excluded documents that should not appear in search results; can be <code>null</code>.
 * @param queryFieldName The field name to use to create query terms.
 */
public Elevation(Set<BytesRef> elevatedIds, Set<BytesRef> excludedIds, String queryFieldName) {
    // Inclusion side: one SHOULD clause per elevated id.
    if (elevatedIds == null || elevatedIds.isEmpty()) {
        this.elevatedIds = Collections.emptySet();
        includeQuery = EMPTY_QUERY;
    } else {
        this.elevatedIds = ImmutableSet.copyOf(elevatedIds);
        BooleanQuery.Builder includeBuilder = new BooleanQuery.Builder();
        for (BytesRef elevatedId : elevatedIds) {
            includeBuilder.add(new TermQuery(new Term(queryFieldName, elevatedId)), BooleanClause.Occur.SHOULD);
        }
        includeQuery = includeBuilder.build();
    }
    // Exclusion side: one standalone TermQuery per excluded id.
    if (excludedIds == null || excludedIds.isEmpty()) {
        this.excludedIds = Collections.emptySet();
        excludeQueries = null;
    } else {
        this.excludedIds = ImmutableSet.copyOf(excludedIds);
        List<TermQuery> excludeList = new ArrayList<>(excludedIds.size());
        for (BytesRef excludedId : excludedIds) {
            excludeList.add(new TermQuery(new Term(queryFieldName, excludedId)));
        }
        excludeQueries = excludeList.toArray(new TermQuery[0]);
    }
}
/**
 * Creates a scorer over the documents owned by the given authority, using a
 * per-authority cache of owned-document sets.
 */
public static SolrOwnerScorer createOwnerScorer(Weight weight, LeafReaderContext context, SolrIndexSearcher searcher, String authority) throws IOException
{
    // Only USER authorities can own documents.
    if (AuthorityType.getAuthorityType(authority) != AuthorityType.USER)
    {
        // Return an empty doc set, as the authority isn't a user.
        return new SolrOwnerScorer(weight, new BitDocSet(new FixedBitSet(0)), context, searcher);
    }
    DocSet ownedDocs = (DocSet) searcher.cacheLookup(CacheConstants.ALFRESCO_OWNERLOOKUP_CACHE, authority);
    if (ownedDocs == null)
    {
        // Cache miss: query the index for docs where the owner matches the authority.
        ownedDocs = searcher.getDocSet(new TermQuery(new Term(QueryConstants.FIELD_OWNER, authority)));
        searcher.cacheInsert(CacheConstants.ALFRESCO_OWNERLOOKUP_CACHE, authority, ownedDocs);
    }
    return new SolrOwnerScorer(weight, ownedDocs, context, searcher);
}
/**
 * Builds a fallback query for the default locale: matches documents whose
 * defined locales include the default locale but NOT the requested locale,
 * and whose default-locale field contains the search phrase as an infix.
 *
 * @param fieldName base field name
 * @param locale the requested locale (must not be among the defined locales)
 * @param defaultLocale the fallback locale (must be among the defined locales)
 * @param searchPhrase phrase matched as a lowercase infix wildcard
 * @return the combined boolean query
 */
private static BooleanQuery constructDefaultLocaleHandlingQuery(
        String fieldName, String locale, String defaultLocale,
        String searchPhrase) {
    // Clause 1: default locale is defined, requested locale is not.
    BooleanQuery localeFilter = new BooleanQuery();
    TermQuery hasDefaultLocale = new TermQuery(
            new Term(fieldName + ProductClassBridge.DEFINED_LOCALES_SUFFIX,
                    defaultLocale));
    TermQuery hasRequestedLocale = new TermQuery(new Term(
            fieldName + ProductClassBridge.DEFINED_LOCALES_SUFFIX, locale));
    localeFilter.add(hasDefaultLocale, Occur.MUST);
    localeFilter.add(hasRequestedLocale, Occur.MUST_NOT);
    // Clause 2: the phrase occurs in the default-locale field.
    // BUGFIX: use a locale-independent lowercase conversion so the wildcard
    // pattern is stable regardless of the JVM default locale (e.g. the
    // Turkish dotless-I problem with String.toLowerCase()).
    BooleanQuery phraseFilter = new BooleanQuery();
    WildcardQuery phraseWildcard = new WildcardQuery(
            new Term(fieldName + defaultLocale,
                    "*" + searchPhrase.toLowerCase(java.util.Locale.ROOT) + "*"));
    phraseFilter.add(phraseWildcard, Occur.SHOULD);
    BooleanQuery finalQuery = new BooleanQuery();
    finalQuery.add(localeFilter, Occur.MUST);
    finalQuery.add(phraseFilter, Occur.MUST);
    return finalQuery;
}
public void testIntervalDisjunction() throws IOException {
    // Disjunction of three terms, one of which never occurs in the index.
    IntervalsSource source = Intervals.or(Intervals.term("pease"), Intervals.term("hot"), Intervals.term("notMatching"));
    checkIntervals(source, "field1", 4, new int[][]{
        {},
        { 0, 0, 2, 2, 3, 3, 6, 6, 17, 17},
        { 0, 0, 3, 3, 5, 5, 6, 6, 21, 21},
        { 3, 3, 7, 7 },
        { 0, 0, 2, 2, 3, 3, 6, 6, 17, 17},
        {}
    });
    // Doc 0 has no matches at all.
    assertNull(getMatches(source, 0, "field1"));
    // Doc 3 matches "hot" then "pease", in position order.
    MatchesIterator matches = getMatches(source, 3, "field1");
    assertMatch(matches, 3, 3, 15, 18);
    assertEquals(new TermQuery(new Term("field1","hot")), matches.getQuery());
    assertNull(matches.getSubMatches());
    assertMatch(matches, 7, 7, 31, 36);
    assertEquals(new TermQuery(new Term("field1","pease")), matches.getQuery());
    assertNull(matches.getSubMatches());
    assertFalse(matches.next());
    assertEquals(1, source.minExtent());
    checkVisits(source, 4, "pease", "hot", "notMatching");
}
public void testHighlightLastWord() throws Exception {
    // A hit on the final token of the field must still be wrapped in <b> tags.
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir, indexAnalyzer);
    Field bodyField = new Field("body", "", fieldType);
    bodyField.setStringValue("This is a test");
    Document document = new Document();
    document.add(bodyField);
    writer.addDocument(document);
    IndexReader reader = writer.getReader();
    writer.close();
    IndexSearcher searcher = newSearcher(reader);
    UnifiedHighlighter highlighter = randomUnifiedHighlighter(searcher, indexAnalyzer);
    Query query = new TermQuery(new Term("body", "test"));
    TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER);
    assertEquals(1, topDocs.totalHits.value);
    String[] snippets = highlighter.highlight("body", query, topDocs);
    assertEquals(1, snippets.length);
    assertEquals("This is a <b>test</b>", snippets[0]);
    reader.close();
}
@Override
public void testNewFieldQuery() throws Exception {
    // Ordinary behavior: synonyms form an uncoordinated boolean query.
    StandardQueryParser parser = getParser(new Analyzer1());
    BooleanQuery.Builder synonyms = new BooleanQuery.Builder();
    synonyms.add(new TermQuery(new Term("field", "dogs")), BooleanClause.Occur.SHOULD);
    synonyms.add(new TermQuery(new Term("field", "dog")), BooleanClause.Occur.SHOULD);
    assertEquals(synonyms.build(), parser.parse("\"dogs\"","field"));
    // Even with the phrase operator the behavior is the same.
    assertEquals(synonyms.build(), parser.parse("dogs","field"));
    // Custom behavior: the synonyms are expanded, unless you use the quote operator.
    //TODO test something like "SmartQueryParser()"
}
/**
 * Runs a MoreLikeThis search seeded by the document with the given internal
 * Lucene doc id, excluding the seed document itself from the results.
 * Side effects: stores the raw, boosted and final queries in the
 * corresponding instance fields.
 *
 * @param id internal Lucene id of the seed document
 * @param start offset of the first result to return
 * @param rows number of results to return
 * @param filters additional filter queries; may be null
 * @param terms if non-null, filled with the interesting terms driving the query
 * @param flags searcher flags passed through to getDocList/getDocListAndSet
 * @return the matching documents (with a DocSet only when needDocSet is true)
 * @throws IOException on index access failure
 */
public DocListAndSet getMoreLikeThis( int id, int start, int rows, List<Query> filters, List<InterestingTerm> terms, int flags ) throws IOException
{
Document doc = reader.document(id);
// Build the raw MLT query from the seed doc, then apply per-field boosts.
rawMLTQuery = mlt.like(id);
boostedMLTQuery = getBoostedQuery( rawMLTQuery );
if( terms != null ) {
// Caller asked for the interesting terms that drive the similarity query.
fillInterestingTermsFromMLTQuery( boostedMLTQuery, terms );
}
// exclude current document from results
BooleanQuery.Builder realMLTQuery = new BooleanQuery.Builder();
realMLTQuery.add(boostedMLTQuery, BooleanClause.Occur.MUST);
realMLTQuery.add(
new TermQuery(new Term(uniqueKeyField.getName(), uniqueKeyField.getType().storedToIndexed(doc.getField(uniqueKeyField.getName())))),
BooleanClause.Occur.MUST_NOT);
this.realMLTQuery = realMLTQuery.build();
DocListAndSet results = new DocListAndSet();
// Only compute the (more expensive) DocSet when the caller needs it.
if (this.needDocSet) {
results = searcher.getDocListAndSet(this.realMLTQuery, filters, null, start, rows, flags);
} else {
results.docList = searcher.getDocList(this.realMLTQuery, filters, null, start, rows, flags);
}
return results;
}
/**
 * Re-applies configured per-field boosts to each clause of the MLT boolean
 * query. Returns the query unchanged when no boost fields are configured.
 */
private Query getBoostedQuery(Query mltquery) {
    BooleanQuery boostedQuery = (BooleanQuery) mltquery;
    if (boostFields.isEmpty()) {
        return boostedQuery;
    }
    BooleanQuery.Builder rebuilt = new BooleanQuery.Builder();
    rebuilt.setMinimumNumberShouldMatch(boostedQuery.getMinimumNumberShouldMatch());
    for (BooleanClause clause : boostedQuery) {
        Query clauseQuery = clause.getQuery();
        float originalBoost = 1f;
        // Unwrap an existing boost so it can be combined with the field boost.
        if (clauseQuery instanceof BoostQuery) {
            BoostQuery boostQuery = (BoostQuery) clauseQuery;
            clauseQuery = boostQuery.getQuery();
            originalBoost = boostQuery.getBoost();
        }
        // Each unwrapped clause is expected to be a TermQuery.
        Float fieldBoost = boostFields.get(((TermQuery) clauseQuery).getTerm().field());
        Query rebuiltClause = (fieldBoost != null)
            ? new BoostQuery(clauseQuery, fieldBoost * originalBoost)
            : clause.getQuery();
        rebuilt.add(rebuiltClause, clause.getOccur());
    }
    return rebuilt.build();
}
public void testQueryWithTermPositionAvgWithTwoTerms() throws Exception {
    BooleanQuery.Builder builder = new BooleanQuery.Builder();
    builder.add(new TermQuery(new Term("text", "stop")), BooleanClause.Occur.SHOULD);
    builder.add(new TermQuery(new Term("text", "hip-hop")), BooleanClause.Occur.SHOULD);
    builder.add(new TermQuery(new Term("text", "monkeys")), BooleanClause.Occur.SHOULD);
    String statsType = "avg_raw_tp";
    ExplorerQuery explorerQuery = new ExplorerQuery(builder.build(), statsType);
    // Verify score is 5 (5 unique terms)
    TopDocs docs = searcher.search(explorerQuery, 4);
    assertThat(docs.scoreDocs[0].score, equalTo(5.0f));
}
/**
 * Searches the "title" field for the given keyword and prints each matching
 * document's title and content to stdout.
 *
 * @param keywords the term to look up in the "title" field
 * @return the top-10 {@link TopDocs} for the query
 * @throws IOException if the index cannot be opened or read
 * @throws ParseException declared for API compatibility with callers
 */
public TopDocs searcher(String keywords) throws IOException, ParseException {
    Directory directory = FSDirectory.open(new File(Constant.INDEXDIR));
    IndexReader indexReader = DirectoryReader.open(directory);
    try {
        IndexSearcher indexSearcher = new IndexSearcher(indexReader);
        Query query = new TermQuery(new Term("title", keywords));
        // Retrieve the first 10 records matching the query.
        TopDocs topDocs = indexSearcher.search(query, 10);
        System.out.println("返回总记录数" + topDocs.totalHits);
        for (ScoreDoc scoreDoc : topDocs.scoreDocs) {
            int docID = scoreDoc.doc;
            // Fetch the stored document by its internal id.
            Document document = indexSearcher.doc(docID);
            System.out.println("标题:" + document.get("title"));
            System.out.println("内容:" + document.get("content"));
            System.out.println("-----------------------------------------------------");
        }
        return topDocs;
    } finally {
        // BUGFIX: the reader and directory were previously leaked on every
        // call; release them once all stored fields have been read.
        indexReader.close();
        directory.close();
    }
}
/**
 * Recursively repairs boolean queries that contain only negative (MUST_NOT)
 * clauses — which would otherwise match nothing — by adding a positive
 * SHOULD clause. Mutates the given query in place and returns it.
 */
private Query fixNegatives(Query query) {
if (query instanceof SuperQuery) {
// Descend into the wrapped query; the wrapper itself needs no change.
SuperQuery superQuery = (SuperQuery) query;
fixNegatives(superQuery.getQuery());
} else if (query instanceof BooleanQuery) {
BooleanQuery booleanQuery = (BooleanQuery) query;
// Fix nested boolean queries first.
for (BooleanClause clause : booleanQuery.clauses()) {
fixNegatives(clause.getQuery());
}
if (containsAllNegativeQueries(booleanQuery)) {
if (containsSuperQueries(booleanQuery)) {
// Anchor on the prime-doc term so super-query semantics are preserved.
booleanQuery.add(new TermQuery(_defaultPrimeDocTerm), Occur.SHOULD);
} else {
booleanQuery.add(new MatchAllDocsQuery(), Occur.SHOULD);
}
}
}
return query;
}
@Test(expected = IllegalArgumentException.class)
public void testUserFailedToIndexOffsets() throws IOException {
    // Term vectors stored without offsets cannot be highlighted; expect IAE.
    FieldType fieldType = new FieldType(UHTestHelper.tvType); // note: it's indexed too
    fieldType.setStoreTermVectorPositions(random().nextBoolean());
    fieldType.setStoreTermVectorOffsets(false);
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir, indexAnalyzer);
    Document document = new Document();
    document.add(new Field("body", "term vectors", fieldType));
    writer.addDocument(document);
    IndexReader reader = writer.getReader();
    writer.close();
    IndexSearcher searcher = newSearcher(reader);
    UnifiedHighlighter highlighter = new UnifiedHighlighter(searcher, indexAnalyzer) {
        @Override
        protected Set<HighlightFlag> getFlags(String field) {
            return Collections.emptySet(); //no WEIGHT_MATCHES
        }
    };
    TermQuery query = new TermQuery(new Term("body", "vectors"));
    TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER);
    try {
        highlighter.highlight("body", query, topDocs, 1); //should throw
    } finally {
        reader.close();
    }
}
/**
 * Builds a new TermQuery instance.
 * <p>
 * This is intended for subclasses that wish to customize the generated queries.
 * @param term term
 * @param boost boost factor; no wrapping occurs when equal to DEFAULT_BOOST
 * @return new TermQuery instance, wrapped in a BoostQuery when boosted
 */
protected Query newTermQuery(Term term, float boost) {
    final Query termQuery = new TermQuery(term);
    return boost == DEFAULT_BOOST ? termQuery : new BoostQuery(termQuery, boost);
}
/** Looks a document up by unique key and returns its stored fields. */
private Document getDoc(String id) throws IOException {
    try (SolrQueryRequest req = req()) {
        SolrIndexSearcher searcher = req.getSearcher();
        TopDocs hits = searcher.search(new TermQuery(new Term(ID, id)), 1);
        // Assumes the id exists; scoreDocs[0] throws otherwise.
        return searcher.doc(hits.scoreDocs[0].doc);
    }
}
public void testFilterAndShouldClause() {
    // A query mixing SHOULD and FILTER clauses must not be decomposed further.
    final Query shouldClause = new TermQuery(new Term("f", "should"));
    final Query filterClause = new TermQuery(new Term("f", "filter"));
    final Query combined = new BooleanQuery.Builder()
        .add(shouldClause, BooleanClause.Occur.SHOULD)
        .add(filterClause, BooleanClause.Occur.FILTER)
        .build();
    assertEquals(Collections.singleton(combined), decomposer.decompose(combined));
}
public void testSpecificDocIDs() throws Exception {
    // Index two documents that both contain a form of "highlighting".
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir, indexAnalyzer);
    Field bodyField = new Field("body", "", fieldType);
    Document document = new Document();
    document.add(bodyField);
    bodyField.setStringValue("This is a test. Just a test highlighting from postings. Feel free to ignore.");
    writer.addDocument(document);
    bodyField.setStringValue("Highlighting the first term. Hope it works.");
    writer.addDocument(document);
    IndexReader reader = writer.getReader();
    writer.close();
    IndexSearcher searcher = newSearcher(reader);
    UnifiedHighlighter highlighter = randomUnifiedHighlighter(searcher, indexAnalyzer);
    Query query = new TermQuery(new Term("body", "highlighting"));
    TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER);
    assertEquals(2, topDocs.totalHits.value);
    // Highlight by explicit doc ids rather than by TopDocs.
    ScoreDoc[] hits = topDocs.scoreDocs;
    int[] docIDs = {hits[0].doc, hits[1].doc};
    String[] snippets = highlighter.highlightFields(new String[]{"body"}, query, docIDs, new int[]{1}).get("body");
    assertEquals(2, snippets.length);
    assertEquals("Just a test <b>highlighting</b> from postings. ", snippets[0]);
    assertEquals("<b>Highlighting</b> the first term. ", snippets[1]);
    reader.close();
}
@Override
public Optional<String> getTimestamp(AnnotationDocument aDocument) throws IOException
{
    Optional<String> result = Optional.empty();
    // BUGFIX: the directory and reader were previously never closed (a
    // resource leak on every call); try-with-resources guarantees release.
    try (Directory directory = FSDirectory.open(getIndexDir().toPath());
            IndexReader indexReader = DirectoryReader.open(directory)) {
        IndexSearcher indexSearcher = new IndexSearcher(indexReader);
        // Prepare query for the annotation document for this annotation document
        Term term = new Term(FIELD_ID,
                String.format("%d/%d", aDocument.getDocument().getId(), aDocument.getId()));
        TermQuery query = new TermQuery(term);
        // Do query
        TopDocs docs = indexSearcher.search(query, 1);
        if (docs.scoreDocs.length > 0) {
            // If there are results, retrieve first document, since all results should come
            // from the same document
            Document document = indexSearcher.doc(docs.scoreDocs[0].doc);
            // Retrieve the timestamp field if it exists
            if (document.getField(FIELD_TIMESTAMP) != null) {
                result = Optional.ofNullable(StringUtils
                        .trimToNull(document.getField(FIELD_TIMESTAMP).stringValue()));
            }
        }
    }
    return result;
}
public void testSortByRelevance() throws IOException {
    Shard shard = new Shard();
    indexRandomDocs(shard.writer);
    // Pick one of three candidate terms at random as the top-level query.
    String[] candidates = { "foo", "bar", "baz" };
    Query topLevel = new TermQuery(new Term("text", candidates[random().nextInt(candidates.length)]));
    IndexSearcher searcher = shard.getIndexSearcher();
    GroupingSearch grouper = new GroupingSearch(getGroupSelector());
    grouper.setGroupDocsLimit(10);
    TopGroups<T> topGroups = grouper.search(searcher, topLevel, 0, 5);
    TopDocs topDoc = searcher.search(topLevel, 1);
    for (int i = 0; i < topGroups.groups.length; i++) {
        // Each group should have a result set equal to that returned by the
        // top-level query, filtered by the group value.
        Query filtered = new BooleanQuery.Builder()
            .add(topLevel, BooleanClause.Occur.MUST)
            .add(filterQuery(topGroups.groups[i].groupValue), BooleanClause.Occur.FILTER)
            .build();
        TopDocs groupDocs = searcher.search(filtered, 10);
        assertScoreDocsEquals(topGroups.groups[i].scoreDocs, groupDocs.scoreDocs);
        if (i == 0) {
            // The best group's best hit must be the overall best hit.
            assertEquals(groupDocs.scoreDocs[0].doc, topDoc.scoreDocs[0].doc);
            assertEquals(groupDocs.scoreDocs[0].score, topDoc.scoreDocs[0].score, 0);
        }
    }
    shard.close();
}
/**
 * Indexes 5M documents into a Redis-backed directory via a remote Jedis
 * pool, then searches 1M term queries, logging elapsed time for each phase.
 */
public void
testRedisDirectoryWithRemoteJedisPool() throws IOException {
    long start = System.currentTimeMillis();
    IndexWriterConfig indexWriterConfig = new IndexWriterConfig(new WhitespaceAnalyzer()).setOpenMode(IndexWriterConfig
            .OpenMode.CREATE);
    JedisPool jedisPool = new JedisPool(new JedisPoolConfig(), "10.97.19.55", 6379, Constants.TIME_OUT);
    RedisDirectory redisDirectory = new RedisDirectory(new JedisPoolStream(jedisPool));
    IndexWriter indexWriter = new IndexWriter(redisDirectory, indexWriterConfig);
    for (int i = 0; i < 5000000; i++) {
        indexWriter.addDocument(addDocument(i));
    }
    indexWriter.commit();
    indexWriter.close();
    redisDirectory.close();
    long end = System.currentTimeMillis();
    log.error("RedisDirectoryWithJedisPool consumes {}s!", (end - start) / 1000);
    start = System.currentTimeMillis();
    // NOTE(review): indexing targets 10.97.19.55 but the search below reads
    // "localhost" — confirm both point at the same Redis instance.
    // BUGFIX: keep references to the search-side directory and reader so
    // they can be closed; previously both were leaked.
    RedisDirectory searchDirectory = new RedisDirectory(new JedisStream("localhost",
            6379));
    DirectoryReader directoryReader = DirectoryReader.open(searchDirectory);
    try {
        IndexSearcher indexSearcher = new IndexSearcher(directoryReader);
        int total = 0;
        for (int i = 0; i < 1000000; i++) {
            TermQuery key1 = new TermQuery(new Term("key1", "key" + i));
            TopDocs search = indexSearcher.search(key1, 10);
            total += search.totalHits;
        }
        System.out.println(total);
        end = System.currentTimeMillis();
        log.error("RedisDirectoryWithJedisPool search consumes {}ms!", (end - start));
    } finally {
        directoryReader.close();
        searchDirectory.close();
    }
}