Examples of the org.apache.lucene.search.IndexSearcher class

The examples below show how to use the org.apache.lucene.search.IndexSearcher API; follow each project's link through to GitHub to view the full source.
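Before the project examples, here is a minimal, self-contained sketch of the core workflow: open a searcher over an index, run a query, and load the stored fields of each hit. It is a sketch against the Lucene 8.x API; the class name IndexSearcherDemo and the field names are illustrative, not taken from any project below.

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.ByteBuffersDirectory;
import org.apache.lucene.store.Directory;

public class IndexSearcherDemo {
  public static void main(String[] args) throws Exception {
    Directory dir = new ByteBuffersDirectory();

    // index one document with an analyzed, stored text field
    try (IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()))) {
      Document doc = new Document();
      doc.add(new TextField("title", "hello lucene", Field.Store.YES));
      writer.addDocument(doc);
    }

    // open a point-in-time reader and search it
    try (DirectoryReader reader = DirectoryReader.open(dir)) {
      IndexSearcher searcher = new IndexSearcher(reader);
      TopDocs topDocs = searcher.search(new TermQuery(new Term("title", "lucene")), 10);
      for (ScoreDoc sd : topDocs.scoreDocs) {
        Document hit = searcher.doc(sd.doc); // load the stored fields of the hit
        System.out.println(hit.get("title"));
      }
    }
  }
}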

Example 1  Project: lucene-solr  File: BaseXYPointTestCase.java
/** test we can search for a polygon with a hole (but still includes the doc) */
public void testPolygonHole() throws Exception {
  assumeTrue("Impl does not support polygons", supportsPolygons());
  Directory dir = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir);

  // add a doc with a point
  Document document = new Document();
  addPointToDoc("field", document, 18.313694f, -65.227444f);
  writer.addDocument(document);
  
  // search and verify we found our doc
  IndexReader reader = writer.getReader();
  IndexSearcher searcher = newSearcher(reader);
  XYPolygon inner = new XYPolygon(new float[] { 18.5f, 18.5f, 18.7f, 18.7f, 18.5f },
                              new float[] { -65.7f, -65.4f, -65.4f, -65.7f, -65.7f });
  XYPolygon outer = new XYPolygon(new float[] { 18, 18, 19, 19, 18 },
                              new float[] { -66, -65, -65, -66, -66 }, inner);
  assertEquals(1, searcher.count(newPolygonQuery("field", outer)));

  reader.close();
  writer.close();
  dir.close();
}
 
Example 2  Project: james-project  File: LuceneMessageSearchIndex.java
private Flags retrieveFlags(Mailbox mailbox, MessageUid uid) throws IOException {
    try (IndexSearcher searcher = new IndexSearcher(IndexReader.open(writer, true))) {
        Flags retrievedFlags = new Flags();

        BooleanQuery query = new BooleanQuery();
        query.add(new TermQuery(new Term(MAILBOX_ID_FIELD, mailbox.getMailboxId().serialize())), BooleanClause.Occur.MUST);
        query.add(createQuery(MessageRange.one(uid)), BooleanClause.Occur.MUST);
        query.add(new PrefixQuery(new Term(FLAGS_FIELD, "")), BooleanClause.Occur.MUST);

        TopDocs docs = searcher.search(query, 100000);
        ScoreDoc[] sDocs = docs.scoreDocs;
        for (ScoreDoc sDoc : sDocs) {
            Document doc = searcher.doc(sDoc.doc);

            Stream.of(doc.getValues(FLAGS_FIELD))
                .forEach(flag -> fromString(flag).ifPresentOrElse(retrievedFlags::add, () -> retrievedFlags.add(flag)));
        }
        return retrievedFlags;
    }
}
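This example targets the old Lucene 3.x API, where IndexSearcher implements Closeable (hence the try-with-resources), BooleanQuery is mutable, and IndexReader.open(writer, true) produces a near-real-time reader. On Lucene 5+ the same lookup would look roughly like the sketch below; MAILBOX_ID_FIELD, FLAGS_FIELD, createQuery and the surrounding variables are the project's own symbols, and the sketch is an assumed port, not james-project code.

// Hedged sketch: the same flags lookup against the modern Lucene API.
try (DirectoryReader reader = DirectoryReader.open(writer)) { // NRT reader from the IndexWriter
    IndexSearcher searcher = new IndexSearcher(reader);
    BooleanQuery query = new BooleanQuery.Builder()
        .add(new TermQuery(new Term(MAILBOX_ID_FIELD, mailbox.getMailboxId().serialize())), BooleanClause.Occur.MUST)
        .add(createQuery(MessageRange.one(uid)), BooleanClause.Occur.MUST)
        .add(new PrefixQuery(new Term(FLAGS_FIELD, "")), BooleanClause.Occur.MUST)
        .build();
    TopDocs docs = searcher.search(query, 100000);
    for (ScoreDoc sDoc : docs.scoreDocs) {
        Document doc = searcher.doc(sDoc.doc);
        // ... collect the FLAGS_FIELD values exactly as above
    }
}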
 
Example 3  Project: uyuni  File: NGramQueryParserTest.java
public Hits performSearch(Directory dir, String query, boolean useMust)
    throws Exception {

    NGramQueryParser parser = new NGramQueryParser("name",
            new NGramAnalyzer(min_ngram, max_ngram), useMust);
    IndexSearcher searcher = new IndexSearcher(dir);
    Query q = parser.parse(query);
    Hits hits = searcher.search(q);
    log.info("Original Query = " + query);
    log.info("Parsed Query = " + q.toString());
    log.info("Hits.length() = " + hits.length());
    for (int i=0; i < hits.length(); i++) {
        log.debug("Document<"+hits.id(i)+"> = " + hits.doc(i));
        //Explanation explain = searcher.explain(q, hits.id(i));
        //log.debug("explain = " + explain.toString());
    }
    return hits;
}
 
Example 4  Project: crate  File: IpColumnReferenceTest.java
@Test
public void testIpExpression() throws Exception {
    IpColumnReference columnReference = new IpColumnReference(IP_COLUMN);
    columnReference.startCollect(ctx);
    columnReference.setNextReader(readerContext);
    IndexSearcher searcher = new IndexSearcher(readerContext.reader());
    TopDocs topDocs = searcher.search(new MatchAllDocsQuery(), 21);
    assertThat(topDocs.scoreDocs.length, is(21));

    int i = 0;
    for (ScoreDoc doc : topDocs.scoreDocs) {
        columnReference.setNextDocId(doc.doc);
        if (i == 20) {
            assertThat(columnReference.value(), is(nullValue()));
        } else if (i < 10) {
            assertThat(columnReference.value(), is("192.168.0." + i));
        } else {
            assertThat(columnReference.value(),
                is("7bd0:8082:2df8:487e:e0df:e7b5:9362:" + Integer.toHexString(i)));
        }
        i++;
    }
}
 
Example 5  Project: lucene-solr  File: CompositeVerifyQuery.java
@Override
public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
  final Weight indexQueryWeight = indexQuery.createWeight(searcher, ScoreMode.COMPLETE_NO_SCORES, boost); // scores aren't needed

  return new ConstantScoreWeight(this, boost) {

    @Override
    public Scorer scorer(LeafReaderContext context) throws IOException {

      final Scorer indexQueryScorer = indexQueryWeight.scorer(context);
      if (indexQueryScorer == null) {
        return null;
      }

      final TwoPhaseIterator predFuncValues = predicateValueSource.iterator(context, indexQueryScorer.iterator());
      return new ConstantScoreScorer(this, score(), scoreMode, predFuncValues);
    }

    @Override
    public boolean isCacheable(LeafReaderContext ctx) {
      return predicateValueSource.isCacheable(ctx);
    }

  };
}
 
Example 6  Project: lucene-solr  File: TestSelectiveWeightCreation.java
private LTRScoringQuery.ModelWeight performQuery(TopDocs hits,
    IndexSearcher searcher, int docid, LTRScoringQuery model) throws IOException,
    ModelException {
  final List<LeafReaderContext> leafContexts = searcher.getTopReaderContext()
      .leaves();
  final int n = ReaderUtil.subIndex(hits.scoreDocs[0].doc, leafContexts);
  final LeafReaderContext context = leafContexts.get(n);
  final int deBasedDoc = hits.scoreDocs[0].doc - context.docBase;

  final Weight weight = searcher.createWeight(searcher.rewrite(model), ScoreMode.COMPLETE, 1);
  final Scorer scorer = weight.scorer(context);

  // rerank using the field final-score
  scorer.iterator().advance(deBasedDoc);
  scorer.score();
  assertTrue(weight instanceof LTRScoringQuery.ModelWeight);
  final LTRScoringQuery.ModelWeight modelWeight = (LTRScoringQuery.ModelWeight) weight;
  return modelWeight;

}
 
Example 7  Project: lucene-solr  File: TestSortedSetDocValuesFacets.java
private static Facets getAllFacets(IndexSearcher searcher, SortedSetDocValuesReaderState state,
                                   ExecutorService exec) throws IOException, InterruptedException {
  if (random().nextBoolean()) {
    FacetsCollector c = new FacetsCollector();
    searcher.search(new MatchAllDocsQuery(), c);
    if (exec != null) {
      return new ConcurrentSortedSetDocValuesFacetCounts(state, c, exec);
    } else {
      return new SortedSetDocValuesFacetCounts(state, c);
    }
  } else if (exec != null) {
    return new ConcurrentSortedSetDocValuesFacetCounts(state, exec);
  } else {
    return new SortedSetDocValuesFacetCounts(state);
  }
}
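Whichever branch runs, the returned Facets object is consumed the same way. A hedged follow-up sketch (the "Author" dimension name is an assumption for illustration):

// List the top 10 labels of an assumed "Author" dimension.
Facets facets = getAllFacets(searcher, state, exec);
FacetResult result = facets.getTopChildren(10, "Author");
for (LabelAndValue lv : result.labelValues) {
  System.out.println(lv.label + " -> " + lv.value);
}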
 
Example 8  Project: lucene-solr  File: TestSimilarity2.java
/** similar to the above, but ORs the query with a real field */
public void testEmptyField() throws Exception {
  Directory dir = newDirectory();
  RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
  Document doc = new Document();
  doc.add(newTextField("foo", "bar", Field.Store.NO));
  iw.addDocument(doc);
  IndexReader ir = iw.getReader();
  iw.close();
  IndexSearcher is = newSearcher(ir);
  
  for (Similarity sim : sims) {
    is.setSimilarity(sim);
    BooleanQuery.Builder query = new BooleanQuery.Builder();
    query.add(new TermQuery(new Term("foo", "bar")), BooleanClause.Occur.SHOULD);
    query.add(new TermQuery(new Term("bar", "baz")), BooleanClause.Occur.SHOULD);
    assertEquals(1, is.search(query.build(), 10).totalHits.value);
  }
  ir.close();
  dir.close();
}
 
Example 9  Project: crate  File: LuceneOrderedDocCollectorTest.java
@Test
public void testSearchWithScores() throws Exception {
    IndexWriter w = new IndexWriter(new ByteBuffersDirectory(), new IndexWriterConfig(new KeywordAnalyzer()));
    KeywordFieldMapper.KeywordFieldType fieldType = new KeywordFieldMapper.KeywordFieldType();
    fieldType.setName("x");
    fieldType.freeze();

    for (int i = 0; i < 3; i++) {
        addDoc(w, fieldType, "Arthur");
    }
    addDoc(w, fieldType, "ArthurDent"); // not "Arthur", to lower the score
    w.commit();
    IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(w, true, true));

    List<LuceneCollectorExpression<?>> columnReferences = Collections.singletonList(new ScoreCollectorExpression());
    Query query = fieldType.termsQuery(Collections.singletonList("Arthur"), null);
    LuceneOrderedDocCollector collector = collector(searcher, columnReferences, query, null, true);
    KeyIterable<ShardId, Row> result = collector.collect();

    assertThat(Iterables.size(result), is(2));

    Iterator<Row> values = result.iterator();

    assertThat(values.next().get(0), Matchers.is(1.0F));
    assertThat(values.next().get(0), Matchers.is(1.0F));
}
 
Example 10  Project: cqunews-web  File: SearchDao.java
/**
 * Search by keyword.
 * @param keywords the keyword to look up in the title field
 * @return TopDocs
 * @throws IOException
 * @throws ParseException
 */
public TopDocs searcher(String keywords) throws IOException, ParseException {
	Directory directory = FSDirectory.open(new File(Constant.INDEXDIR));
	TopDocs topDocs = null;
	
	IndexReader indexReader = DirectoryReader.open(directory);
	IndexSearcher indexSearcher = new IndexSearcher(indexReader);
	
	Query query = new TermQuery(new Term("title",
			keywords));
	// retrieve the first n records matching the query
	topDocs = indexSearcher.search(query, 10);
	System.out.println("Total hits: " + topDocs.totalHits);
	ScoreDoc scoreDocs[] = topDocs.scoreDocs;
	for (ScoreDoc scoreDoc : scoreDocs) {
		int docID = scoreDoc.doc;
		// retrieve the document by its id
		Document document = indexSearcher.doc(docID);
		System.out.println("Title: " + document.get("title"));
		System.out.println("Content: " + document.get("content"));
		System.out.println("-----------------------------------------------------");
	}
	// release the reader and directory once the stored fields have been read
	indexReader.close();
	directory.close();
	return topDocs;
}
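Note that the method builds a raw TermQuery, so no analysis is applied: the keyword must exactly match an indexed term in the title field. A hedged usage sketch, assuming Constant.INDEXDIR points at an existing index:

SearchDao dao = new SearchDao();
TopDocs docs = dao.searcher("lucene"); // exact indexed term; prints title/content of the top 10 hits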
 
Example 11  Project: lucene-solr  File: FuzzyCompletionQuery.java
@Override
public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
  final Automaton originalAutomata;
  try (CompletionTokenStream stream = (CompletionTokenStream) analyzer.tokenStream(getField(), getTerm().text()) ) {
    originalAutomata = stream.toAutomaton(unicodeAware);
  }
  Set<IntsRef> refs = new HashSet<>();
  Automaton automaton = toLevenshteinAutomata(originalAutomata, refs);
  if (unicodeAware) {
    Automaton utf8automaton = new UTF32ToUTF8().convert(automaton);
    utf8automaton = Operations.determinize(utf8automaton, maxDeterminizedStates);
    automaton = utf8automaton;
  }
  // TODO Accumulating all refs is bad, because the resulting set may be very big.
  // TODO Better iterate over automaton again inside FuzzyCompletionWeight?
  return new FuzzyCompletionWeight(this, automaton, refs);
}
 
Example 12  Project: lucene-solr  File: TestGeo3DPoint.java
public void testBasic() throws Exception {
  Directory dir = getDirectory();
  IndexWriterConfig iwc = newIndexWriterConfig();
  iwc.setCodec(getCodec());
  IndexWriter w = new IndexWriter(dir, iwc);
  Document doc = new Document();
  PlanetModel planetModel = randomPlanetModel();
  doc.add(new Geo3DPoint("field", planetModel, 50.7345267, -97.5303555));
  w.addDocument(doc);
  IndexReader r = DirectoryReader.open(w);
  // We can't wrap with "exotic" readers because the query must see the BKD3DDVFormat:
  IndexSearcher s = newSearcher(r, false);
  assertEquals(1, s.search(Geo3DPoint.newShapeQuery("field",
                                                    GeoCircleFactory.makeGeoCircle(planetModel, toRadians(50), toRadians(-97), Math.PI/180.)), 1).totalHits.value);
  w.close();
  r.close();
  dir.close();
}
 
Example 13  Project: lucene-solr  File: TestQueryBuilder.java
public void testMaxBooleanClause() throws Exception {
  int size = 34;
  CannedBinaryTokenStream.BinaryToken[] tokens = new CannedBinaryTokenStream.BinaryToken[size];
  BytesRef term1 = new BytesRef("ff");
  BytesRef term2 = new BytesRef("f");
  for (int i = 0; i < size;) {
    if (i % 2 == 0) {
      tokens[i] = new CannedBinaryTokenStream.BinaryToken(term2, 1, 1);
      tokens[i + 1] = new CannedBinaryTokenStream.BinaryToken(term1, 0, 2);
      i += 2;
    } else {
      tokens[i] = new CannedBinaryTokenStream.BinaryToken(term2, 1, 1);
      i++;
    }
  }
  QueryBuilder qb = new QueryBuilder(null);
  try (TokenStream ts = new CannedBinaryTokenStream(tokens)) {
    expectThrows(IndexSearcher.TooManyClauses.class, () -> qb.analyzeGraphBoolean("", ts, BooleanClause.Occur.MUST));
  }
  try (TokenStream ts = new CannedBinaryTokenStream(tokens)) {
    expectThrows(IndexSearcher.TooManyClauses.class, () -> qb.analyzeGraphBoolean("", ts, BooleanClause.Occur.SHOULD));
  }
  try (TokenStream ts = new CannedBinaryTokenStream(tokens)) {
    expectThrows(IndexSearcher.TooManyClauses.class, () -> qb.analyzeGraphPhrase(ts, "", 0));
  }
}
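The TooManyClauses exception enforces a JVM-wide clause limit, 1024 by default. In versions where the limit lives on IndexSearcher, as this test's exception type implies, it can be raised explicitly; a hedged sketch:

// The clause limit is a static, process-wide setting (default 1024).
// Raising it trades memory and CPU headroom for larger generated boolean queries.
IndexSearcher.setMaxClauseCount(4096);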
 
Example 14  Project: tagme  File: AnchorIndexer.java
static int freq(Set<String> anchors, IndexSearcher index, QueryParser queryParser) throws IOException
{
	BitSet bits = new BitSet(index.maxDoc());
	for(String a : anchors)
	{
		try {
			Query q = queryParser.parse(String.format(QUERY_PATTERN, QueryParser.escape(a)));
			TotalHitCountCollectorSet results = new TotalHitCountCollectorSet(bits);
			index.search(q, results);
		} catch (ParseException e) {
			// ignore anchors that fail to parse as queries
		}
	}
	return bits.cardinality();
}
 
Example 15  Project: lucene4ir  File: LanguageModel.java
public LanguageModel(IndexReader ir, int[] doc_ids) {
    reader = ir;
    searcher = new IndexSearcher(reader);
    this.doc_ids = doc_ids;
    doc_len = 0.0;
    for (int doc_id : doc_ids) {
        updateTermCountMap(doc_id, 1.0);
    }

    try {
        collectionStats = searcher.collectionStatistics(field);
        token_count = collectionStats.sumTotalTermFreq();
    } catch (IOException e) {
        e.printStackTrace();
        System.exit(1);
    }
}
 
Example 16  Project: onedev  File: DefaultIndexManager.java
@Override
public boolean isIndexed(Project project, ObjectId commit) {
	File indexDir = storageManager.getProjectIndexDir(project.getId());
	try (Directory directory = FSDirectory.open(indexDir.toPath())) {
		if (DirectoryReader.indexExists(directory)) {
			try (IndexReader reader = DirectoryReader.open(directory)) {
				IndexSearcher searcher = new IndexSearcher(reader);
				return getIndexVersion().equals(getCommitIndexVersion(searcher, commit));
			}
		} else {
			return false;
		}
	} catch (IOException e) {
		throw new RuntimeException(e);
	}
}
 
Example 17  Project: solr-redis  File: TestRedisQParser.java
@Test
public void shouldReturnEmptyQueryOnEmptyListOfHget() throws SyntaxError, IOException {
  when(localParamsMock.get("command")).thenReturn("hget");
  when(localParamsMock.get("key")).thenReturn("simpleKey");
  when(localParamsMock.get("field")).thenReturn("f1");
  when(localParamsMock.get(QueryParsing.V)).thenReturn("string_field");
  when(jedisMock.hget(anyString(), anyString())).thenReturn(null);
  when(requestMock.getSchema()).thenReturn(schema);
  when(schema.getQueryAnalyzer()).thenReturn(new StandardAnalyzer());
  redisQParser = new RedisQParser("string_field", localParamsMock, paramsMock, requestMock, commandHandler);
  final Query query = redisQParser.parse();
  verify(jedisMock).hget("simpleKey", "f1");
  IndexSearcher searcher = new IndexSearcher(new MultiReader());
  final Set<Term> terms = extractTerms(searcher, query);
  Assert.assertEquals(0, terms.size());
}
 
Example 18  Project: lucene-solr  File: TestUnifiedHighlighter.java
public void testMatchesSlopBug() throws IOException {
  IndexReader ir = indexSomeFields();
  IndexSearcher searcher = newSearcher(ir);
  UnifiedHighlighter highlighter = new UnifiedHighlighter(searcher, indexAnalyzer);
  Query query = new IntervalQuery("title", Intervals.maxgaps(random().nextBoolean() ? 1 : 2,
      Intervals.ordered(
          Intervals.term("this"), Intervals.term("is"), Intervals.term("the"), Intervals.term("field"))));
  TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER);
  assertEquals(1, topDocs.totalHits.value);
  String[] snippets = highlighter.highlight("title", query, topDocs, 10);
  assertEquals(1, snippets.length);
  if (highlighter.getFlags("title").contains(HighlightFlag.WEIGHT_MATCHES)) {
    assertEquals("" + highlighter.getFlags("title"),
        "<b>This is the title field</b>.", snippets[0]);
  } else {
    assertEquals("" + highlighter.getFlags("title"),
        "<b>This</b> <b>is</b> <b>the</b> title <b>field</b>.", snippets[0]);
  }
  ir.close();
}
 
Example 19  Project: lucene-solr  File: HashQParserPlugin.java
@Override
public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {

  SolrIndexSearcher solrIndexSearcher = (SolrIndexSearcher)searcher;
  IndexReaderContext context = solrIndexSearcher.getTopReaderContext();

  List<LeafReaderContext> leaves =  context.leaves();
  FixedBitSet[] fixedBitSets = new FixedBitSet[leaves.size()];

  for(LeafReaderContext leaf : leaves) {
    try {
      SegmentPartitioner segmentPartitioner = new SegmentPartitioner(leaf,worker,workers, keys, solrIndexSearcher);
      segmentPartitioner.run();
      fixedBitSets[segmentPartitioner.context.ord] = segmentPartitioner.docs;
    } catch(Exception e) {
      throw new IOException(e);
    }
  }

  ConstantScoreQuery constantScoreQuery = new ConstantScoreQuery(new BitsFilter(fixedBitSets));
  return searcher.rewrite(constantScoreQuery).createWeight(searcher, ScoreMode.COMPLETE_NO_SCORES, boost);
}
 
Example 20  Project: Lottery  File: LuceneContentSvcImpl.java
@Transactional(readOnly = true)
public Pagination searchPage(Directory dir, String queryString,String category,String workplace,
		Integer siteId, Integer channelId, Date startDate, Date endDate,
		int pageNo, int pageSize) throws CorruptIndexException,
		IOException, ParseException {
	Searcher searcher = new IndexSearcher(dir);
	try {
		Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_30);
		Query query = LuceneContent.createQuery(queryString,category,workplace, siteId,
				channelId, startDate, endDate, analyzer);
		TopDocs docs = searcher.search(query, pageNo * pageSize);
		Pagination p = LuceneContent.getResultPage(searcher, docs, pageNo,
				pageSize);
		List<?> ids = p.getList();
		List<Content> contents = new ArrayList<Content>(ids.size());
		for (Object id : ids) {
			contents.add(contentMng.findById((Integer) id));
		}
		p.setList(contents);
		return p;
	} finally {
		searcher.close();
	}
}
 
Example 21  Project: lucene-solr  File: AnalyzingInfixSuggester.java
@Override
public long getCount() throws IOException {
  if (searcherMgr == null) {
    return 0;
  }
  SearcherManager mgr;
  IndexSearcher searcher;
  synchronized (searcherMgrLock) {
    mgr = searcherMgr; // acquire & release on same SearcherManager, via local reference
    searcher = mgr.acquire();
  }
  try {
    return searcher.getIndexReader().numDocs();
  } finally {
    mgr.release(searcher);
  }
}
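The acquire/try/finally/release discipline shown here is the standard SearcherManager contract. A minimal hedged sketch of the full lifecycle, assuming writer is an open IndexWriter:

// Create once; near-real-time searchers will see uncommitted updates.
SearcherManager mgr = new SearcherManager(writer, new SearcherFactory());

// After index updates, make them visible to subsequently acquired searchers.
mgr.maybeRefresh();

IndexSearcher searcher = mgr.acquire();
try {
    // ... run queries against this point-in-time searcher
} finally {
    mgr.release(searcher); // never close an acquired searcher directly
}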
 
Example 22  Project: lucene-solr  File: TestBigIntegerPoint.java
/** Add a single 1D point and search for it */
public void testBasics() throws Exception {
  Directory dir = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir);

  // add a doc with a large biginteger value
  Document document = new Document();
  BigInteger large = BigInteger.valueOf(Long.MAX_VALUE).multiply(BigInteger.valueOf(64));
  document.add(new BigIntegerPoint("field", large));
  writer.addDocument(document);
  
  // search and verify we found our doc
  IndexReader reader = writer.getReader();
  IndexSearcher searcher = newSearcher(reader);
  assertEquals(1, searcher.count(BigIntegerPoint.newExactQuery("field", large)));
  assertEquals(1, searcher.count(BigIntegerPoint.newRangeQuery("field", large.subtract(BigInteger.ONE), large.add(BigInteger.ONE))));
  assertEquals(1, searcher.count(BigIntegerPoint.newSetQuery("field", large)));
  assertEquals(0, searcher.count(BigIntegerPoint.newSetQuery("field", large.subtract(BigInteger.ONE))));
  assertEquals(0, searcher.count(BigIntegerPoint.newSetQuery("field")));

  reader.close();
  writer.close();
  dir.close();
}
 
Example 23  Project: lucene-solr  File: TestSpanTermQuery.java
public void testNoPositions() throws IOException {
  Directory dir = newDirectory();
  RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
  Document doc = new Document();
  doc.add(new StringField("foo", "bar", Field.Store.NO));
  iw.addDocument(doc);
  
  IndexReader ir = iw.getReader();
  iw.close();
  
  IndexSearcher is = new IndexSearcher(ir);
  SpanTermQuery query = new SpanTermQuery(new Term("foo", "bar"));
  IllegalStateException expected = expectThrows(IllegalStateException.class, () -> {
    is.search(query, 5);
  });
  assertTrue(expected.getMessage().contains("was indexed without position data"));

  ir.close();
  dir.close();
}
 
Example 24  Project: solr-redis  File: TestRedisQParser.java
@Test
public void shouldParseJsonTermsFromRedisOnGetCommand() throws SyntaxError, IOException {
  when(localParamsMock.get("command")).thenReturn("get");
  when(localParamsMock.get("key")).thenReturn("simpleKey");
  when(localParamsMock.get("serialization")).thenReturn("json");
  when(localParamsMock.get(QueryParsing.V)).thenReturn("string_field");
  when(jedisMock.get(any(byte[].class))).thenReturn("[1,2,3]".getBytes());
  when(requestMock.getSchema()).thenReturn(schema);
  when(schema.getQueryAnalyzer()).thenReturn(new StandardAnalyzer());
  redisQParser = new RedisQParser("string_field", localParamsMock, paramsMock, requestMock, commandHandler);
  final Query query = redisQParser.parse();
  verify(jedisMock).get("simpleKey".getBytes());
  IndexSearcher searcher = new IndexSearcher(new MultiReader());
  final Set<Term> terms = extractTerms(searcher, query);
  Assert.assertEquals(3, terms.size());
}
 
Example 25  Project: lucene-solr  File: SegmentInfosSearcherManager.java
@Override
protected IndexSearcher refreshIfNeeded(IndexSearcher old) throws IOException {
  List<LeafReader> subs;
  if (old == null) {
    subs = null;
  } else {
    subs = new ArrayList<>();
    for(LeafReaderContext ctx : old.getIndexReader().leaves()) {
      subs.add(ctx.reader());
    }
  }

  // Open a new reader, sharing any common segment readers with the old one:
  DirectoryReader r = StandardDirectoryReader.open(dir, currentInfos, subs);
  addReaderClosedListener(r);
  node.message("refreshed to version=" + currentInfos.getVersion() + " r=" + r);
  return SearcherManager.getSearcher(searcherFactory, r, old.getIndexReader());
}
 
Example 26  Project: gerbil  File: IndexBasedEntityChecker.java
public static IndexBasedEntityChecker create(String indexDirPath) {
    Directory indexDirectory = null;
    File directoryPath = new File(indexDirPath);
    if (directoryPath.exists() && directoryPath.isDirectory() && (directoryPath.list().length > 0)) {
        try {
            indexDirectory = FSDirectory.open(directoryPath.toPath());
            IndexReader indexReader = DirectoryReader.open(indexDirectory);
            IndexSearcher indexSearcher = new IndexSearcher(indexReader);
            return new IndexBasedEntityChecker(indexSearcher, indexDirectory, indexReader);
        } catch (IOException e) {
            LOGGER.error("Exception while trying to open index for entity checking. Returning null.", e);
            IOUtils.closeQuietly(indexDirectory);
            return null;
        }
    } else {
        LOGGER.warn(
                "The configured path to the entity checking index (\"{}\") does not exist,  is not a directory or is an empty directory. Returning null.",
                directoryPath.toString());
        return null;
    }
}
 
Example 27  Project: solr-redis  File: TestRedisQParser.java
@Test
public void shouldAddTermsFromRedisOnLrangeCommandCustomMax() throws SyntaxError, IOException {
  when(localParamsMock.get("command")).thenReturn("lrange");
  when(localParamsMock.get("max")).thenReturn("1");
  when(localParamsMock.get("key")).thenReturn("simpleKey");
  when(localParamsMock.get(QueryParsing.V)).thenReturn("string_field");
  when(jedisMock.lrange(anyString(), anyLong(), anyLong())).thenReturn(Arrays.asList("123", "321"));
  when(requestMock.getSchema()).thenReturn(schema);
  when(schema.getQueryAnalyzer()).thenReturn(new StandardAnalyzer());
  redisQParser = new RedisQParser("string_field", localParamsMock, paramsMock, requestMock, commandHandler);
  final Query query = redisQParser.parse();
  verify(jedisMock).lrange("simpleKey", 0, 1);
  IndexSearcher searcher = new IndexSearcher(new MultiReader());
  final Set<Term> terms = extractTerms(searcher, query);
  Assert.assertEquals(2, terms.size());
}
 
Example 28  Project: lucene-solr  File: TestFieldCacheSort.java
/** test that we throw exception on multi-valued field, creates corrupt reader, use SORTED_SET instead */
public void testMultiValuedField() throws IOException {
  Directory indexStore = newDirectory();
  IndexWriter writer = new IndexWriter(indexStore, newIndexWriterConfig(new MockAnalyzer(random())));
  for(int i=0; i<5; i++) {
      Document doc = new Document();
      doc.add(new StringField("string", "a"+i, Field.Store.NO));
      doc.add(new StringField("string", "b"+i, Field.Store.NO));
      writer.addDocument(doc);
  }
  writer.forceMerge(1); // enforce one segment to have a higher unique term count in all cases
  writer.close();
  Sort sort = new Sort(
      new SortField("string", SortField.Type.STRING),
      SortField.FIELD_DOC);
  IndexReader reader = UninvertingReader.wrap(DirectoryReader.open(indexStore),
                       Collections.singletonMap("string", Type.SORTED));
  IndexSearcher searcher = new IndexSearcher(reader);
  expectThrows(IllegalStateException.class, () -> {
    searcher.search(new MatchAllDocsQuery(), 500, sort);
  });
  reader.close();
  indexStore.close();
}
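As the test's comment says, SORTED_SET doc values are the supported way to sort on a multi-valued string field. A hedged sketch of that route, assuming an open writer and searcher:

// Index the multi-valued field as SORTED_SET doc values...
Document doc = new Document();
doc.add(new SortedSetDocValuesField("string", new BytesRef("a1")));
doc.add(new SortedSetDocValuesField("string", new BytesRef("b1")));
writer.addDocument(doc);

// ...then sort with SortedSetSortField, which picks each document's minimum value by default.
Sort sort = new Sort(new SortedSetSortField("string", false));
TopDocs hits = searcher.search(new MatchAllDocsQuery(), 500, sort);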
 
Example 29  Project: AGDISTIS  File: TripleIndex.java
public TripleIndex() throws IOException {
	Properties prop = new Properties();
	InputStream input = TripleIndex.class.getResourceAsStream("/config/agdistis.properties");
	prop.load(input);

	String envIndex = System.getenv("AGDISTIS_INDEX");
	String index = envIndex != null ? envIndex : prop.getProperty("index");
	log.info("The index will be here: " + index);

	directory = new MMapDirectory(new File(index));
	ireader = DirectoryReader.open(directory);
	isearcher = new IndexSearcher(ireader);
	this.urlValidator = new UrlValidator();

	cache = CacheBuilder.newBuilder().maximumSize(50000).build();
}
 
Example 30  Project: incubator-pinot  File: TextSearchQueriesTest.java
@Override
public void run() {
  try {
    Query query = queryParser.parse("\"machine learning\" AND spark");
    int count = 0;
    int prevHits = 0;
    // run the same query 1000 times and see in increasing number of hits
    // in the index
    while (count < 1000) {
      IndexSearcher indexSearcher = searchManager.acquire();
      int hits = indexSearcher.search(query, Integer.MAX_VALUE).scoreDocs.length;
      // TODO: see how we can make this more deterministic
      if (count > 200) {
        // we should see an increasing number of hits
        Assert.assertTrue(hits > 0);
        Assert.assertTrue(hits >= prevHits);
      }
      count++;
      prevHits = hits;
      searchManager.release(indexSearcher);
      Thread.sleep(1);
    }
  } catch (Exception e) {
    throw new RuntimeException("Caught exception in realtime reader", e);
  }
}
 