org.apache.lucene.index.RandomIndexWriter#getReader() Source Code Examples

Listed below are code examples that use org.apache.lucene.index.RandomIndexWriter#getReader(), each taken from an open-source project.
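
Before the individual examples, here is a minimal sketch of the pattern they all share. This is a hedged illustration, not code from any of the projects below: it assumes it runs inside a LuceneTestCase subclass, which supplies the newDirectory(), random(), newStringField(), newSearcher(), and assertEquals() helpers used throughout, and the field name "field" and value "value" are placeholders.

import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.IOUtils;

public void testGetReaderRoundTrip() throws Exception {
  Directory dir = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir);

  Document doc = new Document();
  doc.add(newStringField("field", "value", Field.Store.NO));
  writer.addDocument(doc);

  // getReader() flushes pending documents and returns a near-real-time
  // DirectoryReader, so no explicit commit() is needed before searching.
  IndexReader reader = writer.getReader();
  IndexSearcher searcher = newSearcher(reader);
  assertEquals(1, searcher.count(new TermQuery(new Term("field", "value"))));

  // The reader is a point-in-time snapshot with a lifetime independent of
  // the writer's, so reader, writer, and directory are each closed explicitly.
  IOUtils.close(reader, writer, dir);
}

Several of the examples below close the writer immediately after getReader() and keep searching; that is safe for exactly this reason: the returned reader is a snapshot that does not depend on the writer staying open.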

Example 1  Project: lucene-solr  File: TestDirectSpellChecker.java
public void testBogusField() throws Exception {
  DirectSpellChecker spellChecker = new DirectSpellChecker();
  Directory dir = newDirectory();
  Analyzer analyzer = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true);
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir, analyzer);

  for (int i = 0; i < 20; i++) {
    Document doc = new Document();
    doc.add(newTextField("numbers", English.intToEnglish(i), Field.Store.NO));
    writer.addDocument(doc);
  }

  IndexReader ir = writer.getReader();

  SuggestWord[] similar = spellChecker.suggestSimilar(new Term(
      "bogusFieldBogusField", "fvie"), 2, ir,
      SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX);
  assertEquals(0, similar.length);
  
  IOUtils.close(ir, writer, dir, analyzer);
}
 
Example 2  Project: lucene-solr  File: TestInetAddressPoint.java
/** Add a single address and search for it */
public void testBasics() throws Exception {
  Directory dir = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir);

  // add a doc with an address
  Document document = new Document();
  InetAddress address = InetAddress.getByName("1.2.3.4");
  document.add(new InetAddressPoint("field", address));
  writer.addDocument(document);
  
  // search and verify we found our doc
  IndexReader reader = writer.getReader();
  IndexSearcher searcher = newSearcher(reader);
  assertEquals(1, searcher.count(InetAddressPoint.newExactQuery("field", address)));
  assertEquals(1, searcher.count(InetAddressPoint.newPrefixQuery("field", address, 24)));
  assertEquals(1, searcher.count(InetAddressPoint.newRangeQuery("field", InetAddress.getByName("1.2.3.3"), InetAddress.getByName("1.2.3.5"))));
  assertEquals(1, searcher.count(InetAddressPoint.newSetQuery("field", InetAddress.getByName("1.2.3.4"))));
  assertEquals(1, searcher.count(InetAddressPoint.newSetQuery("field", InetAddress.getByName("1.2.3.4"), InetAddress.getByName("1.2.3.5"))));
  assertEquals(0, searcher.count(InetAddressPoint.newSetQuery("field", InetAddress.getByName("1.2.3.3"))));
  assertEquals(0, searcher.count(InetAddressPoint.newSetQuery("field")));

  reader.close();
  writer.close();
  dir.close();
}
 
Example 3  Project: lucene-solr  File: TestSpanNotQuery.java
public void testNoPositions() throws IOException {
  Directory dir = newDirectory();
  RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
  Document doc = new Document();
  doc.add(new StringField("foo", "bar", Field.Store.NO));
  iw.addDocument(doc);
  
  IndexReader ir = iw.getReader();
  iw.close();
  
  IndexSearcher is = new IndexSearcher(ir);
  SpanTermQuery query = new SpanTermQuery(new Term("foo", "bar"));
  SpanTermQuery query2 = new SpanTermQuery(new Term("foo", "baz"));

  IllegalStateException expected = expectThrows(IllegalStateException.class, () -> {
    is.search(new SpanNotQuery(query, query2), 5);
  });
  assertTrue(expected.getMessage().contains("was indexed without position data"));

  ir.close();
  dir.close();
}
 
Example 4  Project: lucene-solr  File: BaseGeoPointTestCase.java
/** test we can search for a multi-polygon */
public void testMultiPolygonBasics() throws Exception {
  assumeTrue("Impl does not support polygons", supportsPolygons());
  Directory dir = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir);

  // add a doc with a point
  Document document = new Document();
  addPointToDoc("field", document, 18.313694, -65.227444);
  writer.addDocument(document);
  
  // search and verify we found our doc
  IndexReader reader = writer.getReader();
  IndexSearcher searcher = newSearcher(reader);
  Polygon a = new Polygon(new double[] { 28, 28, 29, 29, 28 },
                          new double[] { -56, -55, -55, -56, -56 });
  Polygon b = new Polygon(new double[] { 18, 18, 19, 19, 18 },
                          new double[] { -66, -65, -65, -66, -66 });
  assertEquals(1, searcher.count(newPolygonQuery("field", a, b)));

  reader.close();
  writer.close();
  dir.close();
}
 
Example 5  Project: lucene-solr  File: TestLatLonShape.java
public void testLUCENE8454() throws Exception {
  Directory dir = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir);

  Polygon poly = new Polygon(new double[] {-1.490648725633769E-132d, 90d, 90d, -1.490648725633769E-132d},
      new double[] {0d, 0d, 180d, 0d});

  Document document = new Document();
  addPolygonsToDoc(FIELDNAME, document, poly);
  writer.addDocument(document);

  ///// search //////
  IndexReader reader = writer.getReader();
  writer.close();
  IndexSearcher searcher = newSearcher(reader);

  // search a bbox in the hole
  Query q = LatLonShape.newBoxQuery(FIELDNAME, QueryRelation.DISJOINT, -29.46555603761226d, 0.0d, 8.381903171539307E-8d, 0.9999999403953552d);
  assertEquals(1, searcher.count(q));

  IOUtils.close(reader, dir);
}
 
Example 6  Project: lucene-solr  File: TestPhraseQuery.java
public void testSlopScoring() throws IOException {
  Directory directory = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random(), directory, 
      newIndexWriterConfig(new MockAnalyzer(random()))
        .setMergePolicy(newLogMergePolicy())
        .setSimilarity(new BM25Similarity()));

  Document doc = new Document();
  doc.add(newTextField("field", "foo firstname lastname foo", Field.Store.YES));
  writer.addDocument(doc);
  
  Document doc2 = new Document();
  doc2.add(newTextField("field", "foo firstname zzz lastname foo", Field.Store.YES));
  writer.addDocument(doc2);
  
  Document doc3 = new Document();
  doc3.add(newTextField("field", "foo firstname zzz yyy lastname foo", Field.Store.YES));
  writer.addDocument(doc3);
  
  IndexReader reader = writer.getReader();
  writer.close();

  IndexSearcher searcher = newSearcher(reader);
  searcher.setSimilarity(new ClassicSimilarity());
  PhraseQuery query = new PhraseQuery(Integer.MAX_VALUE, "field", "firstname", "lastname");
  ScoreDoc[] hits = searcher.search(query, 1000).scoreDocs;
  assertEquals(3, hits.length);
  // Make sure that those matches where the terms appear closer to
  // each other get a higher score:
  assertEquals(1.0, hits[0].score, 0.01);
  assertEquals(0, hits[0].doc);
  assertEquals(0.63, hits[1].score, 0.01);
  assertEquals(1, hits[1].doc);
  assertEquals(0.47, hits[2].score, 0.01);
  assertEquals(2, hits[2].doc);
  QueryUtils.check(random(), query, searcher);
  reader.close();
  directory.close();
}
 
Example 7
@Before
public void setupIndex() throws IOException {
    dirUnderTest = newDirectory();
    List<Similarity> sims = Arrays.asList(
            new ClassicSimilarity(),
            new SweetSpotSimilarity(), // extends Classic
            new BM25Similarity(),
            new LMDirichletSimilarity(),
            new BooleanSimilarity(),
            new LMJelinekMercerSimilarity(0.2F),
            new AxiomaticF3LOG(0.5F, 10),
            new DFISimilarity(new IndependenceChiSquared()),
            new DFRSimilarity(new BasicModelG(), new AfterEffectB(), new NormalizationH1()),
            new IBSimilarity(new DistributionLL(), new LambdaDF(), new NormalizationH3())
        );
    similarity = sims.get(random().nextInt(sims.size()));

    indexWriterUnderTest = new RandomIndexWriter(random(), dirUnderTest, newIndexWriterConfig().setSimilarity(similarity));
    for (int i = 0; i < docs.length; i++) {
        Document doc = new Document();
        doc.add(newStringField("id", "" + i, Field.Store.YES));
        doc.add(newField("field", docs[i], Store.YES));
        indexWriterUnderTest.addDocument(doc);
    }
    indexWriterUnderTest.commit();
    indexWriterUnderTest.forceMerge(1);
    indexWriterUnderTest.flush();

    indexReaderUnderTest = indexWriterUnderTest.getReader();
    searcherUnderTest = newSearcher(indexReaderUnderTest);
    searcherUnderTest.setSimilarity(similarity);
}
 
Example 8  Project: lucene-solr  File: TestBooleanQuery.java
public void testFILTERClauseBehavesLikeMUST() throws IOException {
  Directory dir = newDirectory();
  RandomIndexWriter w = new RandomIndexWriter(random(), dir);
  Document doc = new Document();
  Field f = newTextField("field", "a b c d", Field.Store.NO);
  doc.add(f);
  w.addDocument(doc);
  f.setStringValue("b d");
  w.addDocument(doc);
  f.setStringValue("d");
  w.addDocument(doc);
  w.commit();

  DirectoryReader reader = w.getReader();
  final IndexSearcher searcher = new IndexSearcher(reader);

  for (List<String> requiredTerms : Arrays.<List<String>>asList(
      Arrays.asList("a", "d"),
      Arrays.asList("a", "b", "d"),
      Arrays.asList("d"),
      Arrays.asList("e"),
      Arrays.asList())) {
    final BooleanQuery.Builder bq1 = new BooleanQuery.Builder();
    final BooleanQuery.Builder bq2 = new BooleanQuery.Builder();
    for (String term : requiredTerms) {
      final Query q = new TermQuery(new Term("field", term));
      bq1.add(q, Occur.MUST);
      bq2.add(q, Occur.FILTER);
    }

    final BitSet matches1 = getMatches(searcher, bq1.build());
    final BitSet matches2 = getMatches(searcher, bq2.build());
    assertEquals(matches1, matches2);
  }

  reader.close();
  w.close();
  dir.close();
}
 
Example 9  Project: lucene-solr  File: TestFuzzyQuery.java
/** 
 * MultiTermQuery provides (via attribute) information about which values
 * must be competitive to enter the priority queue. 
 * 
 * FuzzyQuery optimizes itself around this information; if the attribute
 * is not implemented correctly, there will be problems!
 */
public void testTieBreaker() throws Exception {
  Directory directory = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random(), directory);
  addDoc("a123456", writer);
  addDoc("c123456", writer);
  addDoc("d123456", writer);
  addDoc("e123456", writer);
  
  Directory directory2 = newDirectory();
  RandomIndexWriter writer2 = new RandomIndexWriter(random(), directory2);
  addDoc("a123456", writer2);
  addDoc("b123456", writer2);
  addDoc("b123456", writer2);
  addDoc("b123456", writer2);
  addDoc("c123456", writer2);
  addDoc("f123456", writer2);
  
  IndexReader ir1 = writer.getReader();
  IndexReader ir2 = writer2.getReader();
  
  MultiReader mr = new MultiReader(ir1, ir2);
  IndexSearcher searcher = newSearcher(mr);
  FuzzyQuery fq = new FuzzyQuery(new Term("field", "z123456"), 1, 0, 2, false);
  TopDocs docs = searcher.search(fq, 2);
  assertEquals(5, docs.totalHits.value); // 5 docs, from the a and b's
  mr.close();
  ir1.close();
  ir2.close();
  writer.close();
  writer2.close();
  directory.close();
  directory2.close(); 
}
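
As a gloss on the positional arguments in the FuzzyQuery call above, here is the same construction restated with each parameter annotated. This is an illustrative restatement, not project code; the parameter names follow FuzzyQuery's constructor, FuzzyQuery(Term term, int maxEdits, int prefixLength, int maxExpansions, boolean transpositions).

FuzzyQuery fq = new FuzzyQuery(
    new Term("field", "z123456"), // term to match approximately
    1,      // maxEdits: allow at most one Levenshtein edit
    0,      // prefixLength: no exact shared prefix required
    2,      // maxExpansions: size of the priority queue of competing terms
    false); // transpositions: a transposition counts as two edits, not one

The small maxExpansions value is what makes the tie-breaking behavior described in the javadoc observable: only the best-scoring terms may enter the priority queue.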
 
Example 10  Project: lucene-solr  File: TestBooleanQuery.java
public void testExclusionPropagatesApproximations() throws IOException {
  Directory dir = newDirectory();
  RandomIndexWriter w = new RandomIndexWriter(random(), dir);
  Document doc = new Document();
  Field f = newTextField("field", "a b c", Field.Store.NO);
  doc.add(f);
  w.addDocument(doc);
  w.commit();

  DirectoryReader reader = w.getReader();
  final IndexSearcher searcher = new IndexSearcher(reader);
  searcher.setQueryCache(null); // to still have approximations

  PhraseQuery pq = new PhraseQuery("field", "a", "b");

  BooleanQuery.Builder q = new BooleanQuery.Builder();
  q.add(pq, Occur.SHOULD);
  q.add(new TermQuery(new Term("field", "c")), Occur.MUST_NOT);

  final Weight weight = searcher.createWeight(searcher.rewrite(q.build()), ScoreMode.COMPLETE, 1);
  final Scorer scorer = weight.scorer(reader.leaves().get(0));
  assertTrue(scorer instanceof ReqExclScorer);
  assertNotNull(scorer.twoPhaseIterator());

  reader.close();
  w.close();
  dir.close();
}
 
Example 11
public void testIndexSortDocValuesWithOddLength(boolean reverse) throws Exception {
  Directory dir = newDirectory();

  IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
  Sort indexSort = new Sort(new SortedNumericSortField("field", SortField.Type.LONG, reverse));
  iwc.setIndexSort(indexSort);
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir, iwc);

  writer.addDocument(createDocument("field", -80));
  writer.addDocument(createDocument("field", -5));
  writer.addDocument(createDocument("field", 0));
  writer.addDocument(createDocument("field", 0));
  writer.addDocument(createDocument("field", 5));
  writer.addDocument(createDocument("field", 30));
  writer.addDocument(createDocument("field", 35));

  DirectoryReader reader = writer.getReader();
  IndexSearcher searcher = newSearcher(reader);

  // Test ranges consisting of one value.
  assertEquals(1, searcher.count(createQuery("field", -80, -80)));
  assertEquals(1, searcher.count(createQuery("field", -5, -5)));
  assertEquals(2, searcher.count(createQuery("field", 0, 0)));
  assertEquals(1, searcher.count(createQuery("field", 5, 5)));
  assertEquals(1, searcher.count(createQuery("field", 30, 30)));
  assertEquals(1, searcher.count(createQuery("field", 35, 35)));

  assertEquals(0, searcher.count(createQuery("field", -90, -90)));
  assertEquals(0, searcher.count(createQuery("field", 6, 6)));
  assertEquals(0, searcher.count(createQuery("field", 40, 40)));

  // Test the lower end of the document value range.
  assertEquals(2, searcher.count(createQuery("field", -90, -4)));
  assertEquals(2, searcher.count(createQuery("field", -80, -4)));
  assertEquals(1, searcher.count(createQuery("field", -70, -4)));
  assertEquals(2, searcher.count(createQuery("field", -80, -5)));

  // Test the upper end of the document value range.
  assertEquals(1, searcher.count(createQuery("field", 25, 34)));
  assertEquals(2, searcher.count(createQuery("field", 25, 35)));
  assertEquals(2, searcher.count(createQuery("field", 25, 36)));
  assertEquals(2, searcher.count(createQuery("field", 30, 35)));

  // Test multiple occurrences of the same value.
  assertEquals(2, searcher.count(createQuery("field", -4, 4)));
  assertEquals(2, searcher.count(createQuery("field", -4, 0)));
  assertEquals(2, searcher.count(createQuery("field", 0, 4)));
  assertEquals(4, searcher.count(createQuery("field", 0, 30)));

  // Test ranges that span all documents.
  assertEquals(7, searcher.count(createQuery("field", -80, 35)));
  assertEquals(7, searcher.count(createQuery("field", -90, 40)));

  writer.close();
  reader.close();
  dir.close();
}
 
Example 12  Project: lucene-solr  File: TestXYShape.java
/** test we can search for a polygon with a standard number of vertices */
public void testBasicIntersects() throws Exception {
  int numVertices = TestUtil.nextInt(random(), 50, 100);
  Directory dir = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir);

  // add a random polygon document
  XYPolygon p = ShapeTestUtil.createRegularPolygon(0, 90, atLeast(1000000), numVertices);
  Document document = new Document();
  addPolygonsToDoc(FIELDNAME, document, p);
  writer.addDocument(document);

  // add a line document
  document = new Document();
  // add a line string
  float x[] = new float[p.numPoints() - 1];
  float y[] = new float[p.numPoints() - 1];
  for (int i = 0; i < x.length; ++i) {
    x[i] = p.getPolyX(i);
    y[i] = p.getPolyY(i);
  }
  XYLine l = new XYLine(x, y);
  addLineToDoc(FIELDNAME, document, l);
  writer.addDocument(document);

  ////// search /////
  // search an intersecting bbox
  IndexReader reader = writer.getReader();
  writer.close();
  IndexSearcher searcher = newSearcher(reader);
  float minX = Math.min(x[0], x[1]);
  float minY = Math.min(y[0], y[1]);
  float maxX = Math.max(x[0], x[1]);
  float maxY = Math.max(y[0], y[1]);
  Query q = newRectQuery(FIELDNAME, minX, maxX, minY, maxY);
  assertEquals(2, searcher.count(q));

  // search a disjoint bbox
  q = newRectQuery(FIELDNAME, p.minX - 1f, p.minX + 1f, p.minY - 1f, p.minY + 1f);
  assertEquals(0, searcher.count(q));

  // search w/ an intersecting polygon
  q = XYShape.newPolygonQuery(FIELDNAME, QueryRelation.INTERSECTS, new XYPolygon(
      new float[] {minX, minX, maxX, maxX, minX},
      new float[] {minY, maxY, maxY, minY, minY}
  ));
  assertEquals(2, searcher.count(q));

  // search w/ an intersecting line
  q = XYShape.newLineQuery(FIELDNAME, QueryRelation.INTERSECTS, new XYLine(
     new float[] {minX, minX, maxX, maxX},
     new float[] {minY, maxY, maxY, minY}
  ));
  assertEquals(2, searcher.count(q));

  IOUtils.close(reader, dir);
}
 
Example 13  Project: lucene-solr  File: TestFilteredDocIdSet.java
public void testNullIteratorFilteredDocIdSet() throws Exception {
  Directory dir = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
  Document doc = new Document();
  doc.add(newStringField("c", "val", Field.Store.NO));
  writer.addDocument(doc);
  IndexReader reader = writer.getReader();
  writer.close();
  
  // First verify the document is searchable.
  IndexSearcher searcher = newSearcher(reader);
  Assert.assertEquals(1, searcher.search(new MatchAllDocsQuery(), 10).totalHits.value);
  
  // Now search w/ a Filter which returns a null DocIdSet
  Filter f = new Filter() {
    @Override
    public DocIdSet getDocIdSet(LeafReaderContext context, Bits acceptDocs) {
      final DocIdSet innerNullIteratorSet = new DocIdSet() {
        @Override
        public DocIdSetIterator iterator() {
          return null;
        } 

        @Override
        public long ramBytesUsed() {
          return 0L;
        }
      };
      return new FilteredDocIdSet(innerNullIteratorSet) {
        @Override
        protected boolean match(int docid) {
          return true;
        }
      };
    }

    @Override
    public String toString(String field) {
      return "nullDocIdSetFilter";
    }
    
    @Override
    public boolean equals(Object other) {
      return other == this;
    }
    
    @Override
    public int hashCode() {
      return System.identityHashCode(this);
    }
  };
  
  Query filtered = new BooleanQuery.Builder()
      .add(new MatchAllDocsQuery(), Occur.MUST)
      .add(f, Occur.FILTER)
      .build();
  Assert.assertEquals(0, searcher.search(filtered, 10).totalHits.value);
  reader.close();
  dir.close();
}
 
Example 14  Project: lucene-solr  File: TestUnifiedHighlighterMTQ.java
public void testRanges() throws Exception {
  RandomIndexWriter iw = new RandomIndexWriter(random(), dir, indexAnalyzer);

  Field body = new Field("body", "", fieldType);
  Document doc = new Document();
  doc.add(body);

  body.setStringValue("This is a test.");
  iw.addDocument(doc);
  body.setStringValue("Test a one sentence document.");
  iw.addDocument(doc);

  IndexReader ir = iw.getReader();
  iw.close();

  IndexSearcher searcher = newSearcher(ir);
  UnifiedHighlighter highlighter = randomUnifiedHighlighter(searcher, indexAnalyzer);
  Query query = TermRangeQuery.newStringRange("body", "ta", "tf", true, true);
  TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER);
  assertEquals(2, topDocs.totalHits.value);
  String snippets[] = highlighter.highlight("body", query, topDocs);
  assertEquals(2, snippets.length);
  assertEquals("This is a <b>test</b>.", snippets[0]);
  assertEquals("<b>Test</b> a one sentence document.", snippets[1]);

  // null start
  query = TermRangeQuery.newStringRange("body", null, "tf", true, true);
  topDocs = searcher.search(query, 10, Sort.INDEXORDER);
  assertEquals(2, topDocs.totalHits.value);
  snippets = highlighter.highlight("body", query, topDocs);
  assertEquals(2, snippets.length);
  assertEquals("This <b>is</b> <b>a</b> <b>test</b>.", snippets[0]);
  assertEquals("<b>Test</b> <b>a</b> <b>one</b> <b>sentence</b> <b>document</b>.", snippets[1]);

  // null end
  query = TermRangeQuery.newStringRange("body", "ta", null, true, true);
  topDocs = searcher.search(query, 10, Sort.INDEXORDER);
  assertEquals(2, topDocs.totalHits.value);
  snippets = highlighter.highlight("body", query, topDocs);
  assertEquals(2, snippets.length);
  assertEquals("<b>This</b> is a <b>test</b>.", snippets[0]);
  assertEquals("<b>Test</b> a one sentence document.", snippets[1]);

  // exact start inclusive
  query = TermRangeQuery.newStringRange("body", "test", "tf", true, true);
  topDocs = searcher.search(query, 10, Sort.INDEXORDER);
  assertEquals(2, topDocs.totalHits.value);
  snippets = highlighter.highlight("body", query, topDocs);
  assertEquals(2, snippets.length);
  assertEquals("This is a <b>test</b>.", snippets[0]);
  assertEquals("<b>Test</b> a one sentence document.", snippets[1]);

  // exact end inclusive
  query = TermRangeQuery.newStringRange("body", "ta", "test", true, true);
  topDocs = searcher.search(query, 10, Sort.INDEXORDER);
  assertEquals(2, topDocs.totalHits.value);
  snippets = highlighter.highlight("body", query, topDocs);
  assertEquals(2, snippets.length);
  assertEquals("This is a <b>test</b>.", snippets[0]);
  assertEquals("<b>Test</b> a one sentence document.", snippets[1]);

  // exact start exclusive
  BooleanQuery bq = new BooleanQuery.Builder()
      .add(new MatchAllDocsQuery(), BooleanClause.Occur.SHOULD)
      .add(TermRangeQuery.newStringRange("body", "test", "tf", false, true), BooleanClause.Occur.SHOULD)
      .build();
  topDocs = searcher.search(bq, 10, Sort.INDEXORDER);
  assertEquals(2, topDocs.totalHits.value);
  snippets = highlighter.highlight("body", bq, topDocs);
  assertEquals(2, snippets.length);
  assertEquals("This is a test.", snippets[0]);
  assertEquals("Test a one sentence document.", snippets[1]);

  // exact end exclusive
  bq = new BooleanQuery.Builder()
      .add(new MatchAllDocsQuery(), BooleanClause.Occur.SHOULD)
      .add(TermRangeQuery.newStringRange("body", "ta", "test", true, false), BooleanClause.Occur.SHOULD)
      .build();
  topDocs = searcher.search(bq, 10, Sort.INDEXORDER);
  assertEquals(2, topDocs.totalHits.value);
  snippets = highlighter.highlight("body", bq, topDocs);
  assertEquals(2, snippets.length);
  assertEquals("This is a test.", snippets[0]);
  assertEquals("Test a one sentence document.", snippets[1]);

  // wrong field
  highlighter.setFieldMatcher(null); // default
  bq = new BooleanQuery.Builder()
      .add(new MatchAllDocsQuery(), BooleanClause.Occur.SHOULD)
      .add(TermRangeQuery.newStringRange("bogus", "ta", "tf", true, true), BooleanClause.Occur.SHOULD)
      .build();
  topDocs = searcher.search(bq, 10, Sort.INDEXORDER);
  assertEquals(2, topDocs.totalHits.value);
  snippets = highlighter.highlight("body", bq, topDocs);
  assertEquals(2, snippets.length);
  assertEquals("This is a test.", snippets[0]);
  assertEquals("Test a one sentence document.", snippets[1]);

  ir.close();
}
 
Example 15  Project: lucene-solr  File: TestFieldMaskingSpanQuery.java
@BeforeClass
public static void beforeClass() throws Exception {
  directory = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random(), directory,
      newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy()));
  
  writer.addDocument(doc(new Field[] { field("id", "0")
                                       ,
                                       field("gender", "male"),
                                       field("first",  "james"),
                                       field("last",   "jones")     }));
                                             
  writer.addDocument(doc(new Field[] { field("id", "1")
                                       ,
                                       field("gender", "male"),
                                       field("first",  "james"),
                                       field("last",   "smith")
                                       ,
                                       field("gender", "female"),
                                       field("first",  "sally"),
                                       field("last",   "jones")     }));
  
  writer.addDocument(doc(new Field[] { field("id", "2")
                                       ,
                                       field("gender", "female"),
                                       field("first",  "greta"),
                                       field("last",   "jones")
                                       ,
                                       field("gender", "female"),
                                       field("first",  "sally"),
                                       field("last",   "smith")
                                       ,
                                       field("gender", "male"),
                                       field("first",  "james"),
                                       field("last",   "jones")     }));
   
  writer.addDocument(doc(new Field[] { field("id", "3")
                                       ,
                                       field("gender", "female"),
                                       field("first",  "lisa"),
                                       field("last",   "jones")
                                       ,
                                       field("gender", "male"),
                                       field("first",  "bob"),
                                       field("last",   "costas")     }));
  
  writer.addDocument(doc(new Field[] { field("id", "4")
                                       ,
                                       field("gender", "female"),
                                       field("first",  "sally"),
                                       field("last",   "smith")
                                       ,
                                       field("gender", "female"),
                                       field("first",  "linda"),
                                       field("last",   "dixit")
                                       ,
                                       field("gender", "male"),
                                       field("first",  "bubba"),
                                       field("last",   "jones")     }));
  writer.forceMerge(1);
  reader = writer.getReader();
  writer.close();
  searcher = new IndexSearcher(getOnlyLeafReader(reader));
}
 
Example 16  Project: lucene-solr  File: TestPrefixCompletionQuery.java
public void testAnalyzerWithoutSeparator() throws Exception {
  Analyzer analyzer = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, true, MockTokenFilter.ENGLISH_STOPSET);
  // note: when we don't preserve separators, the choice of preservePosInc is irrelevant
  CompletionAnalyzer completionAnalyzer = new CompletionAnalyzer(analyzer, false, random().nextBoolean());
  final String field = getTestName();
  RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(completionAnalyzer, field));
  Document document = new Document();
  document.add(new SuggestField(field, "foobar", 7));
  document.add(new SuggestField(field, "foo bar", 8));
  document.add(new SuggestField(field, "the fo", 9));
  document.add(new SuggestField(field, "the foo bar", 10));
  document.add(new SuggestField(field, "foo the bar", 11)); // middle stopword
  document.add(new SuggestField(field, "baz the", 12)); // trailing stopword

  iw.addDocument(document);

  // note we use the completionAnalyzer with the queries (instead of input analyzer) because of non-default settings
  DirectoryReader reader = iw.getReader();
  SuggestIndexSearcher indexSearcher = new SuggestIndexSearcher(reader);
  CompletionQuery query = new PrefixCompletionQuery(completionAnalyzer, new Term(field, "fo"));
  TopSuggestDocs suggest = indexSearcher.suggest(query, 9, false); // matches all with "fo"
  assertSuggestions(suggest, new Entry("foo the bar", 11), new Entry("the foo bar", 10), new Entry("the fo", 9), new Entry("foo bar", 8), new Entry("foobar", 7));
  // with leading stopword
  query = new PrefixCompletionQuery(completionAnalyzer, new Term(field, "the fo")); // becomes "fo*"
  suggest = indexSearcher.suggest(query, 9, false);
  assertSuggestions(suggest, new Entry("foo the bar", 11), new Entry("the foo bar", 10), new Entry("the fo", 9), new Entry("foo bar", 8), new Entry("foobar", 7));
  // with middle stopword
  query = new PrefixCompletionQuery(completionAnalyzer, new Term(field, "foo the bar")); // becomes "foobar*"
  suggest = indexSearcher.suggest(query, 9, false);
  assertSuggestions(suggest, new Entry("foo the bar", 11), new Entry("the foo bar", 10), new Entry("foo bar", 8), new Entry("foobar", 7));
  // no space
  query = new PrefixCompletionQuery(completionAnalyzer, new Term(field, "foob"));
  suggest = indexSearcher.suggest(query, 9, false); // no separators, thus match several
  assertSuggestions(suggest, new Entry("foo the bar", 11), new Entry("the foo bar", 10), new Entry("foo bar", 8), new Entry("foobar", 7));
  // surrounding stopwords
  query = new PrefixCompletionQuery(completionAnalyzer, new Term(field, "the baz the")); // becomes "baz*"
  suggest = indexSearcher.suggest(query, 4, false); // stopwords in the query get removed, so we match
  assertSuggestions(suggest, new Entry("baz the", 12));
  reader.close();
  iw.close();
}
 
Example 17  Project: lucene-solr  File: TestLatLonShape.java
public void testLUCENE8669() throws Exception {
  Directory dir = newDirectory();
  RandomIndexWriter w = new RandomIndexWriter(random(), dir);
  Document doc = new Document();

  Polygon indexPoly1 = new Polygon(
      new double[] {-7.5d, 15d, 15d, 0d, -7.5d},
      new double[] {-180d, -180d, -176d, -176d, -180d}
  );

  Polygon indexPoly2 = new Polygon(
      new double[] {15d, -7.5d, -15d, -10d, 15d, 15d},
      new double[] {180d, 180d, 176d, 174d, 176d, 180d}
  );

  addPolygonsToDoc(FIELDNAME, doc, indexPoly1);
  addPolygonsToDoc(FIELDNAME, doc, indexPoly2);
  w.addDocument(doc);
  w.forceMerge(1);

  ///// search //////
  IndexReader reader = w.getReader();
  w.close();
  IndexSearcher searcher = newSearcher(reader);

  Polygon[] searchPoly = new Polygon[] {
      new Polygon(new double[] {-20d, 20d, 20d, -20d, -20d},
          new double[] {-180d, -180d, -170d, -170d, -180d}),
      new Polygon(new double[] {20d, -20d, -20d, 20d, 20d},
          new double[] {180d, 180d, 170d, 170d, 180d})
  };

  Query q = LatLonShape.newPolygonQuery(FIELDNAME, QueryRelation.WITHIN, searchPoly);
  assertEquals(1, searcher.count(q));

  q = LatLonShape.newPolygonQuery(FIELDNAME, QueryRelation.INTERSECTS, searchPoly);
  assertEquals(1, searcher.count(q));

  q = LatLonShape.newPolygonQuery(FIELDNAME, QueryRelation.DISJOINT, searchPoly);
  assertEquals(0, searcher.count(q));

  q = LatLonShape.newBoxQuery(FIELDNAME, QueryRelation.WITHIN, -20, 20, 170, -170);
  assertEquals(1, searcher.count(q));

  q = LatLonShape.newBoxQuery(FIELDNAME, QueryRelation.INTERSECTS, -20, 20, 170, -170);
  assertEquals(1, searcher.count(q));

  q = LatLonShape.newBoxQuery(FIELDNAME, QueryRelation.DISJOINT, -20, 20, 170, -170);
  assertEquals(0, searcher.count(q));

  IOUtils.close(w, reader, dir);
}
 
Example 18  Project: lucene-solr  File: TestLRUQueryCache.java
public void testRandom() throws IOException {
  Directory dir = newDirectory();
  final RandomIndexWriter w = new RandomIndexWriter(random(), dir);
  Document doc = new Document();
  TextField f = new TextField("foo", "foo", Store.NO);
  doc.add(f);
  w.addDocument(doc);
  IndexReader reader = w.getReader();

  final int maxSize;
  final long maxRamBytesUsed;
  final int iters;

  if (TEST_NIGHTLY) {
    maxSize = TestUtil.nextInt(random(), 1, 10000);
    maxRamBytesUsed = TestUtil.nextLong(random(), 1, 5000000);
    iters = atLeast(20000);
  } else {
    maxSize = TestUtil.nextInt(random(), 1, 1000);
    maxRamBytesUsed = TestUtil.nextLong(random(), 1, 500000);
    iters = atLeast(2000);
  }

  final LRUQueryCache queryCache = new LRUQueryCache(maxSize, maxRamBytesUsed, context -> random().nextBoolean(), Float.POSITIVE_INFINITY);
  IndexSearcher uncachedSearcher = null;
  IndexSearcher cachedSearcher = null;

  for (int i = 0; i < iters; ++i) {
    if (i == 0 || random().nextInt(100) == 1) {
      reader.close();
      f.setStringValue(RandomPicks.randomFrom(random(), Arrays.asList("foo", "bar", "bar baz")));
      w.addDocument(doc);
      if (random().nextBoolean()) {
        w.deleteDocuments(buildRandomQuery(0));
      }
      reader = w.getReader();
      uncachedSearcher = newSearcher(reader);
      uncachedSearcher.setQueryCache(null);
      cachedSearcher = newSearcher(reader);
      cachedSearcher.setQueryCache(queryCache);
      cachedSearcher.setQueryCachingPolicy(ALWAYS_CACHE);
    }
    final Query q = buildRandomQuery(0);
    assertEquals(uncachedSearcher.count(q), cachedSearcher.count(q));
    if (rarely()) {
      queryCache.assertConsistent();
    }
  }
  queryCache.assertConsistent();
  w.close();
  reader.close();
  dir.close();
  queryCache.assertConsistent();
}
 
Example 19  Project: lucene-solr  File: TestNearest.java
public void testNearestNeighborRandom() throws Exception {
  
  int numPoints = atLeast(1000);
  Directory dir;
  if (numPoints > 100000) {
    dir = newFSDirectory(createTempDir(getClass().getSimpleName()));
  } else {
    dir = newDirectory();
  }
  double[] lats = new double[numPoints];
  double[] lons = new double[numPoints];

  IndexWriterConfig iwc = getIndexWriterConfig();
  iwc.setMergePolicy(newLogMergePolicy());
  iwc.setMergeScheduler(new SerialMergeScheduler());
  RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
  for(int id=0;id<numPoints;id++) {
    lats[id] = quantizeLat(GeoTestUtil.nextLatitude());
    lons[id] = quantizeLon(GeoTestUtil.nextLongitude());
    Document doc = new Document();
    doc.add(new LatLonPoint("point", lats[id], lons[id]));
    doc.add(new LatLonDocValuesField("point", lats[id], lons[id]));
    doc.add(new StoredField("id", id));
    w.addDocument(doc);
  }

  if (random().nextBoolean()) {
    w.forceMerge(1);
  }

  DirectoryReader r = w.getReader();
  if (VERBOSE) {      
    System.out.println("TEST: reader=" + r);
  }
  // can't wrap because we require Lucene60PointsFormat directly but e.g. ParallelReader wraps with its own points impl:
  IndexSearcher s = newSearcher(r, false);
  int iters = atLeast(100);
  for(int iter=0;iter<iters;iter++) {
    if (VERBOSE) {      
      System.out.println("\nTEST: iter=" + iter);
    }
    double pointLat = GeoTestUtil.nextLatitude();
    double pointLon = GeoTestUtil.nextLongitude();

    // dumb brute force search to get the expected result:
    FieldDoc[] expectedHits = new FieldDoc[lats.length];
    for(int id=0;id<lats.length;id++) {
      double distance = SloppyMath.haversinMeters(pointLat, pointLon, lats[id], lons[id]);
      FieldDoc hit = new FieldDoc(id, 0.0f, new Object[] {Double.valueOf(distance)});
      expectedHits[id] = hit;
    }

    Arrays.sort(expectedHits, new Comparator<FieldDoc>() {
        @Override
        public int compare(FieldDoc a, FieldDoc b) {
          int cmp = Double.compare(((Double) a.fields[0]).doubleValue(), ((Double) b.fields[0]).doubleValue());
          if (cmp != 0) {
            return cmp;
          }
          // tie break by smaller docID:
          return a.doc - b.doc;
        }
      });

    int topN = TestUtil.nextInt(random(), 1, lats.length);

    if (VERBOSE) {
      System.out.println("\nhits for pointLat=" + pointLat + " pointLon=" + pointLon);
    }

    // Also test with MatchAllDocsQuery, sorting by distance:
    TopFieldDocs fieldDocs = s.search(new MatchAllDocsQuery(), topN, new Sort(LatLonDocValuesField.newDistanceSort("point", pointLat, pointLon)));

    ScoreDoc[] hits = LatLonPointPrototypeQueries.nearest(s, "point", pointLat, pointLon, topN).scoreDocs;
    for(int i=0;i<topN;i++) {
      FieldDoc expected = expectedHits[i];
      FieldDoc expected2 = (FieldDoc) fieldDocs.scoreDocs[i];
      FieldDoc actual = (FieldDoc) hits[i];
      Document actualDoc = r.document(actual.doc);

      if (VERBOSE) {
        System.out.println("hit " + i);
        System.out.println("  expected id=" + expected.doc+ " lat=" + lats[expected.doc] + " lon=" + lons[expected.doc]
            + " distance=" + ((Double) expected.fields[0]).doubleValue() + " meters");
        System.out.println("  actual id=" + actualDoc.getField("id") + " distance=" + actual.fields[0] + " meters");
      }

      assertEquals(expected.doc, actual.doc);
      assertEquals(((Double) expected.fields[0]).doubleValue(), ((Double) actual.fields[0]).doubleValue(), 0.0);

      assertEquals(expected2.doc, actual.doc);
      assertEquals(((Double) expected2.fields[0]).doubleValue(), ((Double) actual.fields[0]).doubleValue(), 0.0);
    }
  }

  r.close();
  w.close();
  dir.close();
}
 
Example 20  Project: lucene-solr  File: BaseXYPointTestCase.java
public void testRectBoundariesAreInclusive() throws Exception {
  XYRectangle rect = ShapeTestUtil.nextBox(random());
  Directory dir = newDirectory();
  IndexWriterConfig iwc = newIndexWriterConfig();
  // Else seeds may not reproduce:
  iwc.setMergeScheduler(new SerialMergeScheduler());
  RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
  for(int i = 0; i < 3; i++) {
    float y;
    if (i == 0) {
      y = rect.minY;
    } else if (i == 1) {
      y = (float) (((double) rect.minY + rect.maxY) / 2.0);
    } else {
      y = rect.maxY;
    }
    for(int j = 0; j < 3; j++) {
      float x;
      if (j == 0) {
        x = rect.minX;
      } else if (j == 1) {
        if (i == 1) {
          continue;
        }
        x = (float) (((double) rect.minX + rect.maxX) / 2.0);
      } else {
        x = rect.maxX;
      }

      Document doc = new Document();
      addPointToDoc(FIELD_NAME, doc, x, y);
      w.addDocument(doc);
    }
  }
  IndexReader r = w.getReader();
  IndexSearcher s = newSearcher(r, false);
  // exact edge cases
  assertEquals(8, s.count(newRectQuery(FIELD_NAME, rect.minX, rect.maxX, rect.minY, rect.maxY)));
  // expand 1 ulp in each direction if possible and test a slightly larger box!
  if (rect.minX != -Float.MAX_VALUE) {
    assertEquals(8, s.count(newRectQuery(FIELD_NAME, Math.nextDown(rect.minX), rect.maxX, rect.minY, rect.maxY)));
  }
  if (rect.maxX != Float.MAX_VALUE) {
    assertEquals(8, s.count(newRectQuery(FIELD_NAME, rect.minX, Math.nextUp(rect.maxX), rect.minY, rect.maxY)));
  }
  if (rect.minY != -Float.MAX_VALUE) {
    assertEquals(8, s.count(newRectQuery(FIELD_NAME, rect.minX, rect.maxX, Math.nextDown(rect.minY), rect.maxY)));
  }
  if (rect.maxY != Float.MAX_VALUE) {
    assertEquals(8, s.count(newRectQuery(FIELD_NAME, rect.minX, rect.maxX, rect.minY, Math.nextUp(rect.maxY))));
  }

  r.close();
  w.close();
  dir.close();
}