org.apache.lucene.index.RandomIndexWriter#addDocument ( )源码实例Demo

下面列出了org.apache.lucene.index.RandomIndexWriter#addDocument ( ) 实例代码,或者点击链接到github查看源代码,也可以在右侧发表评论。

源代码1 项目: lucene-solr   文件: TestPrefixCompletionQuery.java
public void testEmptyPrefixContextQuery() throws Exception {
  // Index a single suggestion carrying a context, then run an empty-prefix
  // completion query restricted to that context: nothing should match.
  Analyzer analyzer = new MockAnalyzer(random());
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field"));
  Document doc = new Document();
  doc.add(new ContextSuggestField("suggest_field", "suggestion", 1, "type"));
  writer.addDocument(doc);

  // Occasionally force a commit so both committed and in-memory states are covered.
  if (rarely()) {
    writer.commit();
  }

  DirectoryReader reader = writer.getReader();
  SuggestIndexSearcher suggestSearcher = new SuggestIndexSearcher(reader);
  ContextQuery query = new ContextQuery(new PrefixCompletionQuery(analyzer, new Term("suggest_field", "")));
  query.addContext("type", 1);

  TopSuggestDocs suggest = suggestSearcher.suggest(query, 5, false);
  assertEquals(0, suggest.scoreDocs.length);

  reader.close();
  writer.close();
}
 
源代码2 项目: lucene-solr   文件: TestDirectSpellChecker.java
public void testBogusField() throws Exception {
  // Asking for suggestions against a field that was never indexed must
  // simply yield zero suggestions rather than fail.
  DirectSpellChecker spellChecker = new DirectSpellChecker();
  Directory dir = newDirectory();
  Analyzer analyzer = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true);
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir, analyzer);

  // Index the English spellings of 0..19 into the "numbers" field.
  for (int n = 0; n < 20; n++) {
    Document doc = new Document();
    doc.add(newTextField("numbers", English.intToEnglish(n), Field.Store.NO));
    writer.addDocument(doc);
  }

  IndexReader reader = writer.getReader();

  SuggestWord[] similar =
      spellChecker.suggestSimilar(
          new Term("bogusFieldBogusField", "fvie"), 2, reader, SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX);
  assertEquals(0, similar.length);

  IOUtils.close(reader, writer, dir, analyzer);
}
 
源代码3 项目: lucene-solr   文件: TestUnifiedHighlighter.java
public void testPassageRanking() throws Exception {
  // Verify passage ranking: with two passages requested, the highlighter
  // should pick the opening sentence plus the sentence densest in hits.
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir, indexAnalyzer);

  Field body = new Field("body", "", fieldType);
  Document doc = new Document();
  doc.add(body);
  body.setStringValue("This is a test.  Just highlighting from postings. This is also a much sillier test.  Feel free to test test test test test test test.");
  writer.addDocument(doc);

  IndexReader reader = writer.getReader();
  writer.close();

  IndexSearcher searcher = newSearcher(reader);
  UnifiedHighlighter highlighter = randomUnifiedHighlighter(searcher, indexAnalyzer);
  Query query = new TermQuery(new Term("body", "test"));
  TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER);
  assertEquals(1, topDocs.totalHits.value);

  String[] snippets = highlighter.highlight("body", query, topDocs, 2);
  assertEquals(1, snippets.length);
  assertEquals("This is a <b>test</b>.  ... Feel free to <b>test</b> <b>test</b> <b>test</b> <b>test</b> <b>test</b> <b>test</b> <b>test</b>.", snippets[0]);

  reader.close();
}
 
源代码4 项目: lucene-solr   文件: TestFeatureDoubleValues.java
public void testFeatureMissingFieldInSegment() throws IOException {
  // A FeatureField values source over a field absent from the segment must
  // report "no value" for every doc id instead of throwing.
  Directory dir = newDirectory();
  IndexWriterConfig config =
      newIndexWriterConfig().setMergePolicy(newLogMergePolicy(random().nextBoolean()));
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir, config);
  writer.addDocument(new Document()); // empty doc: the feature field is never indexed
  writer.commit();
  IndexReader reader = writer.getReader();
  writer.close();

  assertEquals(1, reader.leaves().size());
  LeafReaderContext context = reader.leaves().get(0);
  DoubleValuesSource valuesSource = FeatureField.newDoubleValues("field", "name");
  DoubleValues values = valuesSource.getValues(context, null);

  // advanceExact is false both for the existing doc (0) and past the end (1).
  assertFalse(values.advanceExact(0));
  assertFalse(values.advanceExact(1));

  reader.close();
  dir.close();
}
 
源代码5 项目: lucene-solr   文件: TestDocument.java
/**
 * Tests {@link Document#getValues(String)} for a Document that has been
 * round-tripped through an index (stored, then retrieved by a search).
 *
 * @throws Exception on error
 */
public void testGetValuesForIndexedDocument() throws Exception {
  Directory dir = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
  writer.addDocument(makeDocumentWithFields());
  IndexReader reader = writer.getReader();

  IndexSearcher searcher = newSearcher(reader);

  // Query a keyword known to be present in the fixture document.
  Query query = new TermQuery(new Term("keyword", "test1"));
  ScoreDoc[] hits = searcher.search(query, 1000).scoreDocs;
  assertEquals(1, hits.length);

  // Re-read the stored document and check its field values survived the round trip.
  doAssert(searcher.doc(hits[0].doc), true);

  writer.close();
  reader.close();
  dir.close();
}
 
源代码6 项目: lucene-solr   文件: TestSpanNotQuery.java
public void testNoPositions() throws IOException {
  // A span query over a field indexed without positions must fail at search
  // time with a descriptive IllegalStateException.
  Directory dir = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
  Document doc = new Document();
  doc.add(new StringField("foo", "bar", Field.Store.NO)); // StringField omits position data
  writer.addDocument(doc);

  IndexReader reader = writer.getReader();
  writer.close();

  IndexSearcher searcher = new IndexSearcher(reader);
  SpanTermQuery include = new SpanTermQuery(new Term("foo", "bar"));
  SpanTermQuery exclude = new SpanTermQuery(new Term("foo", "baz"));

  IllegalStateException expected =
      expectThrows(IllegalStateException.class,
          () -> searcher.search(new SpanNotQuery(include, exclude), 5));
  assertTrue(expected.getMessage().contains("was indexed without position data"));

  reader.close();
  dir.close();
}
 
源代码7 项目: lucene-solr   文件: TestRegexCompletionQuery.java
@Test
public void testEmptyRegexQuery() throws Exception {
  // An empty regex behaves as match-nothing for completion queries.
  Analyzer analyzer = new MockAnalyzer(random());
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field"));
  Document doc = new Document();
  doc.add(new SuggestField("suggest_field", "suggestion1", 1));
  writer.addDocument(doc);

  // Occasionally force a commit so both committed and in-memory states are covered.
  if (rarely()) {
    writer.commit();
  }

  DirectoryReader reader = writer.getReader();
  SuggestIndexSearcher suggestSearcher = new SuggestIndexSearcher(reader);
  RegexCompletionQuery query = new RegexCompletionQuery(new Term("suggest_field", ""));

  TopSuggestDocs suggest = suggestSearcher.suggest(query, 5, false);
  assertEquals(0, suggest.scoreDocs.length);

  reader.close();
  writer.close();
}
 
源代码8 项目: lucene-solr   文件: TestDoubleValuesSource.java
@BeforeClass
public static void beforeClass() throws Exception {
  // Build the shared test index: english text, an odd/even term, and four
  // numeric doc-values fields with random values per document.
  dir = newDirectory();
  RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
  final int numDocs;
  if (TEST_NIGHTLY) {
    numDocs = TestUtil.nextInt(random(), 2049, 4000);
  } else {
    // at least 546 docs so the special doc at index 545 below always exists
    numDocs = atLeast(546);
  }
  for (int i = 0; i < numDocs; i++) {
    Document document = new Document();
    document.add(newTextField("english", English.intToEnglish(i), Field.Store.NO));
    document.add(newTextField("oddeven", (i % 2 == 0) ? "even" : "odd", Field.Store.NO));
    document.add(new NumericDocValuesField("int", random().nextInt()));
    document.add(new NumericDocValuesField("long", random().nextLong()));
    document.add(new FloatDocValuesField("float", random().nextFloat()));
    document.add(new DoubleDocValuesField("double", random().nextDouble()));
    // exactly one doc carries "onefield", exercising single-value-field paths
    if (i == 545)
      document.add(new DoubleDocValuesField("onefield", LEAST_DOUBLE_VALUE));
    iw.addDocument(document);
  }
  reader = iw.getReader();
  iw.close();
  searcher = newSearcher(reader);
}
 
源代码9 项目: lucene-solr   文件: TestDocValuesQueries.java
public void testMissingField() throws IOException {
  // Slow doc-values range queries over a nonexistent field must produce a
  // null scorer (match nothing) instead of failing.
  Directory dir = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
  writer.addDocument(new Document()); // a doc with no fields at all
  IndexReader reader = writer.getReader();
  writer.close();
  IndexSearcher searcher = newSearcher(reader);

  // One query per doc-values flavor, all over the missing field "foo".
  for (Query query : Arrays.asList(
      NumericDocValuesField.newSlowRangeQuery("foo", 2, 4),
      SortedNumericDocValuesField.newSlowRangeQuery("foo", 2, 4),
      SortedDocValuesField.newSlowRangeQuery("foo", new BytesRef("abc"), new BytesRef("bcd"), random().nextBoolean(), random().nextBoolean()),
      SortedSetDocValuesField.newSlowRangeQuery("foo", new BytesRef("abc"), new BytesRef("bcd"), random().nextBoolean(), random().nextBoolean()))) {
    Weight weight = searcher.createWeight(searcher.rewrite(query), ScoreMode.COMPLETE, 1);
    assertNull(weight.scorer(searcher.getIndexReader().leaves().get(0)));
  }

  reader.close();
  dir.close();
}
 
源代码10 项目: lucene-solr   文件: TestMultiTermConstantScore.java
@BeforeClass
public static void beforeClass() throws Exception {
  // Fixture rows of space-separated tokens; null rows become docs without a
  // "data" field so missing-field cases are covered.
  String[] data = new String[] { "A 1 2 3 4 5 6", "Z       4 5 6", null,
      "B   2   4 5 6", "Y     3   5 6", null, "C     3     6",
      "X       4 5 6" };

  small = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random(), small, 
      newIndexWriterConfig(
          new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)).setMergePolicy(newLogMergePolicy()));

  // Stored but untokenized, so "id" and "all" behave like keyword fields.
  FieldType customType = new FieldType(TextField.TYPE_STORED);
  customType.setTokenized(false);
  for (int i = 0; i < data.length; i++) {
    Document doc = new Document();
    doc.add(newField("id", String.valueOf(i), customType));// Field.Keyword("id",String.valueOf(i)));
    doc.add(newField("all", "all", customType));// Field.Keyword("all","all"));
    if (null != data[i]) {
      doc.add(newTextField("data", data[i], Field.Store.YES));// Field.Text("data",data[i]));
    }
    writer.addDocument(doc);
  }

  reader = writer.getReader();
  writer.close();
}
 
源代码11 项目: lucene-solr   文件: TestPrefixCompletionQuery.java
// Verifies suggester behavior when the CompletionAnalyzer does NOT preserve
// position increments: stopwords are dropped without leaving holes, so
// "the fo" matches as if it were "fo*", etc.
public void testAnalyzerNoPreservePosInc() throws Exception {
  Analyzer analyzer = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, true, MockTokenFilter.ENGLISH_STOPSET);
  // preserveSep=true, preservePositionIncrements=false
  CompletionAnalyzer completionAnalyzer = new CompletionAnalyzer(analyzer, true, false);
  final String field = getTestName();
  RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(completionAnalyzer, field));
  Document document = new Document();
  document.add(new SuggestField(field, "foobar", 7));
  document.add(new SuggestField(field, "foo bar", 8));
  document.add(new SuggestField(field, "the fo", 9));
  document.add(new SuggestField(field, "the foo bar", 10));
  document.add(new SuggestField(field, "foo the bar", 11)); // middle stopword
  document.add(new SuggestField(field, "baz the", 12)); // trailing stopword

  iw.addDocument(document);

  // note we use the completionAnalyzer with the queries (instead of input analyzer) because of non-default settings
  DirectoryReader reader = iw.getReader();
  SuggestIndexSearcher indexSearcher = new SuggestIndexSearcher(reader);
  CompletionQuery query = new PrefixCompletionQuery(completionAnalyzer, new Term(field, "fo"));
  TopSuggestDocs suggest = indexSearcher.suggest(query, 9, false); //matches all with fo
  assertSuggestions(suggest, new Entry("foo the bar", 11), new Entry("the foo bar", 10), new Entry("the fo", 9), new Entry("foo bar", 8), new Entry("foobar", 7));
  // with leading stopword
  query = new PrefixCompletionQuery(completionAnalyzer, new Term(field, "the fo")); // becomes "fo*"
  suggest = indexSearcher.suggest(query, 9, false);
  assertSuggestions(suggest, new Entry("foo the bar", 11), new Entry("the foo bar", 10), new Entry("the fo", 9), new Entry("foo bar", 8), new Entry("foobar", 7));
  // with middle stopword
  query = new PrefixCompletionQuery(completionAnalyzer, new Term(field, "foo the bar")); // becomes "foo bar*"
  suggest = indexSearcher.suggest(query, 9, false);
  assertSuggestions(suggest, new Entry("foo the bar", 11), new Entry("the foo bar", 10), new Entry("foo bar", 8)); // no foobar
  // no space
  query = new PrefixCompletionQuery(completionAnalyzer, new Term(field, "foob"));
  suggest = indexSearcher.suggest(query, 4, false); // separators, thus only match "foobar"
  assertSuggestions(suggest, new Entry("foobar", 7));
  // surrounding stopwords
  query = new PrefixCompletionQuery(completionAnalyzer, new Term(field, "the baz the")); // becomes "baz*"
  suggest = indexSearcher.suggest(query, 4, false);// stopwords in query get removed so we match
  assertSuggestions(suggest, new Entry("baz the", 12));
  reader.close();
  iw.close();
}
源代码12 项目: lucene-solr   文件: (原文缺失文件名)
@Override
public void setUp() throws Exception {
  super.setUp();
  // Build a 1000-doc index: "field" holds the doc number, "field2" holds the
  // parity both as an indexed string and as sorted doc values.
  dir = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
  for (int i = 0; i < 1000; i++) {
    String parity = Boolean.toString(i % 2 == 0);
    Document doc = new Document();
    doc.add(newStringField("field", Integer.toString(i), Field.Store.NO));
    doc.add(newStringField("field2", parity, Field.Store.NO));
    doc.add(new SortedDocValuesField("field2", new BytesRef(parity)));
    writer.addDocument(doc);
  }
  reader = writer.getReader();
  writer.close();
}
 
源代码13 项目: lucene-solr   文件: TestQueryRescorer.java
/**
 * Indexes {@code numDocs} documents — each with a stored "id" and a random
 * sentence in {@code fieldName} — and returns a reader over the result.
 * The caller owns (and must close) the returned reader.
 */
private IndexReader publishDocs(int numDocs, String fieldName, Directory dir) throws Exception {
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir, newIndexWriterConfig());
  for (int i = 0; i < numDocs; i++) {
    Document doc = new Document();
    doc.add(newStringField("id", Integer.toString(i), Field.Store.YES));
    doc.add(newTextField(fieldName, randomSentence(), Field.Store.NO));
    writer.addDocument(doc);
  }
  IndexReader reader = writer.getReader();
  writer.close();
  return reader;
}
 
源代码14 项目: lucene-solr   文件: TestUnifiedHighlighter.java
// Verifies that a formatter constructed with escape=true entity-encodes raw
// HTML from the source text while still wrapping hits in <b>..</b>.
public void testEncode() throws Exception {
  RandomIndexWriter iw = new RandomIndexWriter(random(), dir, indexAnalyzer);

  Field body = new Field("body", "", fieldType);
  Document doc = new Document();
  doc.add(body);

  body.setStringValue("This is a test. Just a test highlighting from <i>postings</i>. Feel free to ignore.");
  iw.addDocument(doc);

  IndexReader ir = iw.getReader();
  iw.close();

  IndexSearcher searcher = newSearcher(ir);
  // Custom highlighter whose formatter escapes HTML (last ctor arg = true).
  UnifiedHighlighter highlighter = new UnifiedHighlighter(searcher, indexAnalyzer) {
    @Override
    protected PassageFormatter getFormatter(String field) {
      return new DefaultPassageFormatter("<b>", "</b>", "... ", true);
    }
  };
  Query query = new TermQuery(new Term("body", "highlighting"));
  TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER);
  assertEquals(1, topDocs.totalHits.value);
  String snippets[] = highlighter.highlight("body", query, topDocs);
  assertEquals(1, snippets.length);
  // <i>postings</i> comes back escaped; the hit term is still bolded
  assertEquals("Just a test <b>highlighting</b> from &lt;i&gt;postings&lt;&#x2F;i&gt;. ", snippets[0]);

  ir.close();
}
 
源代码15 项目: lucene-solr   文件: TestUnifiedHighlighter.java
public void testMultiplePassages() throws Exception {
  // Highlighting two documents should yield one snippet per document, each
  // limited to at most two passages.
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir, indexAnalyzer);

  Field body = new Field("body", "", fieldType);
  Document doc = new Document();
  doc.add(body);

  // The same Document instance is reused, swapping the field value between adds.
  body.setStringValue("This is a test. Just a test highlighting from postings. Feel free to ignore.");
  writer.addDocument(doc);
  body.setStringValue("This test is another test. Not a good sentence. Test test test test.");
  writer.addDocument(doc);

  IndexReader reader = writer.getReader();
  writer.close();

  IndexSearcher searcher = newSearcher(reader);
  UnifiedHighlighter highlighter = randomUnifiedHighlighter(searcher, indexAnalyzer);
  Query query = new TermQuery(new Term("body", "test"));
  TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER);
  assertEquals(2, topDocs.totalHits.value);

  String[] snippets = highlighter.highlight("body", query, topDocs, 2);
  assertEquals(2, snippets.length);
  assertEquals("This is a <b>test</b>. Just a <b>test</b> highlighting from postings. ", snippets[0]);
  assertEquals("This <b>test</b> is another <b>test</b>. ... <b>Test</b> <b>test</b> <b>test</b> <b>test</b>.", snippets[1]);

  reader.close();
}
 
源代码16 项目: lucene-solr   文件: TestBooleanMinShouldMatch.java
@BeforeClass
public static void beforeClass() throws Exception {
  // Fixture rows of space-separated tokens; null rows index no "data" field.
  // Every doc also carries an "id" and a constant "all" keyword field.
  String[] data = new String[] {
      "A 1 2 3 4 5 6",
      "Z       4 5 6",
      null,
      "B   2   4 5 6",
      "Y     3   5 6",
      null,
      "C     3     6",
      "X       4 5 6"
  };

  index = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random(), index);

  for (int i = 0; i < data.length; i++) {
    Document doc = new Document();
    doc.add(newStringField("id", String.valueOf(i), Field.Store.YES));
    doc.add(newStringField("all", "all", Field.Store.YES));
    if (data[i] != null) {
      doc.add(newTextField("data", data[i], Field.Store.YES));
    }
    writer.addDocument(doc);
  }

  r = writer.getReader();
  s = newSearcher(r);
  writer.close();
}
 
源代码17 项目: lucene-solr   文件: TestJoinUtil.java
// Verifies JoinUtil's min/max child-count constraint: a parent only matches
// when its number of "from" (child) docs falls within [min, max].
public void testMinMaxDocs() throws Exception {
  Directory dir = newDirectory();
  RandomIndexWriter iw = new RandomIndexWriter(
      random(),
      dir,
      newIndexWriterConfig(new MockAnalyzer(random(), MockTokenizer.KEYWORD, false))
  );

  int minChildDocsPerParent = 2;
  int maxChildDocsPerParent = 16;
  int numParents = RandomNumbers.randomIntBetween(random(), 16, 64);
  int[] childDocsPerParent = new int[numParents];
  for (int p = 0; p < numParents; p++) {
    String parentId = Integer.toString(p);
    Document parentDoc = new Document();
    parentDoc.add(new StringField("id", parentId, Field.Store.YES));
    parentDoc.add(new StringField("type", "to", Field.Store.NO));
    parentDoc.add(new SortedDocValuesField("join_field", new BytesRef(parentId)));
    iw.addDocument(parentDoc);
    int numChildren = RandomNumbers.randomIntBetween(random(), minChildDocsPerParent, maxChildDocsPerParent);
    childDocsPerParent[p] = numChildren;
    for (int c = 0; c < numChildren; c++) {
      // NOTE(review): p + c is not unique across parents; harmless here since
      // "id" is only stored, never used for joining or assertions.
      String childId = Integer.toString(p + c);
      Document childDoc = new Document();
      childDoc.add(new StringField("id", childId, Field.Store.YES));
      childDoc.add(new StringField("type", "from", Field.Store.NO));
      // children join to their parent via the parent's id in join_field
      childDoc.add(new SortedDocValuesField("join_field", new BytesRef(parentId)));
      iw.addDocument(childDoc);
    }
  }
  iw.close();

  IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(dir));
  // Build a global ordinal map over join_field across all segments.
  SortedDocValues[] values = new SortedDocValues[searcher.getIndexReader().leaves().size()];
  for (LeafReaderContext leadContext : searcher.getIndexReader().leaves()) {
    values[leadContext.ord] = DocValues.getSorted(leadContext.reader(), "join_field");
  }
  OrdinalMap ordinalMap = OrdinalMap.build(
      null, values, PackedInts.DEFAULT
  );
  Query fromQuery = new TermQuery(new Term("type", "from"));
  Query toQuery = new TermQuery(new Term("type", "to"));

  // Several iterations with random score modes and random [min, max] windows.
  int iters = RandomNumbers.randomIntBetween(random(), 3, 9);
  for (int i = 1; i <= iters; i++) {
    final ScoreMode scoreMode = ScoreMode.values()[random().nextInt(ScoreMode.values().length)];
    int min = RandomNumbers.randomIntBetween(random(), minChildDocsPerParent, maxChildDocsPerParent - 1);
    int max = RandomNumbers.randomIntBetween(random(), min, maxChildDocsPerParent);
    if (VERBOSE) {
      System.out.println("iter=" + i);
      System.out.println("scoreMode=" + scoreMode);
      System.out.println("min=" + min);
      System.out.println("max=" + max);
    }
    Query joinQuery = JoinUtil.createJoinQuery("join_field", fromQuery, toQuery, searcher, scoreMode, ordinalMap, min, max);
    TotalHitCountCollector collector = new TotalHitCountCollector();
    searcher.search(joinQuery, collector);
    // Expected: number of parents whose child count lies within [min, max].
    int expectedCount = 0;
    for (int numChildDocs : childDocsPerParent) {
      if (numChildDocs >= min && numChildDocs <= max) {
        expectedCount++;
      }
    }
    assertEquals(expectedCount, collector.getTotalHits());
  }

  searcher.getIndexReader().close();
  dir.close();
}
 
源代码18 项目: lucene-solr   文件: TestXYShape.java
/**
 * Tests that XYShape INTERSECTS queries (bbox, polygon, line) find both a
 * polygon document and a line document built from that polygon's vertices.
 */
public void testBasicIntersects() throws Exception {
  int numVertices = TestUtil.nextInt(random(), 50, 100);
  Directory dir = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir);

  // add a random polygon document
  XYPolygon p = ShapeTestUtil.createRegularPolygon(0, 90, atLeast(1000000), numVertices);
  Document document = new Document();
  addPolygonsToDoc(FIELDNAME, document, p);
  writer.addDocument(document);

  // add a line document
  document = new Document();
  // add a line string built from the polygon's vertices (minus the closing point)
  float x[] = new float[p.numPoints() - 1];
  float y[] = new float[p.numPoints() - 1];
  for (int i = 0; i < x.length; ++i) {
    x[i] = p.getPolyX(i);
    y[i] = p.getPolyY(i);
  }
  XYLine l = new XYLine(x, y);
  addLineToDoc(FIELDNAME, document, l);
  writer.addDocument(document);

  ////// search /////
  // search an intersecting bbox spanning the first two vertices: both docs hit
  IndexReader reader = writer.getReader();
  writer.close();
  IndexSearcher searcher = newSearcher(reader);
  float minX = Math.min(x[0], x[1]);
  float minY = Math.min(y[0], y[1]);
  float maxX = Math.max(x[0], x[1]);
  float maxY = Math.max(y[0], y[1]);
  Query q = newRectQuery(FIELDNAME, minX, maxX, minY, maxY);
  assertEquals(2, searcher.count(q));

  // search a disjoint bbox (hugs the polygon's min corner): no hits
  q = newRectQuery(FIELDNAME, p.minX-1f, p.minX + 1f, p.minY - 1f, p.minY + 1f);
  assertEquals(0, searcher.count(q));

  // search w/ an intersecting polygon (rectangle over the same bbox)
  q = XYShape.newPolygonQuery(FIELDNAME, QueryRelation.INTERSECTS, new XYPolygon(
      new float[] {minX, minX, maxX, maxX, minX},
      new float[] {minY, maxY, maxY, minY, minY}
  ));
  assertEquals(2, searcher.count(q));

  // search w/ an intersecting line tracing that rectangle's edges
  q = XYShape.newLineQuery(FIELDNAME, QueryRelation.INTERSECTS, new XYLine(
     new float[] {minX, minX, maxX, maxX},
     new float[] {minY, maxY, maxY, minY}
  ));
  assertEquals(2, searcher.count(q));

  IOUtils.close(reader, dir);
}
 
源代码19 项目: lucene-solr   文件: TestNumericRangeQuery32.java
@BeforeClass
public static void beforeClass() throws Exception {
  // Shared fixture: legacy trie-encoded int fields at several precision
  // steps (8/4/2/none), in stored and unstored variants, plus ascending
  // fields used to test range splitting and inclusive/exclusive bounds.
  noDocs = atLeast(4096);
  distance = (1 << 30) / noDocs;
  directory = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random(), directory,
      newIndexWriterConfig(new MockAnalyzer(random()))
      .setMaxBufferedDocs(TestUtil.nextInt(random(), 100, 1000))
      .setMergePolicy(newLogMergePolicy()));
  
  final LegacyFieldType storedInt = new LegacyFieldType(LegacyIntField.TYPE_NOT_STORED);
  storedInt.setStored(true);
  storedInt.freeze();

  final LegacyFieldType storedInt8 = new LegacyFieldType(storedInt);
  storedInt8.setNumericPrecisionStep(8);

  final LegacyFieldType storedInt4 = new LegacyFieldType(storedInt);
  storedInt4.setNumericPrecisionStep(4);

  final LegacyFieldType storedInt2 = new LegacyFieldType(storedInt);
  storedInt2.setNumericPrecisionStep(2);

  // Integer.MAX_VALUE precision step effectively disables trie indexing.
  final LegacyFieldType storedIntNone = new LegacyFieldType(storedInt);
  storedIntNone.setNumericPrecisionStep(Integer.MAX_VALUE);

  final LegacyFieldType unstoredInt = LegacyIntField.TYPE_NOT_STORED;

  final LegacyFieldType unstoredInt8 = new LegacyFieldType(unstoredInt);
  unstoredInt8.setNumericPrecisionStep(8);

  final LegacyFieldType unstoredInt4 = new LegacyFieldType(unstoredInt);
  unstoredInt4.setNumericPrecisionStep(4);

  final LegacyFieldType unstoredInt2 = new LegacyFieldType(unstoredInt);
  unstoredInt2.setNumericPrecisionStep(2);

  // Field instances are reused across all docs; only their values change.
  LegacyIntField
    field8 = new LegacyIntField("field8", 0, storedInt8),
    field4 = new LegacyIntField("field4", 0, storedInt4),
    field2 = new LegacyIntField("field2", 0, storedInt2),
    fieldNoTrie = new LegacyIntField("field"+Integer.MAX_VALUE, 0, storedIntNone),
    ascfield8 = new LegacyIntField("ascfield8", 0, unstoredInt8),
    ascfield4 = new LegacyIntField("ascfield4", 0, unstoredInt4),
    ascfield2 = new LegacyIntField("ascfield2", 0, unstoredInt2);
  
  Document doc = new Document();
  // add fields, that have a distance to test general functionality
  doc.add(field8); doc.add(field4); doc.add(field2); doc.add(fieldNoTrie);
  // add ascending fields with a distance of 1, beginning at -noDocs/2 to test the correct splitting of range and inclusive/exclusive
  doc.add(ascfield8); doc.add(ascfield4); doc.add(ascfield2);
  
  // Add a series of noDocs docs with increasing int values
  for (int l=0; l<noDocs; l++) {
    int val=distance*l+startOffset;
    field8.setIntValue(val);
    field4.setIntValue(val);
    field2.setIntValue(val);
    fieldNoTrie.setIntValue(val);

    val=l-(noDocs/2);
    ascfield8.setIntValue(val);
    ascfield4.setIntValue(val);
    ascfield2.setIntValue(val);
    writer.addDocument(doc);
  }

  reader = writer.getReader();
  searcher=newSearcher(reader);
  writer.close();
}
 
源代码20 项目: lucene-solr   文件: TestUnifiedHighlighterMTQ.java
// Verifies wildcard highlighting inside a BooleanQuery: a SHOULD wildcard
// clause is highlighted, while a MUST_NOT clause contributes no highlights.
public void testWildcardInBoolean() throws Exception {
  RandomIndexWriter iw = new RandomIndexWriter(random(), dir, indexAnalyzer);

  Field body = new Field("body", "", fieldType);
  Document doc = new Document();
  doc.add(body);

  // Same Document instance reused with two different field values.
  body.setStringValue("This is a test.");
  iw.addDocument(doc);
  body.setStringValue("Test a one sentence document.");
  iw.addDocument(doc);

  IndexReader ir = iw.getReader();
  iw.close();

  IndexSearcher searcher = newSearcher(ir);
  UnifiedHighlighter highlighter = randomUnifiedHighlighter(searcher, indexAnalyzer);
  BooleanQuery query = new BooleanQuery.Builder()
      .add(new WildcardQuery(new Term("body", "te*")), BooleanClause.Occur.SHOULD)
      .build();
  TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER);
  assertEquals(2, topDocs.totalHits.value);
  String snippets[] = highlighter.highlight("body", query, topDocs);
  assertEquals(2, snippets.length);
  assertEquals("This is a <b>test</b>.", snippets[0]);
  assertEquals("<b>Test</b> a one sentence document.", snippets[1]);

  // must not: a MUST_NOT wildcard (on another field) must not produce highlights
  query = new BooleanQuery.Builder()
      .add(new MatchAllDocsQuery(), BooleanClause.Occur.SHOULD)
      .add(new WildcardQuery(new Term("bogus", "te*")), BooleanClause.Occur.MUST_NOT)
      .build();
  topDocs = searcher.search(query, 10, Sort.INDEXORDER);
  assertEquals(2, topDocs.totalHits.value);
  snippets = highlighter.highlight("body", query, topDocs);
  assertEquals(2, snippets.length);
  assertEquals("This is a test.", snippets[0]);
  assertEquals("Test a one sentence document.", snippets[1]);

  ir.close();
}