org.apache.lucene.index.RandomIndexWriter#forceMerge() 源码实例Demo

下面列出了org.apache.lucene.index.RandomIndexWriter#forceMerge ( ) 实例代码,或者点击链接到github查看源代码,也可以在右侧发表评论。

源代码1 项目: lucene-solr   文件: TestSpanCollection.java
@Override
public void setUp() throws Exception {
  super.setUp();
  directory = newDirectory();
  // Build a single-segment index over the fixture docs so the span tests
  // operate on exactly one leaf reader.
  IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()))
      .setMergePolicy(newLogMergePolicy());
  RandomIndexWriter w = new RandomIndexWriter(random(), directory, conf);
  for (String content : docFields) {
    Document d = new Document();
    d.add(newField(FIELD, content, OFFSETS));
    w.addDocument(d);
  }
  w.forceMerge(1);
  reader = w.getReader();
  w.close();
  searcher = newSearcher(getOnlyLeafReader(reader));
}
 
源代码2 项目: lucene-solr   文件: TestConstantScoreScorer.java
TestConstantScoreScorerIndex() throws IOException {
  directory = newDirectory();

  writer = new RandomIndexWriter(random(), directory,
      newIndexWriterConfig().setMergePolicy(newLogMergePolicy(random().nextBoolean())));

  for (String VALUE : VALUES) {
    Document doc = new Document();
    doc.add(newTextField(FIELD, VALUE, Field.Store.YES));
    writer.addDocument(doc);
  }
  writer.forceMerge(1);

  reader = writer.getReader();
  writer.close();
}
 
源代码3 项目: lucene-solr   文件: TestTermScorer.java
@Override
public void setUp() throws Exception {
  super.setUp();
  directory = newDirectory();

  // One doc per fixture value; ClassicSimilarity keeps scoring deterministic
  // for the term-scorer assertions.
  IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()))
      .setMergePolicy(newLogMergePolicy())
      .setSimilarity(new ClassicSimilarity());
  RandomIndexWriter w = new RandomIndexWriter(random(), directory, conf);
  for (String value : values) {
    Document d = new Document();
    d.add(newTextField(FIELD, value, Field.Store.YES));
    w.addDocument(d);
  }
  w.forceMerge(1);
  indexReader = getOnlyLeafReader(w.getReader());
  w.close();
  indexSearcher = newSearcher(indexReader, false);
  indexSearcher.setSimilarity(new ClassicSimilarity());
}
 
源代码4 项目: lucene-solr   文件: FunctionTestSetup.java
/**
 * Builds the small shared test index of {@code N_DOCS} documents.
 * Docs are added in a scrambled (stride-4) order rather than natural ID order,
 * so dependent tests verify that results are ordered by score, not insertion.
 *
 * @param doMultiSegment when true, keep multiple segments (tiny buffered-doc
 *                       limit); when false, force-merge down to one segment
 */
protected static void createIndex(boolean doMultiSegment) throws Exception {
  if (VERBOSE) {
    System.out.println("TEST: setUp");
  }
  // prepare a small index with just a few documents.
  dir = newDirectory();
  anlzr = new MockAnalyzer(random());
  IndexWriterConfig iwc = newIndexWriterConfig(anlzr).setMergePolicy(newLogMergePolicy());
  if (doMultiSegment) {
    iwc.setMaxBufferedDocs(TestUtil.nextInt(random(), 2, 7));
  }
  RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
  // add docs not exactly in natural ID order, to verify we do check the order of docs by scores
  int remaining = N_DOCS;
  boolean[] done = new boolean[N_DOCS];
  int i = 0;
  while (remaining > 0) {
    if (done[i]) {
      // Stride 4 visits every slot exactly once only when N_DOCS is odd (e.g.
      // prime and > 2); revisiting a slot means the constant is misconfigured.
      // (message fix: "primary" -> "prime")
      throw new Exception("to set this test correctly N_DOCS=" + N_DOCS + " must be prime and greater than 2!");
    }
    addDoc(iw, i);
    done[i] = true;
    i = (i + 4) % N_DOCS;
    remaining--;
  }
  if (!doMultiSegment) {
    if (VERBOSE) {
      System.out.println("TEST: setUp full merge");
    }
    iw.forceMerge(1);
  }
  iw.close();
  if (VERBOSE) {
    System.out.println("TEST: setUp done close");
  }
}
 
源代码5 项目: lucene-solr   文件: TestFeatureDoubleValues.java
public void testFeature() throws IOException {
  Directory dir = newDirectory();
  IndexWriterConfig config =
      newIndexWriterConfig().setMergePolicy(newLogMergePolicy(random().nextBoolean()));
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir, config);
  // Index three docs whose feature weights are asserted below, in docid order.
  for (float weight : new float[] {30F, 1F, 4F}) {
    Document document = new Document();
    document.add(new FeatureField("field", "name", weight));
    writer.addDocument(document);
  }
  writer.forceMerge(1);
  IndexReader ir = writer.getReader();
  writer.close();

  // Force-merged index must have exactly one leaf.
  assertEquals(1, ir.leaves().size());
  LeafReaderContext context = ir.leaves().get(0);
  DoubleValuesSource valuesSource = FeatureField.newDoubleValues("field", "name");
  DoubleValues values = valuesSource.getValues(context, null);

  // Each docid exposes the weight it was indexed with; doc 3 does not exist.
  assertTrue(values.advanceExact(0));
  assertEquals(30, values.doubleValue(), 0f);
  assertTrue(values.advanceExact(1));
  assertEquals(1, values.doubleValue(), 0f);
  assertTrue(values.advanceExact(2));
  assertEquals(4, values.doubleValue(), 0f);
  assertFalse(values.advanceExact(3));

  ir.close();
  dir.close();
}
 
源代码6 项目: lucene-solr   文件: TestBlockJoin.java
public void testToChildInitialAdvanceParentButNoKids() throws Exception {

    final Directory dir = newDirectory();
    final RandomIndexWriter w = new RandomIndexWriter(random(), dir);

    // degenerate case: first doc has no children
    w.addDocument(makeResume("first", "nokids"));
    w.addDocuments(Arrays.asList(makeJob("job", 42), makeResume("second", "haskid")));

    // single segment
    w.forceMerge(1);

    final IndexReader r = w.getReader();
    final IndexSearcher s = newSearcher(r, false);
    w.close();

    // Parents are the "resume" docs; ToChildBlockJoinQuery maps parent matches
    // down to their child docs.
    BitSetProducer parentFilter = new QueryBitSetProducer(new TermQuery(new Term("docType", "resume")));
    Query parentQuery = new TermQuery(new Term("docType", "resume"));

    ToChildBlockJoinQuery parentJoinQuery = new ToChildBlockJoinQuery(parentQuery, parentFilter);

    // Two independent scorers over the same (only) leaf: one driven by
    // nextDoc(), the other by an initial advance(0).
    Weight weight = s.createWeight(s.rewrite(parentJoinQuery), RandomPicks.randomFrom(random(), org.apache.lucene.search.ScoreMode.values()), 1);
    Scorer advancingScorer = weight.scorer(s.getIndexReader().leaves().get(0));
    Scorer nextDocScorer = weight.scorer(s.getIndexReader().leaves().get(0));

    // advance(0) must land on the same first child that nextDoc() finds,
    // even though the first parent has no children.
    final int firstKid = nextDocScorer.iterator().nextDoc();
    assertTrue("firstKid not found", DocIdSetIterator.NO_MORE_DOCS != firstKid);
    assertEquals(firstKid, advancingScorer.iterator().advance(0));

    r.close();
    dir.close();
  }
 
源代码7 项目: lucene-solr   文件: TestLatLonShape.java
public void testLUCENE9055() throws Exception {
  Directory dir = newDirectory();
  RandomIndexWriter w = new RandomIndexWriter(random(), dir);

  // test polygons:
  //[5, 5], [10, 6], [10, 10], [5, 10], [5, 5] ]
  Polygon indexPoly1 = new Polygon(
      new double[] {5d, 6d, 10d, 10d, 5d},
      new double[] {5d, 10d, 10d, 5d, 5d}
  );

  // [ [6, 6], [9, 6], [9, 9], [6, 9], [6, 6] ]
  Polygon indexPoly2 = new Polygon(
      new double[] {6d, 6d, 9d, 9d, 6d},
      new double[] {6d, 9d, 9d, 6d, 6d}
  );

  // index polygons:
  Document doc;
  addPolygonsToDoc(FIELDNAME, doc = new Document(), indexPoly1);
  w.addDocument(doc);
  addPolygonsToDoc(FIELDNAME, doc = new Document(), indexPoly2);
  w.addDocument(doc);
  w.forceMerge(1);

  ///// search //////
  IndexReader reader = w.getReader();
  w.close();
  IndexSearcher searcher = newSearcher(reader);

  // [ [0, 0], [5, 5], [7, 7] ]
  Line searchLine = new Line(new double[] {0, 5, 7}, new double[] {0, 5, 7});


  // The search line passes through both indexed polygons, so both docs match.
  Query q = LatLonShape.newLineQuery(FIELDNAME, QueryRelation.INTERSECTS, searchLine);
  assertEquals(2, searcher.count(q));

  // NOTE(review): w was already closed above; closing it again via IOUtils
  // appears redundant — confirm the double-close is intentional/safe.
  IOUtils.close(w, reader, dir);
}
 
源代码8 项目: lucene-solr   文件: TestMultiTermQueryRewrites.java
@BeforeClass
public static void beforeClass() throws Exception {
  // Three indexes: "dir" holds all 10 docs, while the same docs are split
  // alternately between sdir1 (even ids) and sdir2 (odd ids).
  dir = newDirectory();
  sdir1 = newDirectory();
  sdir2 = newDirectory();
  final RandomIndexWriter writer = new RandomIndexWriter(random(), dir, new MockAnalyzer(random()));
  final RandomIndexWriter swriter1 = new RandomIndexWriter(random(), sdir1, new MockAnalyzer(random()));
  final RandomIndexWriter swriter2 = new RandomIndexWriter(random(), sdir2, new MockAnalyzer(random()));

  for (int i = 0; i < 10; i++) {
    Document doc = new Document();
    doc.add(newStringField("data", Integer.toString(i), Field.Store.NO));
    writer.addDocument(doc);
    ((i % 2 == 0) ? swriter1 : swriter2).addDocument(doc);
  }
  // One segment per index so rewrite tests see a single leaf each.
  writer.forceMerge(1); swriter1.forceMerge(1); swriter2.forceMerge(1);
  writer.close(); swriter1.close(); swriter2.close();
  
  reader = DirectoryReader.open(dir);
  searcher = newSearcher(reader);
  
  // multiReader: the two halves together cover the same docs as "dir",
  // with no duplicated terms.
  multiReader = new MultiReader(new IndexReader[] {
    DirectoryReader.open(sdir1), DirectoryReader.open(sdir2) 
  }, true);
  multiSearcher = newSearcher(multiReader);
  
  // multiReaderDupls: sdir1 overlaps "dir", so even-id terms appear twice.
  multiReaderDupls = new MultiReader(new IndexReader[] {
    DirectoryReader.open(sdir1), DirectoryReader.open(dir) 
  }, true);
  multiSearcherDupls = newSearcher(multiReaderDupls);
}
 
源代码9 项目: lucene-solr   文件: TestMinShouldMatch2.java
@BeforeClass
public static void beforeClass() throws Exception {
  // Index atLeast(300) docs where term groups appear with decreasing
  // probability: alwaysTerms in every doc, commonTerms ~90%, mediumTerms ~50%,
  // rareTerms ~10%.
  dir = newDirectory();
  RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
  final int numDocs = atLeast(300);
  for (int i = 0; i < numDocs; i++) {
    Document doc = new Document();
    
    addSome(doc, alwaysTerms);
    
    if (random().nextInt(100) < 90) {
      addSome(doc, commonTerms);
    }
    if (random().nextInt(100) < 50) {
      addSome(doc, mediumTerms);
    }
    if (random().nextInt(100) < 10) {
      addSome(doc, rareTerms);
    }
    iw.addDocument(doc);
  }
  // Single segment so getOnlyLeafReader below succeeds.
  iw.forceMerge(1);
  iw.close();
  r = DirectoryReader.open(dir);
  reader = getOnlyLeafReader(r);
  searcher = new IndexSearcher(reader);
  // ClassicSimilarity keeps scores simple for the min-should-match duels.
  searcher.setSimilarity(new ClassicSimilarity());
}
 
源代码10 项目: lucene-solr   文件: TestNearSpansOrdered.java
@Override
public void setUp() throws Exception {
  super.setUp();
  directory = newDirectory();
  // Single-segment index over the fixture docs so span tests see one leaf.
  IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()))
      .setMergePolicy(newLogMergePolicy());
  RandomIndexWriter w = new RandomIndexWriter(random(), directory, conf);
  for (String content : docFields) {
    Document d = new Document();
    d.add(newTextField(FIELD, content, Field.Store.NO));
    w.addDocument(d);
  }
  w.forceMerge(1);
  reader = w.getReader();
  w.close();
  searcher = newSearcher(getOnlyLeafReader(reader));
}
 
源代码11 项目: lucene-solr   文件: TestSpanContainQuery.java
@Override
public void setUp() throws Exception {
  super.setUp();
  directory = newDirectory();
  // Index each fixture string as a stored text field, then force a single
  // segment so getOnlyLeafReader succeeds.
  IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()))
      .setMergePolicy(newLogMergePolicy());
  RandomIndexWriter w = new RandomIndexWriter(random(), directory, conf);
  for (String content : docFields) {
    Document d = new Document();
    d.add(newTextField(field, content, Field.Store.YES));
    w.addDocument(d);
  }
  w.forceMerge(1);
  reader = w.getReader();
  w.close();
  searcher = newSearcher(getOnlyLeafReader(reader));
}
源代码12 项目: (来源信息在抓取中丢失)   文件: (未标注)
@Before
public void setupIndex() throws IOException {
    dirUnderTest = newDirectory();
    // Pick one similarity at random per run so the suite is exercised across
    // many scoring models.
    List<Similarity> sims = Arrays.asList(
            new ClassicSimilarity(),
            new SweetSpotSimilarity(), // extends Classic
            new BM25Similarity(),
            new LMDirichletSimilarity(),
            new BooleanSimilarity(),
            new LMJelinekMercerSimilarity(0.2F),
            new AxiomaticF3LOG(0.5F, 10),
            new DFISimilarity(new IndependenceChiSquared()),
            new DFRSimilarity(new BasicModelG(), new AfterEffectB(), new NormalizationH1()),
            new IBSimilarity(new DistributionLL(), new LambdaDF(), new NormalizationH3())
        );
    similarity = sims.get(random().nextInt(sims.size()));

    indexWriterUnderTest = new RandomIndexWriter(random(), dirUnderTest, newIndexWriterConfig().setSimilarity(similarity));
    // One doc per fixture entry; the stored "id" mirrors the array index.
    for (int i = 0; i < docs.length; i++) {
        Document doc = new Document();
        doc.add(newStringField("id", "" + i, Field.Store.YES));
        doc.add(newField("field", docs[i], Store.YES));
        indexWriterUnderTest.addDocument(doc);
    }
    indexWriterUnderTest.commit();
    // NOTE(review): flush() after forceMerge(1) looks redundant — confirm intent.
    indexWriterUnderTest.forceMerge(1);
    indexWriterUnderTest.flush();


    indexReaderUnderTest = indexWriterUnderTest.getReader();
    searcherUnderTest = newSearcher(indexReaderUnderTest);
    searcherUnderTest.setSimilarity(similarity);
}
 
源代码13 项目: lucene-solr   文件: TestPointQueries.java
public void testRangeOptimizesIfAllPointsMatch() throws IOException {
  // Index a single doc with a random 1-3 dim int point, then query a range
  // guaranteed to contain it.
  final int numDims = TestUtil.nextInt(random(), 1, 3);
  Directory dir = newDirectory();
  RandomIndexWriter w = new RandomIndexWriter(random(), dir);
  Document doc = new Document();
  int[] value = new int[numDims];
  for (int i = 0; i < numDims; ++i) {
    value[i] = TestUtil.nextInt(random(), 1, 10);
  }
  doc.add(new IntPoint("point", value));
  w.addDocument(doc);
  IndexReader reader = w.getReader();
  IndexSearcher searcher = new IndexSearcher(reader);
  searcher.setQueryCache(null);
  int[] lowerBound = new int[numDims];
  int[] upperBound = new int[numDims];
  for (int i = 0; i < numDims; ++i) {
    // NOTE(review): nextInt(1) always returns 0, so the bounds equal the
    // value exactly — possibly meant to add random slack; confirm upstream.
    lowerBound[i] = value[i] - random().nextInt(1);
    upperBound[i] = value[i] + random().nextInt(1);
  }
  Query query = IntPoint.newRangeQuery("point", lowerBound, upperBound);
  Weight weight = searcher.createWeight(query, ScoreMode.COMPLETE_NO_SCORES, 1);
  Scorer scorer = weight.scorer(searcher.getIndexReader().leaves().get(0));
  // All docs match -> the scorer should degrade to a match-all iterator.
  assertEquals(DocIdSetIterator.all(1).getClass(), scorer.iterator().getClass());

  // When not all documents in the query have a value, the optimization is not applicable
  reader.close();
  w.addDocument(new Document());
  w.forceMerge(1);
  reader = w.getReader();
  searcher = new IndexSearcher(reader);
  searcher.setQueryCache(null);
  weight = searcher.createWeight(query, ScoreMode.COMPLETE_NO_SCORES, 1);
  scorer = weight.scorer(searcher.getIndexReader().leaves().get(0));
  assertFalse(DocIdSetIterator.all(1).getClass().equals(scorer.iterator().getClass()));

  reader.close();
  w.close();
  dir.close();
}
 
源代码14 项目: lucene-solr   文件: TestLatLonShape.java
public void testLUCENE8736() throws Exception {
  Directory dir = newDirectory();
  RandomIndexWriter w = new RandomIndexWriter(random(), dir);

  // test polygons: four small boxes, all contained by the search polygon below.
  Polygon indexPoly1 = new Polygon(
      new double[] {4d, 4d, 3d, 3d, 4d},
      new double[] {3d, 4d, 4d, 3d, 3d}
  );

  Polygon indexPoly2 = new Polygon(
      new double[] {2d, 2d, 1d, 1d, 2d},
      new double[] {6d, 7d, 7d, 6d, 6d}
  );

  Polygon indexPoly3 = new Polygon(
      new double[] {1d, 1d, 0d, 0d, 1d},
      new double[] {3d, 4d, 4d, 3d, 3d}
  );

  Polygon indexPoly4 = new Polygon(
      new double[] {2d, 2d, 1d, 1d, 2d},
      new double[] {0d, 1d, 1d, 0d, 0d}
  );

  // index polygons:
  Document doc;
  addPolygonsToDoc(FIELDNAME, doc = new Document(), indexPoly1);
  w.addDocument(doc);
  addPolygonsToDoc(FIELDNAME, doc = new Document(), indexPoly2);
  w.addDocument(doc);
  addPolygonsToDoc(FIELDNAME, doc = new Document(), indexPoly3);
  w.addDocument(doc);
  addPolygonsToDoc(FIELDNAME, doc = new Document(), indexPoly4);
  w.addDocument(doc);
  w.forceMerge(1);

  ///// search //////
  IndexReader reader = w.getReader();
  w.close();
  IndexSearcher searcher = newSearcher(reader);

  Polygon[] searchPoly = new Polygon[] {
      new Polygon(new double[] {4d, 4d, 0d, 0d, 4d},
          new double[] {0d, 7d, 7d, 0d, 0d})
  };

  // Every indexed box lies WITHIN the search polygon, so all 4 docs match.
  Query q = LatLonShape.newPolygonQuery(FIELDNAME, QueryRelation.WITHIN, searchPoly);
  assertEquals(4, searcher.count(q));

  // NOTE(review): w was already closed above; the extra close via IOUtils
  // appears redundant — confirm it is intentional.
  IOUtils.close(w, reader, dir);
}
 
源代码15 项目: lucene-solr   文件: TestFieldMaskingSpanQuery.java
@BeforeClass
public static void beforeClass() throws Exception {
  directory = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random(), directory,
      newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy()));

  // Five docs; each holds an "id" plus one or more (gender, first, last)
  // field groups, added in a fixed order the span tests rely on.
  writer.addDocument(doc(new Field[] {
      field("id", "0"),
      field("gender", "male"), field("first", "james"), field("last", "jones")}));

  writer.addDocument(doc(new Field[] {
      field("id", "1"),
      field("gender", "male"), field("first", "james"), field("last", "smith"),
      field("gender", "female"), field("first", "sally"), field("last", "jones")}));

  writer.addDocument(doc(new Field[] {
      field("id", "2"),
      field("gender", "female"), field("first", "greta"), field("last", "jones"),
      field("gender", "female"), field("first", "sally"), field("last", "smith"),
      field("gender", "male"), field("first", "james"), field("last", "jones")}));

  writer.addDocument(doc(new Field[] {
      field("id", "3"),
      field("gender", "female"), field("first", "lisa"), field("last", "jones"),
      field("gender", "male"), field("first", "bob"), field("last", "costas")}));

  writer.addDocument(doc(new Field[] {
      field("id", "4"),
      field("gender", "female"), field("first", "sally"), field("last", "smith"),
      field("gender", "female"), field("first", "linda"), field("last", "dixit"),
      field("gender", "male"), field("first", "bubba"), field("last", "jones")}));

  // Single segment so getOnlyLeafReader below succeeds.
  writer.forceMerge(1);
  reader = writer.getReader();
  writer.close();
  searcher = new IndexSearcher(getOnlyLeafReader(reader));
}
 
源代码16 项目: lucene-solr   文件: TestDisjunctionMaxQuery.java
@Override
public void setUp() throws Exception {
  super.setUp();

  index = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random(), index,
      newIndexWriterConfig(new MockAnalyzer(random()))
                           .setSimilarity(sim).setMergePolicy(newLogMergePolicy()));

  // hed is the most important field, dek is secondary. The four docs form
  // progressively better matches for the query "albino elephant".

  // d1 is an "ok" match: "elephant" in both fields, no "albino".
  Document d1 = new Document();
  d1.add(newField("id", "d1", nonAnalyzedType));
  d1.add(newTextField("hed", "elephant", Field.Store.YES));
  d1.add(newTextField("dek", "elephant", Field.Store.YES));
  writer.addDocument(d1);

  // d2 is a "good" match: "albino" appears, but only in the secondary field.
  Document d2 = new Document();
  d2.add(newField("id", "d2", nonAnalyzedType));
  d2.add(newTextField("hed", "elephant", Field.Store.YES));
  d2.add(newTextField("dek", "albino", Field.Store.YES));
  d2.add(newTextField("dek", "elephant", Field.Store.YES));
  writer.addDocument(d2);

  // d3 is a "better" match: both terms in the primary field.
  Document d3 = new Document();
  d3.add(newField("id", "d3", nonAnalyzedType));
  d3.add(newTextField("hed", "albino", Field.Store.YES));
  d3.add(newTextField("hed", "elephant", Field.Store.YES));
  writer.addDocument(d3);

  // d4 is the "best" match: both terms in hed plus "albino" in dek.
  Document d4 = new Document();
  d4.add(newField("id", "d4", nonAnalyzedType));
  d4.add(newTextField("hed", "albino", Field.Store.YES));
  d4.add(newField("hed", "elephant", nonAnalyzedType));
  d4.add(newTextField("dek", "albino", Field.Store.YES));
  writer.addDocument(d4);

  writer.forceMerge(1);
  r = getOnlyLeafReader(writer.getReader());
  writer.close();
  s = new IndexSearcher(r);
  s.setSimilarity(sim);
}
 
源代码17 项目: lucene-solr   文件: TestBooleanScorer.java
/**
 * Verifies the sparse-clause optimization: when some scoring windows have only
 * one scorer that can match, the scorer calls the collector directly. Matching
 * docs are made sparse by padding with large runs of empty docs, then BS1 and
 * BS2 are duelled via QueryUtils.check.
 */
public void testSparseClauseOptimization() throws IOException {
  // When some windows have only one scorer that can match, the scorer will
  // directly call the collector in this window
  Directory dir = newDirectory();
  RandomIndexWriter w = new RandomIndexWriter(random(), dir);
  Document emptyDoc = new Document();
  final int numDocs = atLeast(10);
  int numEmptyDocs = atLeast(200);
  for (int d = 0; d < numDocs; ++d) {
    // Pad heavily with empty docs so real matches are sparse across windows.
    for (int i = numEmptyDocs; i >= 0; --i) {
      w.addDocument(emptyDoc);
    }
    Document doc = new Document();
    for (String value : Arrays.asList("foo", "bar", "baz")) {
      if (random().nextBoolean()) {
        doc.add(new StringField("field", value, Store.NO));
      }
    }
    // BUGFIX: the populated doc was previously built but never indexed,
    // leaving the index without any "field" terms and making the duel vacuous.
    w.addDocument(doc);
  }
  numEmptyDocs = atLeast(200);
  for (int i = numEmptyDocs; i >= 0; --i) {
    w.addDocument(emptyDoc);
  }
  if (random().nextBoolean()) {
    w.forceMerge(1);
  }
  IndexReader reader = w.getReader();
  IndexSearcher searcher = newSearcher(reader);

  Query query = new BooleanQuery.Builder()
    .add(new BoostQuery(new TermQuery(new Term("field", "foo")), 3), Occur.SHOULD)
    .add(new BoostQuery(new TermQuery(new Term("field", "bar")), 3), Occur.SHOULD)
    .add(new BoostQuery(new TermQuery(new Term("field", "baz")), 3), Occur.SHOULD)
    .build();

  // duel BS1 vs. BS2
  QueryUtils.check(random(), query, searcher);

  reader.close();
  w.close();
  dir.close();
}
 
源代码18 项目: lucene-solr   文件: TestFieldCache.java
public void testIntFieldCache() throws IOException {
  Directory dir = newDirectory();
  IndexWriterConfig cfg = newIndexWriterConfig(new MockAnalyzer(random()));
  cfg.setMergePolicy(newLogMergePolicy());
  RandomIndexWriter iw = new RandomIndexWriter(random(), dir, cfg);
  // Reuse one Document/IntPoint instance, mutating its value per doc.
  Document doc = new Document();
  IntPoint field = new IntPoint("f", 0);
  doc.add(field);
  final int[] values = new int[TestUtil.nextInt(random(), 1, 10)];
  Set<Integer> missing = new HashSet<>();
  for (int i = 0; i < values.length; ++i) {
    final int v;
    // Bias toward edge values: MIN_VALUE, 0, MAX_VALUE, else small randoms.
    switch (random().nextInt(10)) {
      case 0:
        v = Integer.MIN_VALUE;
        break;
      case 1:
        v = 0;
        break;
      case 2:
        v = Integer.MAX_VALUE;
        break;
      default:
        v = TestUtil.nextInt(random(), -10, 10);
        break;
    }
    values[i] = v;
    if (v == 0 && random().nextBoolean()) {
      // missing
      iw.addDocument(new Document());
      missing.add(i);
    } else {
      field.setIntValue(v);
      iw.addDocument(doc);
    }
  }
  // Single segment so docids line up with the values[] indices.
  iw.forceMerge(1);
  final DirectoryReader reader = iw.getReader();
  final NumericDocValues ints = FieldCache.DEFAULT.getNumerics(getOnlyLeafReader(reader), "f", FieldCache.INT_POINT_PARSER);
  // Docs marked missing are skipped by the iterator; others yield their value.
  for (int i = 0; i < values.length; ++i) {
    if (missing.contains(i) == false) {
      assertEquals(i, ints.nextDoc());
      assertEquals(values[i], ints.longValue());
    }
  }
  assertEquals(NO_MORE_DOCS, ints.nextDoc());
  reader.close();
  iw.close();
  dir.close();
}
 
源代码19 项目: lucene-solr   文件: TestLegacyFieldCache.java
public void testIntFieldCache() throws IOException {
  Directory dir = newDirectory();
  IndexWriterConfig cfg = newIndexWriterConfig(new MockAnalyzer(random()));
  cfg.setMergePolicy(newLogMergePolicy());
  RandomIndexWriter iw = new RandomIndexWriter(random(), dir, cfg);
  // Reuse one Document/LegacyIntField instance, mutating its value per doc.
  Document doc = new Document();
  LegacyIntField field = new LegacyIntField("f", 0, Store.YES);
  doc.add(field);
  final int[] values = new int[TestUtil.nextInt(random(), 1, 10)];
  Set<Integer> missing = new HashSet<>();
  for (int i = 0; i < values.length; ++i) {
    final int v;
    // Bias toward edge values: MIN_VALUE, 0, MAX_VALUE, else small randoms.
    switch (random().nextInt(10)) {
      case 0:
        v = Integer.MIN_VALUE;
        break;
      case 1:
        v = 0;
        break;
      case 2:
        v = Integer.MAX_VALUE;
        break;
      default:
        v = TestUtil.nextInt(random(), -10, 10);
        break;
    }
    values[i] = v;
    if (v == 0 && random().nextBoolean()) {
      // missing
      iw.addDocument(new Document());
      missing.add(i);
    } else {
      field.setIntValue(v);
      iw.addDocument(doc);
    }
  }
  // Single segment so docids line up with the values[] indices.
  iw.forceMerge(1);
  final DirectoryReader reader = iw.getReader();
  final NumericDocValues ints = FieldCache.DEFAULT.getNumerics(getOnlyLeafReader(reader), "f", FieldCache.LEGACY_INT_PARSER);
  // Docs marked missing are skipped by the iterator; others yield their value.
  for (int i = 0; i < values.length; ++i) {
    if (missing.contains(i) == false) {
      assertEquals(i, ints.nextDoc());
      assertEquals(values[i], ints.longValue());
    }
  }
  assertEquals(NO_MORE_DOCS, ints.nextDoc());
  reader.close();
  iw.close();
  dir.close();
}
 
源代码20 项目: lucene-solr   文件: TestFeatureDoubleValues.java
public void testFeatureMultipleMissing() throws IOException {
  Directory dir = newDirectory();
  IndexWriterConfig config =
      newIndexWriterConfig().setMergePolicy(newLogMergePolicy(random().nextBoolean()));
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir, config);
  // Docs 0-4 carry no feature field at all.
  for (int i = 0; i < 5; i++) {
    writer.addDocument(new Document());
  }
  // Docs 5 and 6 carry the feature weights asserted below.
  for (float weight : new float[] {1F, 4F}) {
    Document document = new Document();
    document.add(new FeatureField("field", "name", weight));
    writer.addDocument(document);
  }
  writer.forceMerge(1);
  IndexReader ir = writer.getReader();
  writer.close();

  // Force-merged index must have exactly one leaf.
  assertEquals(1, ir.leaves().size());
  LeafReaderContext context = ir.leaves().get(0);
  DoubleValuesSource valuesSource = FeatureField.newDoubleValues("field", "name");
  DoubleValues values = valuesSource.getValues(context, null);

  // No value for the five feature-less docs...
  for (int docId = 0; docId < 5; docId++) {
    assertFalse(values.advanceExact(docId));
  }
  // ...then the two indexed weights; doc 7 does not exist.
  assertTrue(values.advanceExact(5));
  assertEquals(1, values.doubleValue(), 0f);
  assertTrue(values.advanceExact(6));
  assertEquals(4, values.doubleValue(), 0f);
  assertFalse(values.advanceExact(7));

  ir.close();
  dir.close();
}