Source code examples for org.apache.lucene.index.NoMergePolicy (and org.apache.lucene.index.IndexDeletionPolicy)

Listed below are example usages of org.apache.lucene.index.NoMergePolicy (in one case together with org.apache.lucene.index.IndexDeletionPolicy), collected from open-source projects such as lucene-solr, crate, and Elasticsearch.
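
Before the project examples, here is a minimal, self-contained sketch of the core behavior (the class name NoMergePolicyDemo and the in-memory ByteBuffersDirectory are illustrative choices, assuming a Lucene 8.x classpath): with NoMergePolicy.INSTANCE set on the IndexWriterConfig, the writer never merges segments, so every flush leaves a separate segment behind.

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.NoMergePolicy;
import org.apache.lucene.store.ByteBuffersDirectory;
import org.apache.lucene.store.Directory;

public class NoMergePolicyDemo {
  public static void main(String[] args) throws Exception {
    try (Directory dir = new ByteBuffersDirectory()) {
      IndexWriterConfig iwc = new IndexWriterConfig(new StandardAnalyzer())
          .setMergePolicy(NoMergePolicy.INSTANCE); // disable all segment merging
      try (IndexWriter w = new IndexWriter(dir, iwc)) {
        w.addDocument(new Document());
        w.flush(); // first segment
        w.addDocument(new Document());
        w.flush(); // second segment; NoMergePolicy keeps them separate
        try (DirectoryReader reader = DirectoryReader.open(w)) {
          System.out.println("segments: " + reader.leaves().size()); // prints 2
        }
      }
    }
  }
}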

Example 1: project lucene-solr, file TestPayloadScoreQuery.java
@BeforeClass
public static void beforeClass() throws Exception {
  directory = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random(), directory,
      newIndexWriterConfig(new PayloadAnalyzer())
          .setMergePolicy(NoMergePolicy.INSTANCE));
  //writer.infoStream = System.out;
  for (int i = 0; i < 300; i++) {
    Document doc = new Document();
    doc.add(newTextField("field", English.intToEnglish(i), Field.Store.YES));
  String txt = English.intToEnglish(i) + ' ' + English.intToEnglish(i + 1);
    doc.add(newTextField("field2", txt, Field.Store.YES));
    writer.addDocument(doc);
  }
  reader = writer.getReader();
  writer.close();

  searcher = newSearcher(reader);
  searcher.setSimilarity(new JustScorePayloadSimilarity());
}
 
Example 2: project lucene-solr, file TestQueryBitSetProducer.java
public void testSimple() throws Exception {
  Directory dir = newDirectory();
  IndexWriterConfig iwc = newIndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE);
  RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
  w.addDocument(new Document());
  DirectoryReader reader = w.getReader();

  QueryBitSetProducer producer = new QueryBitSetProducer(new MatchNoDocsQuery());
  assertNull(producer.getBitSet(reader.leaves().get(0)));
  assertEquals(1, producer.cache.size());

  producer = new QueryBitSetProducer(new MatchAllDocsQuery());
  BitSet bitSet = producer.getBitSet(reader.leaves().get(0));
  assertEquals(1, bitSet.length());
  assertEquals(true, bitSet.get(0));
  assertEquals(1, producer.cache.size());

  IOUtils.close(reader, w, dir);
}
 
Example 3: project lucene-solr, file TestQueryBitSetProducer.java
public void testReaderNotSuitedForCaching() throws IOException{
  Directory dir = newDirectory();
  IndexWriterConfig iwc = newIndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE);
  RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
  w.addDocument(new Document());
  DirectoryReader reader = new DummyDirectoryReader(w.getReader());

  QueryBitSetProducer producer = new QueryBitSetProducer(new MatchNoDocsQuery());
  assertNull(producer.getBitSet(reader.leaves().get(0)));
  assertEquals(0, producer.cache.size());

  producer = new QueryBitSetProducer(new MatchAllDocsQuery());
  BitSet bitSet = producer.getBitSet(reader.leaves().get(0));
  assertEquals(1, bitSet.length());
  assertEquals(true, bitSet.get(0));
  assertEquals(0, producer.cache.size());

  IOUtils.close(reader, w, dir);
}
 
Example 4: project lucene-solr, file TestLRUQueryCache.java
public void testReaderNotSuitedForCaching() throws IOException {
  Directory dir = newDirectory();
  IndexWriterConfig iwc = newIndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE);
  RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
  w.addDocument(new Document());
  DirectoryReader reader = new DummyDirectoryReader(w.getReader());
  IndexSearcher searcher = newSearcher(reader);
  searcher.setQueryCachingPolicy(ALWAYS_CACHE);

  // don't cache if the reader does not expose a cache helper
  assertNull(reader.leaves().get(0).reader().getCoreCacheHelper());
  LRUQueryCache cache = new LRUQueryCache(2, 10000, context -> true, Float.POSITIVE_INFINITY);
  searcher.setQueryCache(cache);
  assertEquals(0, searcher.count(new DummyQuery()));
  assertEquals(0, cache.getCacheCount());
  reader.close();
  w.close();
  dir.close();
}
 
Example 5: project lucene-solr, file TestLRUQueryCache.java
public void testPropagatesScorerSupplier() throws IOException {
  Directory dir = newDirectory();
  IndexWriterConfig iwc = newIndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE);
  RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
  w.addDocument(new Document());
  DirectoryReader reader = w.getReader();
  IndexSearcher searcher = newSearcher(reader);
  searcher.setQueryCachingPolicy(NEVER_CACHE);

  LRUQueryCache cache = new LRUQueryCache(1, 1000);
  searcher.setQueryCache(cache);

  AtomicBoolean scorerCreated = new AtomicBoolean(false);
  Query query = new DummyQuery2(scorerCreated);
  Weight weight = searcher.createWeight(searcher.rewrite(query), ScoreMode.COMPLETE_NO_SCORES, 1);
  ScorerSupplier supplier = weight.scorerSupplier(searcher.getIndexReader().leaves().get(0));
  assertFalse(scorerCreated.get());
  supplier.get(random().nextLong() & 0x7FFFFFFFFFFFFFFFL);
  assertTrue(scorerCreated.get());

  reader.close();
  w.close();
  dir.close();
}
 
Example 6: project lucene-solr, file TestTopDocsCollector.java
public void testSharedCountCollectorManager() throws Exception {
  Query q = new MatchAllDocsQuery();
  Directory dir = newDirectory();
  IndexWriter w = new IndexWriter(dir, newIndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE));
  Document doc = new Document();
  w.addDocuments(Arrays.asList(doc, doc, doc, doc));
  w.flush();
  w.addDocuments(Arrays.asList(doc, doc));
  w.flush();
  IndexReader reader = DirectoryReader.open(w);
  assertEquals(2, reader.leaves().size());
  w.close();

  TopDocsCollector<ScoreDoc> collector = doSearchWithThreshold(5, 10, q, reader);
  TopDocs tdc = doConcurrentSearchWithThreshold(5, 10, q, reader);
  TopDocs tdc2 = collector.topDocs();

  CheckHits.checkEqual(q, tdc.scoreDocs, tdc2.scoreDocs);

  reader.close();
  dir.close();
}
 
Example 7: project crate, file Lucene.java
/**
 * This method removes all Lucene files from the given directory. It first tries to delete all commit points /
 * segments files, so that broken commits or corrupted indices cannot be opened in the future. If any of the
 * segments files can't be deleted, this operation fails.
 */
public static void cleanLuceneIndex(Directory directory) throws IOException {
    try (Lock writeLock = directory.obtainLock(IndexWriter.WRITE_LOCK_NAME)) {
        for (final String file : directory.listAll()) {
            if (file.startsWith(IndexFileNames.SEGMENTS) || file.equals(IndexFileNames.OLD_SEGMENTS_GEN)) {
                directory.deleteFile(file); // remove all segment_N files
            }
        }
    }
    try (IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(Lucene.STANDARD_ANALYZER)
            .setSoftDeletesField(Lucene.SOFT_DELETES_FIELD)
            .setMergePolicy(NoMergePolicy.INSTANCE) // no merges
            .setCommitOnClose(false) // no commits
            .setOpenMode(IndexWriterConfig.OpenMode.CREATE))) { // force creation - don't append...
        // do nothing and close; closing kicks off IndexFileDeleter, which removes all pending files
    }
}
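
For context, a hedged usage sketch of the helper above (the filesystem path is a placeholder, not taken from the crate sources; assumes org.apache.lucene.store.FSDirectory and java.nio.file.Paths are imported):

// Illustrative only: wipe whatever Lucene index lives in this directory,
// e.g. before re-bootstrapping a shard from scratch.
try (Directory dir = FSDirectory.open(Paths.get("/tmp/shard-0/index"))) {
    Lucene.cleanLuceneIndex(dir);
}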
 
Example 8: project lucene-solr, file OverviewTestBase.java
private Path createIndex() throws IOException {
  Path indexDir = createTempDir();

  Directory dir = newFSDirectory(indexDir);
  IndexWriterConfig config = new IndexWriterConfig(new MockAnalyzer(random()));
  config.setMergePolicy(NoMergePolicy.INSTANCE);  // see LUCENE-8998
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir, config);

  Document doc1 = new Document();
  doc1.add(newStringField("f1", "1", Field.Store.NO));
  doc1.add(newTextField("f2", "a b c d e", Field.Store.NO));
  writer.addDocument(doc1);

  Document doc2 = new Document();
  doc2.add(newStringField("f1", "2", Field.Store.NO));
  doc2.add(new TextField("f2", "a c", Field.Store.NO));
  writer.addDocument(doc2);

  Document doc3 = new Document();
  doc3.add(newStringField("f1", "3", Field.Store.NO));
  doc3.add(newTextField("f2", "a f", Field.Store.NO));
  writer.addDocument(doc3);

  Map<String, String> userData = new HashMap<>();
  userData.put("data", "val");
  writer.w.setLiveCommitData(userData.entrySet());

  writer.commit();

  writer.close();
  dir.close();

  return indexDir;
}
 
Example 9: project lucene-solr, file TestTaxonomyFacetCounts.java
public void testSegmentsWithoutCategoriesOrResults() throws Exception {
  // tests the accumulator when there are segments with no results
  Directory indexDir = newDirectory();
  Directory taxoDir = newDirectory();
  
  IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
  iwc.setMergePolicy(NoMergePolicy.INSTANCE); // prevent merges
  IndexWriter indexWriter = new IndexWriter(indexDir, iwc);

  TaxonomyWriter taxoWriter = new DirectoryTaxonomyWriter(taxoDir);
  FacetsConfig config = new FacetsConfig();
  indexTwoDocs(taxoWriter, indexWriter, config, false); // 1st segment, no content, with categories
  indexTwoDocs(taxoWriter, indexWriter, null, true);    // 2nd segment, with content, no categories
  indexTwoDocs(taxoWriter, indexWriter, config, true);  // 3rd segment, with content, with categories
  indexTwoDocs(taxoWriter, indexWriter, null, false);   // 4th segment, no content, no categories
  indexTwoDocs(taxoWriter, indexWriter, null, true);    // 5th segment, with content, no categories
  indexTwoDocs(taxoWriter, indexWriter, config, true);  // 6th segment, with content, with categories
  indexTwoDocs(taxoWriter, indexWriter, null, true);    // 7th segment, with content, no categories
  indexWriter.close();
  IOUtils.close(taxoWriter);

  DirectoryReader indexReader = DirectoryReader.open(indexDir);
  TaxonomyReader taxoReader = new DirectoryTaxonomyReader(taxoDir);
  IndexSearcher indexSearcher = newSearcher(indexReader);
  
  // search for "f:a", only segments 1 and 3 should match results
  Query q = new TermQuery(new Term("f", "a"));
  FacetsCollector sfc = new FacetsCollector();
  indexSearcher.search(q, sfc);
  Facets facets = getTaxonomyFacetCounts(taxoReader, config, sfc);
  FacetResult result = facets.getTopChildren(10, "A");
  assertEquals("wrong number of children", 2, result.labelValues.length);
  for (LabelAndValue labelValue : result.labelValues) {
    assertEquals("wrong weight for child " + labelValue.label, 2, labelValue.value.intValue());
  }

  IOUtils.close(indexReader, taxoReader, indexDir, taxoDir);
}
 
Example 10: project lucene-solr, file TestTaxonomyFacetCounts2.java
@BeforeClass
public static void beforeClassCountingFacetsAggregatorTest() throws Exception {
  indexDir = newDirectory();
  taxoDir = newDirectory();
  
  // create an index which has:
  // 1. Segment with no categories, but matching results
  // 2. Segment w/ categories, but no results
  // 3. Segment w/ categories and results
  // 4. Segment w/ categories, but only some results
  
  IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
  conf.setMergePolicy(NoMergePolicy.INSTANCE); // prevent merges, so we can control the index segments
  IndexWriter indexWriter = new IndexWriter(indexDir, conf);
  TaxonomyWriter taxoWriter = new DirectoryTaxonomyWriter(taxoDir);

  allExpectedCounts = newCounts();
  termExpectedCounts = newCounts();
  
  // segment w/ no categories
  indexDocsNoFacets(indexWriter);

  // segment w/ categories, no content
  indexDocsWithFacetsNoTerms(indexWriter, taxoWriter, allExpectedCounts);

  // segment w/ categories and content
  indexDocsWithFacetsAndTerms(indexWriter, taxoWriter, allExpectedCounts);
  
  // segment w/ categories and some content
  indexDocsWithFacetsAndSomeTerms(indexWriter, taxoWriter, allExpectedCounts);

  indexWriter.close();
  IOUtils.close(taxoWriter);
}
 
Example 11: project lucene-solr, file TestCheckJoinIndex.java
public void testInconsistentDeletes() throws IOException {
  final Directory dir = newDirectory();
  final IndexWriterConfig iwc = newIndexWriterConfig();
  iwc.setMergePolicy(NoMergePolicy.INSTANCE); // so that deletions don't trigger merges
  final RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);

  List<Document> block = new ArrayList<>();
  final int numChildren = TestUtil.nextInt(random(), 1, 3);
  for (int i = 0; i < numChildren; ++i) {
    Document doc = new Document();
    doc.add(new StringField("child", Integer.toString(i), Store.NO));
    block.add(doc);
  }
  Document parent = new Document();
  parent.add(new StringField("parent", "true", Store.NO));
  block.add(parent);
  w.addDocuments(block);

  if (random().nextBoolean()) {
    w.deleteDocuments(new Term("parent", "true"));
  } else {
    // delete any of the children
    w.deleteDocuments(new Term("child", Integer.toString(random().nextInt(numChildren))));
  }

  final IndexReader reader = w.getReader();
  w.close();

  BitSetProducer parentsFilter = new QueryBitSetProducer(new TermQuery(new Term("parent", "true")));
  try {
    expectThrows(IllegalStateException.class, () -> CheckJoinIndex.check(reader, parentsFilter));
  } finally {
    reader.close();
    dir.close();
  }
}
 
Example 12: project lucene-solr, file TestPhraseWildcardQuery.java
@Override
public void setUp() throws Exception {
  super.setUp();
  directory = newDirectory();
  RandomIndexWriter iw = new RandomIndexWriter(random(), directory,
                                               newIndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE)); // do not accidentally merge the two segments we create here
  iw.setDoRandomForceMerge(false); // Keep the segments separated.
  addSegments(iw);
  reader = iw.getReader();
  iw.close();
  searcher = newSearcher(reader);
  assertEquals("test test relies on 2 segments", 2, searcher.getIndexReader().leaves().size());
}
 
Example 13: project lucene-solr, file TestLRUQueryCache.java
public void testMinSegmentSizePredicate() throws IOException {
  Directory dir = newDirectory();
  IndexWriterConfig iwc = newIndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE);
  RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
  w.addDocument(new Document());
  DirectoryReader reader = w.getReader();
  IndexSearcher searcher = newSearcher(reader);
  searcher.setQueryCachingPolicy(ALWAYS_CACHE);

  LRUQueryCache cache = new LRUQueryCache(2, 10000, new LRUQueryCache.MinSegmentSizePredicate(2, 0f), Float.POSITIVE_INFINITY);
  searcher.setQueryCache(cache);
  searcher.count(new DummyQuery());
  assertEquals(0, cache.getCacheCount());

  cache = new LRUQueryCache(2, 10000, new LRUQueryCache.MinSegmentSizePredicate(1, 0f), Float.POSITIVE_INFINITY);
  searcher.setQueryCache(cache);
  searcher.count(new DummyQuery());
  assertEquals(1, cache.getCacheCount());

  cache = new LRUQueryCache(2, 10000, new LRUQueryCache.MinSegmentSizePredicate(0, .6f), Float.POSITIVE_INFINITY);
  searcher.setQueryCache(cache);
  searcher.count(new DummyQuery());
  assertEquals(1, cache.getCacheCount());

  w.addDocument(new Document());
  reader.close();
  reader = w.getReader();
  searcher = newSearcher(reader);
  searcher.setQueryCachingPolicy(ALWAYS_CACHE);
  cache = new LRUQueryCache(2, 10000, new LRUQueryCache.MinSegmentSizePredicate(0, .6f), Float.POSITIVE_INFINITY);
  searcher.setQueryCache(cache);
  searcher.count(new DummyQuery());
  assertEquals(0, cache.getCacheCount());

  reader.close();
  w.close();
  dir.close();
}
 
Example 14: project lucene-solr, file TestLRUQueryCache.java
public void testQueryNotSuitedForCaching() throws IOException {
  Directory dir = newDirectory();
  IndexWriterConfig iwc = newIndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE);
  RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
  w.addDocument(new Document());
  DirectoryReader reader = w.getReader();
  IndexSearcher searcher = newSearcher(reader);
  searcher.setQueryCachingPolicy(ALWAYS_CACHE);

  LRUQueryCache cache = new LRUQueryCache(2, 10000, context -> true, Float.POSITIVE_INFINITY);
  searcher.setQueryCache(cache);

  assertEquals(0, searcher.count(new NoCacheQuery()));
  assertEquals(0, cache.getCacheCount());

  // BooleanQuery wrapping an uncacheable query should also not be cached
  BooleanQuery bq = new BooleanQuery.Builder()
      .add(new NoCacheQuery(), Occur.MUST)
      .add(new TermQuery(new Term("field", "term")), Occur.MUST).build();
  assertEquals(0, searcher.count(bq));
  assertEquals(0, cache.getCacheCount());

  reader.close();
  w.close();
  dir.close();
}
 
Example 15: project lucene-solr, file TestTopDocsCollector.java
public void testRelationVsTopDocsCount() throws Exception {
  try (Directory dir = newDirectory();
      IndexWriter w = new IndexWriter(dir, newIndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE))) {
    Document doc = new Document();
    doc.add(new TextField("f", "foo bar", Store.NO));
    w.addDocuments(Arrays.asList(doc, doc, doc, doc, doc));
    w.flush();
    w.addDocuments(Arrays.asList(doc, doc, doc, doc, doc));
    w.flush();
    
    try (IndexReader reader = DirectoryReader.open(w)) {
      IndexSearcher searcher = new IndexSearcher(reader);
      TopScoreDocCollector collector = TopScoreDocCollector.create(2, null, 10);
      searcher.search(new TermQuery(new Term("f", "foo")), collector);
      assertEquals(10, collector.totalHits);
      assertEquals(TotalHits.Relation.EQUAL_TO, collector.totalHitsRelation);
      
      collector = TopScoreDocCollector.create(2, null, 2);
      searcher.search(new TermQuery(new Term("f", "foo")), collector);
      assertTrue(10 >= collector.totalHits);
      assertEquals(TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO, collector.totalHitsRelation);
      
      collector = TopScoreDocCollector.create(10, null, 2);
      searcher.search(new TermQuery(new Term("f", "foo")), collector);
      assertEquals(10, collector.totalHits);
      assertEquals(TotalHits.Relation.EQUAL_TO, collector.totalHitsRelation);
    }
  }
}
 
Example 16: project lucene-solr, file TestTopFieldCollector.java
public void testRelationVsTopDocsCount() throws Exception {
  Sort sort = new Sort(SortField.FIELD_SCORE, SortField.FIELD_DOC);
  try (Directory dir = newDirectory();
      IndexWriter w = new IndexWriter(dir, newIndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE))) {
    Document doc = new Document();
    doc.add(new TextField("f", "foo bar", Store.NO));
    w.addDocuments(Arrays.asList(doc, doc, doc, doc, doc));
    w.flush();
    w.addDocuments(Arrays.asList(doc, doc, doc, doc, doc));
    w.flush();
    
    try (IndexReader reader = DirectoryReader.open(w)) {
      IndexSearcher searcher = new IndexSearcher(reader);
      TopFieldCollector collector = TopFieldCollector.create(sort, 2, 10);
      searcher.search(new TermQuery(new Term("f", "foo")), collector);
      assertEquals(10, collector.totalHits);
      assertEquals(TotalHits.Relation.EQUAL_TO, collector.totalHitsRelation);
      
      collector = TopFieldCollector.create(sort, 2, 2);
      searcher.search(new TermQuery(new Term("f", "foo")), collector);
      assertTrue(10 >= collector.totalHits);
      assertEquals(TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO, collector.totalHitsRelation);
      
      collector = TopFieldCollector.create(sort, 10, 2);
      searcher.search(new TermQuery(new Term("f", "foo")), collector);
      assertEquals(10, collector.totalHits);
      assertEquals(TotalHits.Relation.EQUAL_TO, collector.totalHitsRelation);
    }
  }
}
 
Example 17: project lucene-solr, file SolrSnapshotManager.java
/**
 * This method deletes index files of the {@linkplain IndexCommit} for the specified generation number.
 *
 * @param core The Solr core
 * @param dir The index directory storing the snapshot.
 * @throws IOException in case of I/O errors.
 */
@SuppressWarnings({"try", "unused"})
private static void deleteSnapshotIndexFiles(SolrCore core, Directory dir, IndexDeletionPolicy delPolicy) throws IOException {
  IndexWriterConfig conf = core.getSolrConfig().indexConfig.toIndexWriterConfig(core);
  conf.setOpenMode(OpenMode.APPEND);
  conf.setMergePolicy(NoMergePolicy.INSTANCE); // we don't want to merge any commits here!
  conf.setIndexDeletionPolicy(delPolicy);
  conf.setCodec(core.getCodec());
  try (SolrIndexWriter iw = new SolrIndexWriter("SolrSnapshotCleaner", dir, conf)) {
    // Do nothing. The only purpose of opening the index writer is to invoke IndexDeletionPolicy#onInit,
    // so that we can clean up the files associated with the specified index commit.
    // Note that the index writer creates a new commit during the close() operation (which is harmless).
  }
}
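
The same trick works in plain Lucene: opening an IndexWriter in APPEND mode with a custom IndexDeletionPolicy lets the policy's onInit callback delete unwanted commits. A hedged sketch follows (the anonymous policy simply mimics Lucene's built-in KeepOnlyLastCommitDeletionPolicy; dir is assumed to be an existing Directory, with java.util.List and the org.apache.lucene.index classes imported):

// Keep only the most recent commit; onInit runs inside IndexWriter's constructor.
IndexDeletionPolicy keepOnlyLast = new IndexDeletionPolicy() {
  @Override
  public void onInit(List<? extends IndexCommit> commits) {
    for (int i = 0; i < commits.size() - 1; i++) {
      commits.get(i).delete(); // flag all but the latest commit for deletion
    }
  }
  @Override
  public void onCommit(List<? extends IndexCommit> commits) {}
};
IndexWriterConfig conf = new IndexWriterConfig(new StandardAnalyzer())
    .setOpenMode(IndexWriterConfig.OpenMode.APPEND) // don't create a new index
    .setMergePolicy(NoMergePolicy.INSTANCE)         // don't merge anything
    .setIndexDeletionPolicy(keepOnlyLast);
try (IndexWriter iw = new IndexWriter(dir, conf)) {
  // opening and closing is enough; the deletion policy already ran in onInit
}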
 
Example 18: project lucene-solr, file TestInPlaceUpdatesDistrib.java
@BeforeClass
public static void beforeSuperClass() throws Exception {
  schemaString = "schema-inplace-updates.xml";
  configString = "solrconfig-tlog.xml";

  // we need consistent segments that aren't re-ordered on merge because we're
  // asserting inplace updates happen by checking the internal [docid]
  systemSetPropertySolrTestsMergePolicyFactory(NoMergePolicyFactory.class.getName());

  randomizeUpdateLogImpl();

  initCore(configString, schemaString);
  
  // sanity check that autocommits are disabled
  assertEquals(-1, h.getCore().getSolrConfig().getUpdateHandlerInfo().autoCommmitMaxTime);
  assertEquals(-1, h.getCore().getSolrConfig().getUpdateHandlerInfo().autoSoftCommmitMaxTime);
  assertEquals(-1, h.getCore().getSolrConfig().getUpdateHandlerInfo().autoCommmitMaxDocs);
  assertEquals(-1, h.getCore().getSolrConfig().getUpdateHandlerInfo().autoSoftCommmitMaxDocs);
  
  // assert that NoMergePolicy was chosen
  RefCounted<IndexWriter> iw = h.getCore().getSolrCoreState().getIndexWriter(h.getCore());
  try {
    IndexWriter writer = iw.get();
    assertTrue("Actual merge policy is: " + writer.getConfig().getMergePolicy(),
        writer.getConfig().getMergePolicy() instanceof NoMergePolicy); 
  } finally {
    iw.decref();
  }
}
 
Example 19: project lucene-solr, file TestMergePolicyConfig.java
public void testNoMergePolicyFactoryConfig() throws Exception {
  initCore("solrconfig-nomergepolicyfactory.xml","schema-minimal.xml");
  IndexWriterConfig iwc = solrConfig.indexConfig.toIndexWriterConfig(h.getCore());
  NoMergePolicy mergePolicy = assertAndCast(NoMergePolicy.class,
      iwc.getMergePolicy());

  assertCommitSomeNewDocs();

  assertCommitSomeNewDocs();
  assertNumSegments(h.getCore(), 2);

  assertU(optimize());
  assertNumSegments(h.getCore(), 2);
  deleteCore();
  initCore("solrconfig-nomergepolicyfactory.xml","schema-minimal.xml");
  iwc = solrConfig.indexConfig.toIndexWriterConfig(h.getCore());
  assertEquals(mergePolicy, iwc.getMergePolicy());

  UpdateHandler updater = h.getCore().getUpdateHandler();
  SolrQueryRequest req = req();
  CommitUpdateCommand cmtCmd = new CommitUpdateCommand(req, true);
  cmtCmd.maxOptimizeSegments = -1;
  expectThrows(IllegalArgumentException.class, () -> {
    updater.commit(cmtCmd);
  });
}
 
Example 20: project Elasticsearch, file MergePolicyConfig.java
public MergePolicy getMergePolicy() {
    return mergesEnabled ? mergePolicy : NoMergePolicy.INSTANCE;
}
 
Example 21: project lucene-solr, file CreateIndexTaskTest.java
public void testNoMergePolicy() throws Exception {
  PerfRunData runData = createPerfRunData(null);
  runData.getConfig().set("merge.policy", NoMergePolicy.class.getName());
  new CreateIndexTask(runData).doLogic();
  new CloseIndexTask(runData).doLogic();
}
 
Example 22: project lucene-solr, file TestBlockJoin.java
public void testEmptyChildFilter() throws Exception {
  final Directory dir = newDirectory();
  final IndexWriterConfig config = new IndexWriterConfig(new MockAnalyzer(random()));
  config.setMergePolicy(NoMergePolicy.INSTANCE);
  // we don't want to merge - since we rely on certain segment setup
  final IndexWriter w = new IndexWriter(dir, config);

  final List<Document> docs = new ArrayList<>();

  docs.add(makeJob("java", 2007));
  docs.add(makeJob("python", 2010));
  docs.add(makeResume("Lisa", "United Kingdom"));
  w.addDocuments(docs);

  docs.clear();
  docs.add(makeJob("ruby", 2005));
  docs.add(makeJob("java", 2006));
  docs.add(makeResume("Frank", "United States"));
  w.addDocuments(docs);
  w.commit();

  IndexReader r = DirectoryReader.open(w);
  w.close();
  IndexSearcher s = newSearcher(r);
  BitSetProducer parentsFilter = new QueryBitSetProducer(new TermQuery(new Term("docType", "resume")));
  CheckJoinIndex.check(r, parentsFilter);

  BooleanQuery.Builder childQuery = new BooleanQuery.Builder();
  childQuery.add(new BooleanClause(new TermQuery(new Term("skill", "java")), Occur.MUST));
  childQuery.add(new BooleanClause(IntPoint.newRangeQuery("year", 2006, 2011), Occur.MUST));

  ToParentBlockJoinQuery childJoinQuery = new ToParentBlockJoinQuery(childQuery.build(), parentsFilter, ScoreMode.Avg);

  BooleanQuery.Builder fullQuery = new BooleanQuery.Builder();
  fullQuery.add(new BooleanClause(childJoinQuery, Occur.MUST));
  fullQuery.add(new BooleanClause(new MatchAllDocsQuery(), Occur.MUST));
  TopDocs topDocs = s.search(fullQuery.build(), 2);
  assertEquals(2, topDocs.totalHits.value);
  assertEquals(asSet("Lisa", "Frank"),
      asSet(s.doc(topDocs.scoreDocs[0].doc).get("name"), s.doc(topDocs.scoreDocs[1].doc).get("name")));

  ParentChildrenBlockJoinQuery childrenQuery =
      new ParentChildrenBlockJoinQuery(parentsFilter, childQuery.build(), topDocs.scoreDocs[0].doc);
  TopDocs matchingChildren = s.search(childrenQuery, 1);
  assertEquals(1, matchingChildren.totalHits.value);
  assertEquals("java", s.doc(matchingChildren.scoreDocs[0].doc).get("skill"));

  childrenQuery = new ParentChildrenBlockJoinQuery(parentsFilter, childQuery.build(), topDocs.scoreDocs[1].doc);
  matchingChildren = s.search(childrenQuery, 1);
  assertEquals(1, matchingChildren.totalHits.value);
  assertEquals("java", s.doc(matchingChildren.scoreDocs[0].doc).get("skill"));

  r.close();
  dir.close();
}
 
Example 23: project lucene-solr, file TestLRUQueryCache.java
public void testDocValuesUpdatesDontBreakCache() throws IOException {
  Directory dir = newDirectory();
  IndexWriterConfig iwc = newIndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE);
  IndexWriter w = new IndexWriter(dir, iwc);
  w.addDocument(new Document());
  w.commit();
  DirectoryReader reader = DirectoryReader.open(w);

  // IMPORTANT:
  // Don't use newSearcher(), because that will sometimes use an ExecutorService, and
  // we need to be single threaded to ensure that LRUQueryCache doesn't skip the cache
  // due to thread contention
  IndexSearcher searcher = new AssertingIndexSearcher(random(), reader);
  searcher.setQueryCachingPolicy(ALWAYS_CACHE);

  LRUQueryCache cache = new LRUQueryCache(1, 10000, context -> true, Float.POSITIVE_INFINITY);
  searcher.setQueryCache(cache);

  DVCacheQuery query = new DVCacheQuery("field");
  assertEquals(1, searcher.count(query));
  assertEquals(1, query.scorerCreatedCount.get());
  assertEquals(1, searcher.count(query));
  assertEquals(1, query.scorerCreatedCount.get());  // should be cached

  Document doc = new Document();
  doc.add(new NumericDocValuesField("field", 1));
  doc.add(newTextField("text", "text", Store.NO));
  w.addDocument(doc);
  reader.close();
  reader = DirectoryReader.open(w);
  searcher = new AssertingIndexSearcher(random(), reader); // no newSearcher(reader) - see comment above
  searcher.setQueryCachingPolicy(ALWAYS_CACHE);
  searcher.setQueryCache(cache);

  assertEquals(2, searcher.count(query));
  assertEquals(2, query.scorerCreatedCount.get());  // first segment cached

  reader.close();
  reader = DirectoryReader.open(w);
  searcher = new AssertingIndexSearcher(random(), reader); // no newSearcher(reader) - see comment above
  searcher.setQueryCachingPolicy(ALWAYS_CACHE);
  searcher.setQueryCache(cache);

  assertEquals(2, searcher.count(query));
  assertEquals(2, query.scorerCreatedCount.get());  // both segments cached

  w.updateNumericDocValue(new Term("text", "text"), "field", 2L);
  reader.close();
  reader = DirectoryReader.open(w);
  searcher = new AssertingIndexSearcher(random(), reader); // no newSearcher(reader) - see comment above
  searcher.setQueryCachingPolicy(ALWAYS_CACHE);
  searcher.setQueryCache(cache);

  assertEquals(2, searcher.count(query));
  assertEquals(3, query.scorerCreatedCount.get());   // second segment no longer cached due to DV update

  assertEquals(2, searcher.count(query));
  assertEquals(4, query.scorerCreatedCount.get());    // still no caching

  reader.close();
  w.close();
  dir.close();
}
 
Example 24: project lucene-solr, file TestLRUQueryCache.java
public void testBulkScorerLocking() throws Exception {

    Directory dir = newDirectory();
    IndexWriterConfig iwc = newIndexWriterConfig()
        .setMergePolicy(NoMergePolicy.INSTANCE)
        // the test framework sometimes sets crazy low values, prevent this since we are indexing many docs
        .setMaxBufferedDocs(-1);
    IndexWriter w = new IndexWriter(dir, iwc);

    final int numDocs = atLeast(10);
    Document emptyDoc = new Document();
    for (int d = 0; d < numDocs; ++d) {
      for (int i = random().nextInt(5000); i >= 0; --i) {
        w.addDocument(emptyDoc);
      }
      Document doc = new Document();
      for (String value : Arrays.asList("foo", "bar", "baz")) {
        if (random().nextBoolean()) {
          doc.add(new StringField("field", value, Store.NO));
        }
      }
    }
    for (int i = TestUtil.nextInt(random(), 3000, 5000); i >= 0; --i) {
      w.addDocument(emptyDoc);
    }
    if (random().nextBoolean()) {
      w.forceMerge(1);
    }

    DirectoryReader reader = DirectoryReader.open(w);
    DirectoryReader noCacheReader = new DummyDirectoryReader(reader);

    LRUQueryCache cache = new LRUQueryCache(1, 100000, context -> true, Float.POSITIVE_INFINITY);
    IndexSearcher searcher = new AssertingIndexSearcher(random(), reader);
    searcher.setQueryCache(cache);
    searcher.setQueryCachingPolicy(ALWAYS_CACHE);

    Query query = new ConstantScoreQuery(new BooleanQuery.Builder()
        .add(new BoostQuery(new TermQuery(new Term("field", "foo")), 3), Occur.SHOULD)
        .add(new BoostQuery(new TermQuery(new Term("field", "bar")), 3), Occur.SHOULD)
        .add(new BoostQuery(new TermQuery(new Term("field", "baz")), 3), Occur.SHOULD)
        .build());

    searcher.search(query, 1);

    IndexSearcher noCacheHelperSearcher = new AssertingIndexSearcher(random(), noCacheReader);
    noCacheHelperSearcher.setQueryCache(cache);
    noCacheHelperSearcher.setQueryCachingPolicy(ALWAYS_CACHE);
    noCacheHelperSearcher.search(query, 1);

    Thread t = new Thread(() -> {
      try {
        noCacheReader.close();
        w.close();
        dir.close();
      }
      catch (Exception e) {
        throw new RuntimeException(e);
      }
    });
    t.start();
    t.join();
  }
 
Example 25: project lucene-solr, file TestTopDocsCollector.java
public void testSetMinCompetitiveScore() throws Exception {
  Directory dir = newDirectory();
  IndexWriter w = new IndexWriter(dir, newIndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE));
  Document doc = new Document();
  w.addDocuments(Arrays.asList(doc, doc, doc, doc));
  w.flush();
  w.addDocuments(Arrays.asList(doc, doc));
  w.flush();
  IndexReader reader = DirectoryReader.open(w);
  assertEquals(2, reader.leaves().size());
  w.close();

  TopScoreDocCollector collector = TopScoreDocCollector.create(2, null, 2);
  ScoreAndDoc scorer = new ScoreAndDoc();

  LeafCollector leafCollector = collector.getLeafCollector(reader.leaves().get(0));
  leafCollector.setScorer(scorer);
  assertNull(scorer.minCompetitiveScore);

  scorer.doc = 0;
  scorer.score = 1;
  leafCollector.collect(0);
  assertNull(scorer.minCompetitiveScore);

  scorer.doc = 1;
  scorer.score = 2;
  leafCollector.collect(1);
  assertNull(scorer.minCompetitiveScore);
  
  scorer.doc = 2;
  scorer.score = 3;
  leafCollector.collect(2);
  assertEquals(Math.nextUp(2f), scorer.minCompetitiveScore, 0f);

  scorer.doc = 3;
  scorer.score = 0.5f;
  // Make sure we do not call setMinCompetitiveScore for non-competitive hits
  scorer.minCompetitiveScore = Float.NaN;
  leafCollector.collect(3);
  assertTrue(Float.isNaN(scorer.minCompetitiveScore));

  scorer.doc = 4;
  scorer.score = 4;
  leafCollector.collect(4);
  assertEquals(Math.nextUp(3f), scorer.minCompetitiveScore, 0f);

  // Make sure the min score is set on scorers on new segments
  scorer = new ScoreAndDoc();
  leafCollector = collector.getLeafCollector(reader.leaves().get(1));
  leafCollector.setScorer(scorer);
  assertEquals(Math.nextUp(3f), scorer.minCompetitiveScore, 0f);

  scorer.doc = 0;
  scorer.score = 1;
  leafCollector.collect(0);
  assertEquals(Math.nextUp(3f), scorer.minCompetitiveScore, 0f);

  scorer.doc = 1;
  scorer.score = 4;
  leafCollector.collect(1);
  assertEquals(Math.nextUp(4f), scorer.minCompetitiveScore, 0f);

  reader.close();
  dir.close();
}
 
Example 26: project lucene-solr, file TestTopDocsCollector.java
public void testTotalHits() throws Exception {
  Directory dir = newDirectory();
  IndexWriter w = new IndexWriter(dir, newIndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE));
  Document doc = new Document();
  w.addDocuments(Arrays.asList(doc, doc, doc, doc));
  w.flush();
  w.addDocuments(Arrays.asList(doc, doc, doc, doc, doc, doc));
  w.flush();
  IndexReader reader = DirectoryReader.open(w);
  assertEquals(2, reader.leaves().size());
  w.close();

  for (int totalHitsThreshold = 0; totalHitsThreshold < 20; ++totalHitsThreshold) {
    TopScoreDocCollector collector = TopScoreDocCollector.create(2, null, totalHitsThreshold);
    ScoreAndDoc scorer = new ScoreAndDoc();

    LeafCollector leafCollector = collector.getLeafCollector(reader.leaves().get(0));
    leafCollector.setScorer(scorer);

    scorer.doc = 0;
    scorer.score = 3;
    leafCollector.collect(0);

    scorer.doc = 1;
    scorer.score = 3;
    leafCollector.collect(1);

    leafCollector = collector.getLeafCollector(reader.leaves().get(1));
    leafCollector.setScorer(scorer);

    scorer.doc = 1;
    scorer.score = 3;
    leafCollector.collect(1);

    scorer.doc = 5;
    scorer.score = 4;
    leafCollector.collect(1);

    TopDocs topDocs = collector.topDocs();
    assertEquals(4, topDocs.totalHits.value);
    assertEquals(totalHitsThreshold < 4, scorer.minCompetitiveScore != null);
    assertEquals(new TotalHits(4, totalHitsThreshold < 4 ? TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO : TotalHits.Relation.EQUAL_TO), topDocs.totalHits);
  }

  reader.close();
  dir.close();
}
 
Example 27: project lucene-solr, file TestTopFieldCollector.java
public void testSetMinCompetitiveScore() throws Exception {
  Directory dir = newDirectory();
  IndexWriter w = new IndexWriter(dir, newIndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE));
  Document doc = new Document();
  w.addDocuments(Arrays.asList(doc, doc, doc, doc));
  w.flush();
  w.addDocuments(Arrays.asList(doc, doc));
  w.flush();
  IndexReader reader = DirectoryReader.open(w);
  assertEquals(2, reader.leaves().size());
  w.close();

  Sort sort = new Sort(FIELD_SCORE, new SortField("foo", SortField.Type.LONG));
  TopFieldCollector collector = TopFieldCollector.create(sort, 2, null, 2);
  ScoreAndDoc scorer = new ScoreAndDoc();

  LeafCollector leafCollector = collector.getLeafCollector(reader.leaves().get(0));
  leafCollector.setScorer(scorer);
  assertNull(scorer.minCompetitiveScore);

  scorer.doc = 0;
  scorer.score = 1;
  leafCollector.collect(0);
  assertNull(scorer.minCompetitiveScore);

  scorer.doc = 1;
  scorer.score = 2;
  leafCollector.collect(1);
  assertNull(scorer.minCompetitiveScore);
  
  scorer.doc = 2;
  scorer.score = 3;
  leafCollector.collect(2);
  assertEquals(2f, scorer.minCompetitiveScore, 0f);

  scorer.doc = 3;
  scorer.score = 0.5f;
  // Make sure we do not call setMinCompetitiveScore for non-competitive hits
  scorer.minCompetitiveScore = Float.NaN;
  leafCollector.collect(3);
  assertTrue(Float.isNaN(scorer.minCompetitiveScore));

  scorer.doc = 4;
  scorer.score = 4;
  leafCollector.collect(4);
  assertEquals(3f, scorer.minCompetitiveScore, 0f);

  // Make sure the min score is set on scorers on new segments
  scorer = new ScoreAndDoc();
  leafCollector = collector.getLeafCollector(reader.leaves().get(1));
  leafCollector.setScorer(scorer);
  assertEquals(3f, scorer.minCompetitiveScore, 0f);

  scorer.doc = 0;
  scorer.score = 1;
  leafCollector.collect(0);
  assertEquals(3f, scorer.minCompetitiveScore, 0f);

  scorer.doc = 1;
  scorer.score = 4;
  leafCollector.collect(1);
  assertEquals(4f, scorer.minCompetitiveScore, 0f);

  reader.close();
  dir.close();
}
 
Example 28: project lucene-solr, file TestTopFieldCollector.java
public void testTotalHitsWithScore() throws Exception {
  Directory dir = newDirectory();
  IndexWriter w = new IndexWriter(dir, newIndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE));
  Document doc = new Document();
  w.addDocuments(Arrays.asList(doc, doc, doc, doc));
  w.flush();
  w.addDocuments(Arrays.asList(doc, doc, doc, doc, doc, doc));
  w.flush();
  IndexReader reader = DirectoryReader.open(w);
  assertEquals(2, reader.leaves().size());
  w.close();

    for (int totalHitsThreshold = 0; totalHitsThreshold < 20; ++totalHitsThreshold) {
    Sort sort = new Sort(FIELD_SCORE, new SortField("foo", SortField.Type.LONG));
    TopFieldCollector collector = TopFieldCollector.create(sort, 2, null, totalHitsThreshold);
    ScoreAndDoc scorer = new ScoreAndDoc();

    LeafCollector leafCollector = collector.getLeafCollector(reader.leaves().get(0));
    leafCollector.setScorer(scorer);

    scorer.doc = 0;
    scorer.score = 3;
    leafCollector.collect(0);

    scorer.doc = 1;
    scorer.score = 3;
    leafCollector.collect(1);

    leafCollector = collector.getLeafCollector(reader.leaves().get(1));
    leafCollector.setScorer(scorer);

    scorer.doc = 1;
    scorer.score = 3;
    leafCollector.collect(1);

    scorer.doc = 5;
    scorer.score = 4;
    leafCollector.collect(1);

    TopDocs topDocs = collector.topDocs();
    assertEquals(totalHitsThreshold < 4, scorer.minCompetitiveScore != null);
    assertEquals(new TotalHits(4, totalHitsThreshold < 4 ? TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO : TotalHits.Relation.EQUAL_TO), topDocs.totalHits);
  }

  reader.close();
  dir.close();
}
 
Example 29: project lucene-solr, file TestControlledRealTimeReopenThread.java
public void testThreadStarvationNoDeleteNRTReader() throws IOException, InterruptedException {
  IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
  conf.setMergePolicy(NoMergePolicy.INSTANCE);
  Directory d = newDirectory();
  final CountDownLatch latch = new CountDownLatch(1);
  final CountDownLatch signal = new CountDownLatch(1);

  LatchedIndexWriter writer = new LatchedIndexWriter(d, conf, latch, signal);
  final SearcherManager manager = new SearcherManager(writer, false, false, null);
  Document doc = new Document();
  doc.add(newTextField("test", "test", Field.Store.YES));
  writer.addDocument(doc);
  manager.maybeRefresh();
  Thread t = new Thread() {
    @Override
    public void run() {
      try {
        signal.await();
        manager.maybeRefresh();
        writer.deleteDocuments(new TermQuery(new Term("foo", "barista")));
        manager.maybeRefresh(); // kick off another reopen so we inc. the internal gen
      } catch (Exception e) {
        e.printStackTrace();
      } finally {
        latch.countDown(); // let the add below finish
      }
    }
  };
  t.start();
  writer.waitAfterUpdate = true; // wait in addDocument to let some reopens go through

  final long lastGen = writer.updateDocument(new Term("foo", "bar"), doc); // once this returns the doc is already reflected in the last reopen

  // We now eagerly resolve deletes so the manager should see it after update:
  assertTrue(manager.isSearcherCurrent());
  
  IndexSearcher searcher = manager.acquire();
  try {
    assertEquals(2, searcher.getIndexReader().numDocs());
  } finally {
    manager.release(searcher);
  }
  final ControlledRealTimeReopenThread<IndexSearcher> thread = new ControlledRealTimeReopenThread<>(writer, manager, 0.01, 0.01);
  thread.start(); // start reopening
  if (VERBOSE) {
    System.out.println("waiting now for generation " + lastGen);
  }
  
  final AtomicBoolean finished = new AtomicBoolean(false);
  Thread waiter = new Thread() {
    @Override
    public void run() {
      try {
        thread.waitForGeneration(lastGen);
      } catch (InterruptedException ie) {
        Thread.currentThread().interrupt();
        throw new RuntimeException(ie);
      }
      finished.set(true);
    }
  };
  waiter.start();
  manager.maybeRefresh();
  waiter.join(1000);
  if (!finished.get()) {
    waiter.interrupt();
    fail("thread deadlocked on waitForGeneration");
  }
  thread.close();
  thread.join();
  writer.close();
  IOUtils.close(manager, d);
}
 
Example 30: project lucene-solr, file NoMergePolicyFactory.java
@Override
protected MergePolicy getMergePolicyInstance() {
  return NoMergePolicy.INSTANCE;
}