Source code examples for the class org.apache.lucene.search.SearcherManager

The examples below show how to use the org.apache.lucene.search.SearcherManager API; you can also follow the links to the corresponding GitHub repositories to view the full source.
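
Before the examples, here is a minimal, self-contained sketch of the typical SearcherManager lifecycle (the index path, field name, and class name are illustrative, not taken from any example below): open the manager over an IndexWriter for near-real-time search, pair every acquire() with a release(), and call maybeRefresh() to make recent writes visible. In long-running services a ControlledRealTimeReopenThread usually drives the refreshes instead (see Example 19).

import java.io.IOException;
import java.nio.file.Paths;

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.SearcherFactory;
import org.apache.lucene.search.SearcherManager;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class SearcherManagerBasics {
  public static void main(String[] args) throws IOException {
    try (Directory dir = FSDirectory.open(Paths.get("/tmp/example-index"));
         IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()));
         // Opening over the IndexWriter (rather than a Directory) enables near-real-time search.
         SearcherManager manager = new SearcherManager(writer, new SearcherFactory())) {

      Document doc = new Document();
      doc.add(new StringField("id", "1", Field.Store.YES));
      writer.addDocument(doc);

      // Make the uncommitted write visible to newly acquired searchers.
      manager.maybeRefresh();

      // Always pair acquire() with release(); the manager reference-counts the underlying reader.
      IndexSearcher searcher = manager.acquire();
      try {
        TopDocs hits = searcher.search(new TermQuery(new Term("id", "1")), 10);
        System.out.println("hits: " + hits.totalHits);
      } finally {
        manager.release(searcher);
      }
    }
  }
}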

Example 1, Project: onedev, File: DefaultSearchManager.java
@Transactional
@Listen
public void on(EntityRemoved event) {
	if (event.getEntity() instanceof Project) {
		synchronized (searcherManagers) {
			Long projectId = event.getEntity().getId();						
			SearcherManager searcherManager = searcherManagers.remove(projectId);
			if (searcherManager != null) {
				try {
					searcherManager.close();
				} catch (IOException e) {
					throw ExceptionUtils.unchecked(e);
				}
			}
		}
	}
}
 
Example 2, Project: geode-examples, File: SpatialHelperTest.java
@Test
public void queryFindsADocumentThatWasAdded() throws IOException {

  // Create an in memory lucene index to add a document to
  RAMDirectory directory = new RAMDirectory();
  IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig());

  // Add a document to the lucene index
  Document document = new Document();
  document.add(new TextField("name", "name", Field.Store.YES));
  Field[] fields = SpatialHelper.getIndexableFields(-122.8515139, 45.5099231);
  for (Field field : fields) {
    document.add(field);
  }
  writer.addDocument(document);
  writer.commit();


  // Make sure a findWithin query locates the document
  Query query = SpatialHelper.findWithin(-122.8515239, 45.5099331, 1);
  SearcherManager searcherManager = new SearcherManager(writer, null);
  IndexSearcher searcher = searcherManager.acquire();
  TopDocs results = searcher.search(query, 100);
  assertEquals(1, results.totalHits);
}
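
Note that this test acquires a searcher but never releases it, and never closes the SearcherManager; that is tolerable in a short-lived unit test over a RAMDirectory, but long-lived code should pair each acquire() with a release(), as in the lifecycle sketch above.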
 
Example 3, Project: Elasticsearch, File: Engine.java
/**
 * Read the last segments info from the commit pointed to by the searcher manager
 */
protected static SegmentInfos readLastCommittedSegmentInfos(final SearcherManager sm, final Store store) throws IOException {
    IndexSearcher searcher = sm.acquire();
    try {
        IndexCommit latestCommit = ((DirectoryReader) searcher.getIndexReader()).getIndexCommit();
        return Lucene.readSegmentInfos(latestCommit);
    } catch (IOException e) {
        // Fall back to reading from the store if reading from the commit fails
        try {
            return store.readLastCommittedSegmentsInfo();
        } catch (IOException e2) {
            e2.addSuppressed(e);
            throw e2;
        }
    } finally {
        sm.release(searcher);
    }
}
 
Example 4, Project: lucene-solr, File: SegmentInfosSearcherManager.java
@Override
protected IndexSearcher refreshIfNeeded(IndexSearcher old) throws IOException {
  List<LeafReader> subs;
  if (old == null) {
    subs = null;
  } else {
    subs = new ArrayList<>();
    for(LeafReaderContext ctx : old.getIndexReader().leaves()) {
      subs.add(ctx.reader());
    }
  }

  // Open a new reader, sharing any common segment readers with the old one:
  DirectoryReader r = StandardDirectoryReader.open(dir, currentInfos, subs);
  addReaderClosedListener(r);
  node.message("refreshed to version=" + currentInfos.getVersion() + " r=" + r);
  return SearcherManager.getSearcher(searcherFactory, r, old.getIndexReader());
}
 
Example 5, Project: lucene-solr, File: AnalyzingInfixSuggester.java
/** Create a new instance, loading from a previously built
   *  AnalyzingInfixSuggester directory, if it exists.  This directory must be
   *  private to the infix suggester (i.e., not an external
   *  Lucene index).  Note that {@link #close}
   *  will also close the provided directory.
   *
   *  @param minPrefixChars Minimum number of leading characters
   *     before PrefixQuery is used (default 4).
   *     Prefixes shorter than this are indexed as character
   *     ngrams (increasing index size but making lookups
   *     faster).
   *
   *  @param commitOnBuild Call commit after the index has finished building. This would persist the
   *                       suggester index to disk and future instances of this suggester can use this pre-built dictionary.
   *
   *  @param allTermsRequired All terms in the suggest query must be matched.
   *  @param highlight Highlight suggest query in suggestions.
   *  @param closeIndexWriterOnBuild If true, the IndexWriter will be closed after the index has finished building.
   */
public AnalyzingInfixSuggester(Directory dir, Analyzer indexAnalyzer, Analyzer queryAnalyzer, int minPrefixChars,
                               boolean commitOnBuild, boolean allTermsRequired, 
                               boolean highlight, boolean closeIndexWriterOnBuild) throws IOException {
                                  
  if (minPrefixChars < 0) {
    throw new IllegalArgumentException("minPrefixChars must be >= 0; got: " + minPrefixChars);
  }

  this.queryAnalyzer = queryAnalyzer;
  this.indexAnalyzer = indexAnalyzer;
  this.dir = dir;
  this.minPrefixChars = minPrefixChars;
  this.commitOnBuild = commitOnBuild;
  this.allTermsRequired = allTermsRequired;
  this.highlight = highlight;
  this.closeIndexWriterOnBuild = closeIndexWriterOnBuild;

  if (DirectoryReader.indexExists(dir)) {
    // Already built; open it:
    searcherMgr = new SearcherManager(dir, null);
  }
}
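
As a point of reference, here is a hedged usage sketch of this constructor; the directory path is hypothetical, the suggestion source (an InputIterator passed to build()) is elided, and the fragment assumes the usual Lucene suggest-module imports:

// Sketch only: path and inputs are illustrative, not part of the class above.
Directory dir = FSDirectory.open(Paths.get("/tmp/suggest-index")); // must be private to the suggester
Analyzer analyzer = new StandardAnalyzer();
AnalyzingInfixSuggester suggester = new AnalyzingInfixSuggester(
    dir, analyzer, analyzer,
    4,      // minPrefixChars: shorter prefixes are indexed as character ngrams
    true,   // commitOnBuild: persist so a future instance can reuse the built index
    true,   // allTermsRequired
    true,   // highlight
    false); // closeIndexWriterOnBuild
// suggester.build(inputIterator);   // feed suggestions from an InputIterator (not shown)
List<Lookup.LookupResult> results = suggester.lookup("luc", 5, true, true);
suggester.close(); // per the javadoc above, this also closes dir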
 
Example 6, Project: lucene-solr, File: AnalyzingInfixSuggester.java
@Override
public long getCount() throws IOException {
  if (searcherMgr == null) {
    return 0;
  }
  SearcherManager mgr;
  IndexSearcher searcher;
  synchronized (searcherMgrLock) {
    mgr = searcherMgr; // acquire & release on same SearcherManager, via local reference
    searcher = mgr.acquire();
  }
  try {
    return searcher.getIndexReader().numDocs();
  } finally {
    mgr.release(searcher);
  }
}
 
Example 7, Project: lucene-solr, File: TestIndexWriter.java
public void testSoftDeleteWithTryUpdateDocValue() throws Exception {
  Directory dir = newDirectory();
  IndexWriterConfig config = newIndexWriterConfig().setSoftDeletesField("soft_delete")
      .setMergePolicy(new SoftDeletesRetentionMergePolicy("soft_delete", MatchAllDocsQuery::new, newLogMergePolicy()));
  IndexWriter writer = new IndexWriter(dir, config);
  SearcherManager sm = new SearcherManager(writer, new SearcherFactory());
  Document d = new Document();
  d.add(new StringField("id", "0", Field.Store.YES));
  writer.addDocument(d);
  sm.maybeRefreshBlocking();
  doUpdate(new Term("id", "0"), writer,
      new NumericDocValuesField("soft_delete", 1), new NumericDocValuesField("other-field", 1));
  sm.maybeRefreshBlocking();
  assertEquals(1, writer.cloneSegmentInfos().size());
  SegmentCommitInfo si = writer.cloneSegmentInfos().info(0);
  assertEquals(1, si.getSoftDelCount());
  assertEquals(1, si.info.maxDoc());
  IOUtils.close(sm, writer, dir);
}
 
Example 8, Project: incubator-pinot, File: RealtimeLuceneTextIndexReader.java
/**
 * Created by {@link org.apache.pinot.core.indexsegment.mutable.MutableSegmentImpl}
 * for each column on which text index has been enabled
 * @param column column name
 * @param segmentIndexDir realtime segment consumer dir
 * @param segmentName realtime segment name
 */
public RealtimeLuceneTextIndexReader(String column, File segmentIndexDir, String segmentName) {
  _column = column;
  _segmentName = segmentName;
  try {
    // indexCreator.close() is necessary for cleaning up the resources associated with lucene
    // index writer that was indexing data realtime. We close the indexCreator
    // when the realtime segment is destroyed (we would have already committed the
    // segment and converted it into offline before destroy is invoked)
    // So committing the lucene index for the realtime in-memory segment is not necessary
    // as it is already part of the offline segment after the conversion.
    // This is why "commitOnClose" is set to false when creating the lucene index writer
    // for realtime
    _indexCreator =
        new LuceneTextIndexCreator(column, new File(segmentIndexDir.getAbsolutePath() + "/" + segmentName),
            false /* commitOnClose */);
    IndexWriter indexWriter = _indexCreator.getIndexWriter();
    _searcherManager = new SearcherManager(indexWriter, false, false, null);
  } catch (Exception e) {
    LOGGER.error("Failed to instantiate realtime Lucene index reader for column {}, exception {}", column,
        e.getMessage());
    throw new RuntimeException(e);
  }
  StandardAnalyzer analyzer = new StandardAnalyzer();
  _queryParser = new QueryParser(column, analyzer);
}
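
A note on the constructor used here: SearcherManager(writer, applyAllDeletes, writeAllDeletes, factory) is passed false for both flags, so refreshed searchers skip resolving and persisting deletes. That makes refreshes cheaper, a reasonable trade-off for an append-only realtime segment like this one, where documents are never deleted while consuming.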
 
Example 9, Project: vscode-extension, File: test.java
ExternalSearcherManager(SearcherManager internalSearcherManager, SearcherFactory searcherFactory) throws IOException {
    IndexSearcher acquire = internalSearcherManager.acquire();
    try {
        IndexReader indexReader = acquire.getIndexReader();
        assert indexReader instanceof ElasticsearchDirectoryReader :
            "searcher's IndexReader should be an ElasticsearchDirectoryReader, but got " + indexReader;
        indexReader.incRef(); // steal the reader - getSearcher will decrement if it fails
        current = SearcherManager.getSearcher(searcherFactory, indexReader, null);
    } finally {
        internalSearcherManager.release(acquire);
    }
    this.searcherFactory = searcherFactory;
    this.internalSearcherManager = internalSearcherManager;
}
 
Example 10, Project: onedev, File: DefaultSearchManager.java
@Listen
public void on(CommitIndexed event) {
	try {
		SearcherManager searcherManager = getSearcherManager(event.getProject().getForkRoot()); 
		if (searcherManager != null)
			searcherManager.maybeRefresh();
	} catch (InterruptedException | IOException e) {
		throw ExceptionUtils.unchecked(e);
	}
}
 
Example 11, Project: onedev, File: DefaultSearchManager.java
@Listen
public void on(SystemStopping event) {
	synchronized (searcherManagers) {
		for (SearcherManager searcherManager: searcherManagers.values()) {
			try {
				searcherManager.close();
			} catch (IOException e) {
				throw ExceptionUtils.unchecked(e);
			}
		}
		searcherManagers.clear();
	}
}
 
Example 12, Project: Elasticsearch, File: ShadowEngine.java
public ShadowEngine(EngineConfig engineConfig)  {
    super(engineConfig);
    SearcherFactory searcherFactory = new EngineSearcherFactory(engineConfig);
    final long nonexistentRetryTime = engineConfig.getIndexSettings()
            .getAsTime(NONEXISTENT_INDEX_RETRY_WAIT, DEFAULT_NONEXISTENT_INDEX_RETRY_WAIT)
            .getMillis();
    try {
        DirectoryReader reader = null;
        store.incRef();
        boolean success = false;
        try {
            if (Lucene.waitForIndex(store.directory(), nonexistentRetryTime)) {
                reader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(store.directory()), shardId);
                this.searcherManager = new SearcherManager(reader, searcherFactory);
                this.lastCommittedSegmentInfos = readLastCommittedSegmentInfos(searcherManager, store);
                success = true;
            } else {
                throw new IllegalStateException("failed to open a shadow engine after" +
                        nonexistentRetryTime + "ms, " +
                        "directory is not an index");
            }
        } catch (Throwable e) {
            logger.warn("failed to create new reader", e);
            throw e;
        } finally {
            if (success == false) {
                IOUtils.closeWhileHandlingException(reader);
                store.decRef();
            }
        }
    } catch (IOException ex) {
        throw new EngineCreationFailureException(shardId, "failed to open index reader", ex);
    }
    logger.trace("created new ShadowEngine");
}
 
Example 13, Project: lucene-solr, File: AnalyzingInfixSuggester.java
@Override
public long ramBytesUsed() {
  long mem = RamUsageEstimator.shallowSizeOf(this);
  try {
    if (searcherMgr != null) {
      SearcherManager mgr;
      IndexSearcher searcher;
      synchronized (searcherMgrLock) {
        mgr = searcherMgr; // acquire & release on same SearcherManager, via local reference
        searcher = mgr.acquire();
      }
      try {
        for (LeafReaderContext context : searcher.getIndexReader().leaves()) {
          LeafReader reader = FilterLeafReader.unwrap(context.reader());
          if (reader instanceof SegmentReader) {
            mem += ((SegmentReader) reader).ramBytesUsed();
          }
        }
      } finally {
        mgr.release(searcher);
      }
    }
    return mem;
  } catch (IOException ioe) {
    throw new RuntimeException(ioe);
  }
}
 
Example 14, Project: lucene-solr, File: AnalyzingInfixSuggester.java
@Override
public Collection<Accountable> getChildResources() {
  List<Accountable> resources = new ArrayList<>();
  try {
    if (searcherMgr != null) {
      SearcherManager mgr;
      IndexSearcher searcher;
      synchronized (searcherMgrLock) {
        mgr = searcherMgr; // acquire & release on same SearcherManager, via local reference
        searcher = mgr.acquire();
      }
      try {
        for (LeafReaderContext context : searcher.getIndexReader().leaves()) {
          LeafReader reader = FilterLeafReader.unwrap(context.reader());
          if (reader instanceof SegmentReader) {
            resources.add(Accountables.namedAccountable("segment", (SegmentReader)reader));
          }
        }
      } finally {
        mgr.release(searcher);
      }
    }
    return Collections.unmodifiableList(resources);
  } catch (IOException ioe) {
    throw new RuntimeException(ioe);
  }
}
 
Example 15, Project: lucene-solr, File: SearcherTaxonomyManager.java
@Override
protected SearcherAndTaxonomy refreshIfNeeded(SearcherAndTaxonomy ref) throws IOException {
  // Must re-open searcher first, otherwise we may get a
  // new reader that references ords not yet known to the
  // taxonomy reader:
  final IndexReader r = ref.searcher.getIndexReader();
  final IndexReader newReader = DirectoryReader.openIfChanged((DirectoryReader) r);
  if (newReader == null) {
    return null;
  } else {
    DirectoryTaxonomyReader tr;
    try {
      tr = TaxonomyReader.openIfChanged(ref.taxonomyReader);
    } catch (Throwable t1) {
      try {
        IOUtils.close(newReader);
      } catch (Throwable t2) {
        t1.addSuppressed(t2);
      }
      throw t1;
    }
    if (tr == null) {
      ref.taxonomyReader.incRef();
      tr = ref.taxonomyReader;
    } else if (taxoWriter != null && taxoWriter.getTaxonomyEpoch() != taxoEpoch) {
      IOUtils.close(newReader, tr);
      throw new IllegalStateException("DirectoryTaxonomyWriter.replaceTaxonomy was called, which is not allowed when using SearcherTaxonomyManager");
    }

    return new SearcherAndTaxonomy(SearcherManager.getSearcher(searcherFactory, newReader, r), tr);
  }
}
 
Example 16, Project: lucene-solr, File: QueryIndex.java
QueryIndex(MonitorConfiguration config, Presearcher presearcher) throws IOException {
  this.writer = config.buildIndexWriter();
  this.manager = new SearcherManager(writer, true, true, new TermsHashBuilder());
  this.decomposer = config.getQueryDecomposer();
  this.serializer = config.getQuerySerializer();
  this.presearcher = presearcher;
  populateQueryCache(serializer, decomposer);
}
 
Example 17, Project: yes-cart, File: LuceneIndexProviderImpl.java
/**
 * {@inheritDoc}
 */
@Override
public void afterPropertiesSet() throws Exception {

    index = getInstance(uri + File.separatorChar + name + File.separatorChar + "index");
    provideIndexWriter();
    indexReaderManager = new SearcherManager(indexWriter, null);

    facets = getInstance(uri + File.separatorChar + name + File.separatorChar + "taxonomy");
    provideFacetsWriter();
    facetsReaderManager = new SearcherTaxonomyManager(indexWriter, null, facetsWriter);

}
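
A hedged sketch of how the paired manager created above is typically consumed (the field name facetsReaderManager follows this class; the query and facet logic are elided):

// SearcherTaxonomyManager hands out the searcher and the matching taxonomy
// reader as one atomic pair, so facet ordinals stay consistent between them.
SearcherTaxonomyManager.SearcherAndTaxonomy ref = facetsReaderManager.acquire();
try {
    IndexSearcher searcher = ref.searcher;               // run the query with this
    TaxonomyReader taxonomyReader = ref.taxonomyReader;  // resolve facet ordinals with this
    // ... search and count facets ...
} finally {
    facetsReaderManager.release(ref);
}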
 
Example 18, Project: sql-layer, File: FullTextCursor.java
public FullTextCursor(QueryContext context, HKeyRowType rowType, 
                      SearcherManager searcherManager, Query query, int limit) {
    this.context = context;
    this.rowType = rowType;
    this.searcherManager = searcherManager;
    this.query = query;
    this.limit = limit;
    //adapter = context.getStore();
    searcher = searcherManager.acquire();
}
 
Example 19, Project: incubator-pinot, File: TextSearchQueriesTest.java
@Test
public void testMultiThreadedLuceneRealtime() throws Exception {
  File indexFile = new File(INDEX_DIR.getPath() + "/realtime-test3.index");
  Directory indexDirectory = FSDirectory.open(indexFile.toPath());
  StandardAnalyzer standardAnalyzer = new StandardAnalyzer();
  // create and open a writer
  IndexWriterConfig indexWriterConfig = new IndexWriterConfig(standardAnalyzer);
  indexWriterConfig.setRAMBufferSizeMB(500);
  IndexWriter indexWriter = new IndexWriter(indexDirectory, indexWriterConfig);

  // create an NRT index reader
  SearcherManager searcherManager = new SearcherManager(indexWriter, false, false, null);

  // background thread to refresh NRT reader
  ControlledRealTimeReopenThread controlledRealTimeReopenThread =
      new ControlledRealTimeReopenThread(indexWriter, searcherManager, 0.01, 0.01);
  controlledRealTimeReopenThread.start();

  // start writer and reader
  Thread writer = new Thread(new RealtimeWriter(indexWriter));
  Thread realtimeReader = new Thread(new RealtimeReader(searcherManager, standardAnalyzer));

  writer.start();
  realtimeReader.start();

  writer.join();
  realtimeReader.join();
  controlledRealTimeReopenThread.join();
}
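
For reference, a hedged sketch of the generation handshake this reopen thread enables (the variable names follow the test above; doc is an illustrative document): IndexWriter.addDocument returns a sequence number that the thread can wait on.

long gen = indexWriter.addDocument(doc);
// Block until a refreshed searcher covering that generation is available.
controlledRealTimeReopenThread.waitForGeneration(gen);
IndexSearcher searcher = searcherManager.acquire();
try {
  // queries here are guaranteed to see the document added above
} finally {
  searcherManager.release(searcher);
}

ControlledRealTimeReopenThread also implements Closeable, so long-lived services normally close it during shutdown rather than only joining it as this test does.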
 
Example 20, Project: crate, File: InternalEngine.java
ExternalSearcherManager(SearcherManager internalSearcherManager, SearcherFactory searcherFactory) throws IOException {
    IndexSearcher acquire = internalSearcherManager.acquire();
    try {
        IndexReader indexReader = acquire.getIndexReader();
        assert indexReader instanceof ElasticsearchDirectoryReader :
            "searcher's IndexReader should be an ElasticsearchDirectoryReader, but got " + indexReader;
        indexReader.incRef(); // steal the reader - getSearcher will decrement if it fails
        current = SearcherManager.getSearcher(searcherFactory, indexReader, null);
    } finally {
        internalSearcherManager.release(acquire);
    }
    this.searcherFactory = searcherFactory;
    this.internalSearcherManager = internalSearcherManager;
}
 
Example 21, Project: onedev, File: DefaultSearchManager.java
@Override
public List<QueryHit> search(Project project, ObjectId commit, final BlobQuery query) 
		throws InterruptedException {
	List<QueryHit> hits = new ArrayList<>();

	SearcherManager searcherManager = getSearcherManager(project.getForkRoot());
	if (searcherManager != null) {
		try {
			final IndexSearcher searcher = searcherManager.acquire();
			try {
				try (RevWalk revWalk = new RevWalk(project.getRepository())){
					final RevTree revTree = revWalk.parseCommit(commit).getTree();
					final Set<String> checkedBlobPaths = new HashSet<>();
					
					searcher.search(query.asLuceneQuery(), new SimpleCollector() {

						private BinaryDocValues blobPathValues;
						
						@Override
						public void collect(int doc) throws IOException {
							if (hits.size() < query.getCount() && !Thread.currentThread().isInterrupted()) {
								Preconditions.checkState(blobPathValues.advanceExact(doc));
								String blobPath = blobPathValues.binaryValue().utf8ToString();
								
								if (!checkedBlobPaths.contains(blobPath)) {
									TreeWalk treeWalk = TreeWalk.forPath(project.getRepository(), blobPath, revTree);									
									if (treeWalk != null)
										query.collect(searcher, treeWalk, hits);
									checkedBlobPaths.add(blobPath);
								}
							}
						}

						@Override
						protected void doSetNextReader(LeafReaderContext context) throws IOException {
							blobPathValues  = context.reader().getBinaryDocValues(FieldConstants.BLOB_PATH.name());
						}

						@Override
						public boolean needsScores() {
							return false;
						}

					});
				}
			} finally {
				searcherManager.release(searcher);
			}
		} catch (IOException e) {
			throw ExceptionUtils.unchecked(e);
		}
	}
	if (Thread.interrupted())
		throw new InterruptedException();

	return hits;
}
 
Example 22, Project: Elasticsearch, File: DLBasedEngine.java
@Override
protected SearcherManager getSearcherManager() {
    return searcherManager;
}
 
Example 23, Project: Elasticsearch, File: Engine.java
protected Searcher newSearcher(String source, IndexSearcher searcher, SearcherManager manager) {
    return new EngineSearcher(source, searcher, manager, store, logger);
}
 
Example 24, Project: Elasticsearch, File: EngineSearcher.java
public EngineSearcher(String source, IndexSearcher searcher, SearcherManager manager, Store store, ESLogger logger) {
    super(source, searcher);
    this.manager = manager;
    this.store = store;
    this.logger = logger;
}
 
Example 25, Project: Elasticsearch, File: InternalEngine.java
@Override
protected SearcherManager getSearcherManager() {
    return searcherManager;
}
 
Example 26, Project: Elasticsearch, File: ShadowEngine.java
@Override
protected SearcherManager getSearcherManager() {
    return searcherManager;
}
 
Example 27, Project: lucene-solr, File: PrimaryNode.java
public PrimaryNode(IndexWriter writer, int id, long primaryGen, long forcePrimaryVersion,
                   SearcherFactory searcherFactory, PrintStream printStream) throws IOException {
  super(id, writer.getDirectory(), searcherFactory, printStream);
  message("top: now init primary");
  this.writer = writer;
  this.primaryGen = primaryGen;

  try {
    // So that when primary node's IndexWriter finishes a merge, but before it cuts over to the merged segment,
    // it copies it out to the replicas.  This ensures the whole system's NRT latency remains low even when a
    // large merge completes:
    writer.getConfig().setMergedSegmentWarmer(new PreCopyMergedSegmentWarmer(this));

    message("IWC:\n" + writer.getConfig());
    message("dir:\n" + writer.getDirectory());
    message("commitData: " + writer.getLiveCommitData());

    // Record our primaryGen in the userData, and set initial version to 0:
    Map<String,String> commitData = new HashMap<>();
    Iterable<Map.Entry<String,String>> iter = writer.getLiveCommitData();
    if (iter != null) {
      for(Map.Entry<String,String> ent : iter) {
        commitData.put(ent.getKey(), ent.getValue());
      }
    }
    commitData.put(PRIMARY_GEN_KEY, Long.toString(primaryGen));
    if (commitData.get(VERSION_KEY) == null) {
      commitData.put(VERSION_KEY, "0");
      message("add initial commitData version=0");
    } else {
      message("keep current commitData version=" + commitData.get(VERSION_KEY));
    }
    writer.setLiveCommitData(commitData.entrySet(), false);

    // We forcefully advance the SIS version to an unused future version.  This is necessary if the previous primary crashed and we are
    // starting up on an "older" index, else versions can be illegally reused but show different results:
    if (forcePrimaryVersion != -1) {
      message("now forcePrimaryVersion to version=" + forcePrimaryVersion);
      writer.advanceSegmentInfosVersion(forcePrimaryVersion);
    }

    mgr = new SearcherManager(writer, true, true, searcherFactory);
    setCurrentInfos(Collections.<String>emptySet());
    message("init: infos version=" + curInfos.getVersion());

  } catch (Throwable t) {
    message("init: exception");
    t.printStackTrace(printStream);
    throw new RuntimeException(t);
  }
}
 
Example 28, Project: lucene-solr, File: AnalyzingInfixSuggesterTest.java
public void testCloseIndexWriterOnBuild() throws Exception {
  class MyAnalyzingInfixSuggester extends AnalyzingInfixSuggester {
    public MyAnalyzingInfixSuggester(Directory dir, Analyzer indexAnalyzer, Analyzer queryAnalyzer, 
                                     int minPrefixChars, boolean commitOnBuild, boolean allTermsRequired,
                                     boolean highlight, boolean closeIndexWriterOnBuild) throws IOException {
      super(dir, indexAnalyzer, queryAnalyzer, minPrefixChars, commitOnBuild, 
            allTermsRequired, highlight, closeIndexWriterOnBuild);
    }
    public IndexWriter getIndexWriter() {
      return writer;
    } 
    public SearcherManager getSearcherManager() {
      return searcherMgr;
    }
  }

  // After build(), when closeIndexWriterOnBuild = true: 
  // * The IndexWriter should be null 
  // * The SearcherManager should be non-null
  // * SearcherManager's IndexWriter reference should be closed 
  //   (as evidenced by maybeRefreshBlocking() throwing AlreadyClosedException)
  Analyzer a = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false);
  Path tempDir = createTempDir("analyzingInfixContext");
  final MyAnalyzingInfixSuggester suggester = new MyAnalyzingInfixSuggester(newFSDirectory(tempDir), a, a, 3, false,
      AnalyzingInfixSuggester.DEFAULT_ALL_TERMS_REQUIRED, AnalyzingInfixSuggester.DEFAULT_HIGHLIGHT, true);
  suggester.build(new InputArrayIterator(sharedInputs));
  assertNull(suggester.getIndexWriter());
  assertNotNull(suggester.getSearcherManager());
  expectThrows(AlreadyClosedException.class, () -> suggester.getSearcherManager().maybeRefreshBlocking());
  
  suggester.close();

  // After instantiating from an already-built suggester dir:
  // * The IndexWriter should be null
  // * The SearcherManager should be non-null
  final MyAnalyzingInfixSuggester suggester2 = new MyAnalyzingInfixSuggester(newFSDirectory(tempDir), a, a, 3, false,
      AnalyzingInfixSuggester.DEFAULT_ALL_TERMS_REQUIRED, AnalyzingInfixSuggester.DEFAULT_HIGHLIGHT, true);
  assertNull(suggester2.getIndexWriter());
  assertNotNull(suggester2.getSearcherManager());

  suggester2.close();
  a.close();
}
 
Example 29, Project: lucene-solr, File: TestIndexWriter.java
public void testRandomOperations() throws Exception {
  IndexWriterConfig iwc = newIndexWriterConfig();
  iwc.setMergePolicy(new FilterMergePolicy(newMergePolicy()) {
    boolean keepFullyDeletedSegment = random().nextBoolean();

    @Override
    public boolean keepFullyDeletedSegment(IOSupplier<CodecReader> readerIOSupplier) {
      return keepFullyDeletedSegment;
    }
  });
  try (Directory dir = newDirectory();
       IndexWriter writer = new IndexWriter(dir, iwc);
       SearcherManager sm = new SearcherManager(writer, new SearcherFactory())) {
    Semaphore numOperations = new Semaphore(10 + random().nextInt(1000));
    boolean singleDoc = random().nextBoolean();
    Thread[] threads = new Thread[1 + random().nextInt(4)];
    CountDownLatch latch = new CountDownLatch(threads.length);
    for (int i = 0; i < threads.length; i++) {
      threads[i] = new Thread(() -> {
        latch.countDown();
        try {
          latch.await();
          while (numOperations.tryAcquire()) {
            String id = singleDoc ? "1" : Integer.toString(random().nextInt(10));
            Document doc = new Document();
            doc.add(new StringField("id", id, Field.Store.YES));
            if (random().nextInt(10) <= 2) {
              writer.updateDocument(new Term("id", id), doc);
            } else if (random().nextInt(10) <= 2) {
              writer.deleteDocuments(new Term("id", id));
            } else {
              writer.addDocument(doc);
            }
            if (random().nextInt(100) < 10) {
              sm.maybeRefreshBlocking();
            }
            if (random().nextInt(100) < 5) {
              writer.commit();
            }
            if (random().nextInt(100) < 1) {
              writer.forceMerge(1 + random().nextInt(10), random().nextBoolean());
            }
          }
        } catch (Exception e) {
          throw new AssertionError(e);
        }
      });
      threads[i].start();
    }
    for (Thread thread : threads) {
      thread.join();
    }
  }
}
 