Listed below are code examples showing how to use the org.apache.lucene.search.SearcherFactory API class, or you can follow the links to view the source code on GitHub.
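Most of the examples below pass a plain new SearcherFactory() to SearcherManager; the other common pattern is to subclass it and override newSearcher. As a minimal sketch (not taken from the examples below), here is a hypothetical WarmingSearcherFactory that warms each new searcher before SearcherManager publishes it; the warm-up query is just an illustrative placeholder:

import java.io.IOException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.SearcherFactory;

public class WarmingSearcherFactory extends SearcherFactory {
  @Override
  public IndexSearcher newSearcher(IndexReader reader, IndexReader previousReader) throws IOException {
    IndexSearcher searcher = new IndexSearcher(reader);
    // Touch the index once so file-system caches are warm before the searcher goes live:
    searcher.search(new MatchAllDocsQuery(), 1);
    return searcher;
  }
}

Any of the SearcherManager constructors shown below accepts such a factory in place of new SearcherFactory().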
public SimpleReplicaNode(Random random, int id, int tcpPort, Path indexPath, long curPrimaryGen, int primaryTCPPort,
SearcherFactory searcherFactory, boolean doCheckIndexOnClose) throws IOException {
super(id, getDirectory(random, id, indexPath, doCheckIndexOnClose), searcherFactory, System.out);
this.tcpPort = tcpPort;
this.random = new Random(random.nextLong());
// Random IO throttling on file copies: 5 - 20 MB/sec:
double mbPerSec = 5 * (1.0 + 3*random.nextDouble());
message(String.format(Locale.ROOT, "top: will rate limit file fetch to %.2f MB/sec", mbPerSec));
fetchRateLimiter = new RateLimiter.SimpleRateLimiter(mbPerSec);
this.curPrimaryTCPPort = primaryTCPPort;
start(curPrimaryGen);
// Handles fetching files from primary:
jobs = new Jobs(this);
jobs.setName("R" + id + ".copyJobs");
jobs.setDaemon(true);
jobs.start();
}
public void testSoftDeleteWithTryUpdateDocValue() throws Exception {
Directory dir = newDirectory();
IndexWriterConfig config = newIndexWriterConfig().setSoftDeletesField("soft_delete")
.setMergePolicy(new SoftDeletesRetentionMergePolicy("soft_delete", MatchAllDocsQuery::new, newLogMergePolicy()));
IndexWriter writer = new IndexWriter(dir, config);
SearcherManager sm = new SearcherManager(writer, new SearcherFactory());
Document d = new Document();
d.add(new StringField("id", "0", Field.Store.YES));
writer.addDocument(d);
sm.maybeRefreshBlocking();
// doUpdate is a test helper that retries IndexWriter#tryUpdateDocValue until it succeeds;
// setting the soft_delete doc-values field soft-deletes the document:
doUpdate(new Term("id", "0"), writer,
new NumericDocValuesField("soft_delete", 1), new NumericDocValuesField("other-field", 1));
sm.maybeRefreshBlocking();
assertEquals(1, writer.cloneSegmentInfos().size());
SegmentCommitInfo si = writer.cloneSegmentInfos().info(0);
assertEquals(1, si.getSoftDelCount());
assertEquals(1, si.info.maxDoc());
IOUtils.close(sm, writer, dir);
}
ExternalSearcherManager(SearcherManager internalSearcherManager, SearcherFactory searcherFactory) throws IOException {
IndexSearcher acquire = internalSearcherManager.acquire();
try {
IndexReader indexReader = acquire.getIndexReader();
assert indexReader instanceof ElasticsearchDirectoryReader :
"searcher's IndexReader should be an ElasticsearchDirectoryReader, but got " + indexReader;
indexReader.incRef(); // steal the reader - getSearcher will decrement if it fails
current = SearcherManager.getSearcher(searcherFactory, indexReader, null);
} finally {
internalSearcherManager.release(acquire);
}
this.searcherFactory = searcherFactory;
this.internalSearcherManager = internalSearcherManager;
}
public ShadowEngine(EngineConfig engineConfig) {
super(engineConfig);
SearcherFactory searcherFactory = new EngineSearcherFactory(engineConfig);
final long nonexistentRetryTime = engineConfig.getIndexSettings()
.getAsTime(NONEXISTENT_INDEX_RETRY_WAIT, DEFAULT_NONEXISTENT_INDEX_RETRY_WAIT)
.getMillis();
try {
DirectoryReader reader = null;
store.incRef();
boolean success = false;
try {
if (Lucene.waitForIndex(store.directory(), nonexistentRetryTime)) {
reader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(store.directory()), shardId);
this.searcherManager = new SearcherManager(reader, searcherFactory);
this.lastCommittedSegmentInfos = readLastCommittedSegmentInfos(searcherManager, store);
success = true;
} else {
throw new IllegalStateException("failed to open a shadow engine after " +
nonexistentRetryTime + "ms, directory is not an index");
}
} catch (Throwable e) {
logger.warn("failed to create new reader", e);
throw e;
} finally {
if (success == false) {
IOUtils.closeWhileHandlingException(reader);
store.decRef();
}
}
} catch (IOException ex) {
throw new EngineCreationFailureException(shardId, "failed to open index reader", ex);
}
logger.trace("created new ShadowEngine");
}
public ReplicaNode(int id, Directory dir, SearcherFactory searcherFactory, PrintStream printStream) throws IOException {
super(id, dir, searcherFactory, printStream);
if (dir.getPendingDeletions().isEmpty() == false) {
throw new IllegalArgumentException("Directory " + dir + " still has pending deleted files; cannot initialize IndexWriter");
}
boolean success = false;
try {
message("top: init replica dir=" + dir);
// Obtain a write lock on this index since we "act like" an IndexWriter, to prevent any other IndexWriter or ReplicaNode from using it:
writeFileLock = dir.obtainLock(IndexWriter.WRITE_LOCK_NAME);
state = "init";
deleter = new ReplicaFileDeleter(this, dir);
success = true;
} catch (Throwable t) {
message("exc on init:");
t.printStackTrace(printStream);
throw t;
} finally {
if (success == false) {
IOUtils.closeWhileHandlingException(this);
}
}
}
public SimplePrimaryNode(Random random, Path indexPath, int id, int tcpPort, long primaryGen, long forcePrimaryVersion, SearcherFactory searcherFactory,
boolean doFlipBitsDuringCopy, boolean doCheckIndexOnClose) throws IOException {
super(initWriter(id, random, indexPath, doCheckIndexOnClose), id, primaryGen, forcePrimaryVersion, searcherFactory, System.out);
this.tcpPort = tcpPort;
this.random = new Random(random.nextLong());
this.doFlipBitsDuringCopy = doFlipBitsDuringCopy;
}
private SearcherTaxonomyManager getSearcherTaxonomyManager(Directory indexDir, Directory taxoDir, SearcherFactory searcherFactory) throws IOException {
if (random().nextBoolean()) {
return new SearcherTaxonomyManager(indexDir, taxoDir, searcherFactory);
} else {
IndexReader reader = DirectoryReader.open(indexDir);
DirectoryTaxonomyReader taxoReader = new DirectoryTaxonomyReader(taxoDir);
return new SearcherTaxonomyManager(reader, taxoReader, searcherFactory);
}
}
public PrimaryNode(IndexWriter writer, int id, long primaryGen, long forcePrimaryVersion,
SearcherFactory searcherFactory, PrintStream printStream) throws IOException {
super(id, writer.getDirectory(), searcherFactory, printStream);
message("top: now init primary");
this.writer = writer;
this.primaryGen = primaryGen;
try {
// So that when primary node's IndexWriter finishes a merge, but before it cuts over to the merged segment,
// it copies it out to the replicas. This ensures the whole system's NRT latency remains low even when a
// large merge completes:
writer.getConfig().setMergedSegmentWarmer(new PreCopyMergedSegmentWarmer(this));
message("IWC:\n" + writer.getConfig());
message("dir:\n" + writer.getDirectory());
message("commitData: " + writer.getLiveCommitData());
// Record our primaryGen in the userData, and set initial version to 0:
Map<String,String> commitData = new HashMap<>();
Iterable<Map.Entry<String,String>> iter = writer.getLiveCommitData();
if (iter != null) {
for(Map.Entry<String,String> ent : iter) {
commitData.put(ent.getKey(), ent.getValue());
}
}
commitData.put(PRIMARY_GEN_KEY, Long.toString(primaryGen));
if (commitData.get(VERSION_KEY) == null) {
commitData.put(VERSION_KEY, "0");
message("add initial commitData version=0");
} else {
message("keep current commitData version=" + commitData.get(VERSION_KEY));
}
writer.setLiveCommitData(commitData.entrySet(), false);
// We forcefully advance the SIS version to an unused future version. This is necessary if the previous primary crashed and we are
// starting up on an "older" index, else versions can be illegally reused but show different results:
if (forcePrimaryVersion != -1) {
message("now forcePrimaryVersion to version=" + forcePrimaryVersion);
writer.advanceSegmentInfosVersion(forcePrimaryVersion);
}
mgr = new SearcherManager(writer, true, true, searcherFactory);
setCurrentInfos(Collections.<String>emptySet());
message("init: infos version=" + curInfos.getVersion());
} catch (Throwable t) {
message("init: exception");
t.printStackTrace(printStream);
throw new RuntimeException(t);
}
}
public Node(int id, Directory dir, SearcherFactory searcherFactory, PrintStream printStream) {
this.id = id;
this.dir = dir;
this.searcherFactory = searcherFactory;
this.printStream = printStream;
}
/** Creates near-real-time searcher and taxonomy reader
* from the corresponding writers. */
public SearcherTaxonomyManager(IndexWriter writer, SearcherFactory searcherFactory, DirectoryTaxonomyWriter taxoWriter) throws IOException {
this(writer, true, searcherFactory, taxoWriter);
}
public void testRandomOperations() throws Exception {
IndexWriterConfig iwc = newIndexWriterConfig();
iwc.setMergePolicy(new FilterMergePolicy(newMergePolicy()) {
boolean keepFullyDeletedSegment = random().nextBoolean();
@Override
public boolean keepFullyDeletedSegment(IOSupplier<CodecReader> readerIOSupplier) {
return keepFullyDeletedSegment;
}
});
try (Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, iwc);
SearcherManager sm = new SearcherManager(writer, new SearcherFactory())) {
Semaphore numOperations = new Semaphore(10 + random().nextInt(1000));
boolean singleDoc = random().nextBoolean();
Thread[] threads = new Thread[1 + random().nextInt(4)];
CountDownLatch latch = new CountDownLatch(threads.length);
for (int i = 0; i < threads.length; i++) {
threads[i] = new Thread(() -> {
latch.countDown();
try {
latch.await();
while (numOperations.tryAcquire()) {
String id = singleDoc ? "1" : Integer.toString(random().nextInt(10));
Document doc = new Document();
doc.add(new StringField("id", id, Field.Store.YES));
if (random().nextInt(10) <= 2) {
writer.updateDocument(new Term("id", id), doc);
} else if (random().nextInt(10) <= 2) {
writer.deleteDocuments(new Term("id", id));
} else {
writer.addDocument(doc);
}
if (random().nextInt(100) < 10) {
sm.maybeRefreshBlocking();
}
if (random().nextInt(100) < 5) {
writer.commit();
}
if (random().nextInt(100) < 1) {
writer.forceMerge(1 + random().nextInt(10), random().nextBoolean());
}
}
} catch (Exception e) {
throw new AssertionError(e);
}
});
threads[i].start();
}
for (Thread thread : threads) {
thread.join();
}
}
}
public void testRandomOperationsWithSoftDeletes() throws Exception {
IndexWriterConfig iwc = newIndexWriterConfig();
AtomicInteger seqNo = new AtomicInteger(-1);
AtomicInteger retainingSeqNo = new AtomicInteger();
iwc.setSoftDeletesField("soft_deletes");
iwc.setMergePolicy(new SoftDeletesRetentionMergePolicy("soft_deletes",
() -> LongPoint.newRangeQuery("seq_no", retainingSeqNo.longValue(), Long.MAX_VALUE), newMergePolicy()));
try (Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, iwc);
SearcherManager sm = new SearcherManager(writer, new SearcherFactory())) {
Semaphore numOperations = new Semaphore(10 + random().nextInt(1000));
boolean singleDoc = random().nextBoolean();
Thread[] threads = new Thread[1 + random().nextInt(4)];
CountDownLatch latch = new CountDownLatch(threads.length);
for (int i = 0; i < threads.length; i++) {
threads[i] = new Thread(() -> {
latch.countDown();
try {
latch.await();
while (numOperations.tryAcquire()) {
String id = singleDoc ? "1" : Integer.toString(random().nextInt(10));
Document doc = new Document();
doc.add(new StringField("id", id, Field.Store.YES));
doc.add(new LongPoint("seq_no", seqNo.getAndIncrement()));
if (random().nextInt(10) <= 2) {
if (random().nextBoolean()) {
doc.add(new NumericDocValuesField(iwc.softDeletesField, 1));
}
writer.softUpdateDocument(new Term("id", id), doc, new NumericDocValuesField(iwc.softDeletesField, 1));
} else {
writer.addDocument(doc);
}
if (random().nextInt(100) < 10) {
int min = retainingSeqNo.get();
int max = seqNo.get();
if (min < max && random().nextBoolean()) {
retainingSeqNo.compareAndSet(min, min - random().nextInt(max - min));
}
}
if (random().nextInt(100) < 10) {
sm.maybeRefreshBlocking();
}
if (random().nextInt(100) < 5) {
writer.commit();
}
if (random().nextInt(100) < 1) {
writer.forceMerge(1 + random().nextInt(10), random().nextBoolean());
}
}
} catch (Exception e) {
throw new AssertionError(e);
}
});
threads[i].start();
}
for (Thread thread : threads) {
thread.join();
}
}
}
public void testMaxCompletedSequenceNumber() throws IOException, InterruptedException {
try (Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig())) {
assertEquals(1, writer.addDocument(new Document()));
assertEquals(2, writer.updateDocument(new Term("foo", "bar"), new Document()));
writer.flushNextBuffer();
assertEquals(3, writer.commit());
assertEquals(4, writer.addDocument(new Document()));
assertEquals(4, writer.getMaxCompletedSequenceNumber());
// commit moves seqNo by 2 since there is one DWPT that could still be in-flight
assertEquals(6, writer.commit());
assertEquals(6, writer.getMaxCompletedSequenceNumber());
assertEquals(7, writer.addDocument(new Document()));
writer.getReader().close();
// getReader moves seqNo by 2 since there is one DWPT that could still be in-flight
assertEquals(9, writer.getMaxCompletedSequenceNumber());
}
try (Directory dir = newDirectory();
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig());
SearcherManager manager = new SearcherManager(writer, new SearcherFactory())) {
CountDownLatch start = new CountDownLatch(1);
int numDocs = 100 + random().nextInt(500);
AtomicLong maxCompletedSeqID = new AtomicLong(-1);
Thread[] threads = new Thread[2 + random().nextInt(2)];
for (int i = 0; i < threads.length; i++) {
int idx = i;
threads[i] = new Thread(() -> {
try {
start.await();
for (int j = 0; j < numDocs; j++) {
Document doc = new Document();
String id = idx + "-" + j;
doc.add(new StringField("id", id, Field.Store.NO));
long seqNo = writer.addDocument(doc);
if (maxCompletedSeqID.get() < seqNo) {
long maxCompletedSequenceNumber = writer.getMaxCompletedSequenceNumber();
manager.maybeRefreshBlocking();
maxCompletedSeqID.updateAndGet(oldVal-> Math.max(oldVal, maxCompletedSequenceNumber));
}
IndexSearcher acquire = manager.acquire();
try {
assertEquals(1, acquire.search(new TermQuery(new Term("id", id)), 10).totalHits.value);
} finally {
manager.release(acquire);
}
}
} catch (Exception e) {
throw new AssertionError(e);
}
});
threads[i].start();
}
start.countDown();
for (int i = 0; i < threads.length; i++) {
threads[i].join();
}
}
}
public void testTryDeleteDocument() throws IOException {
Directory directory = createIndex();
IndexWriter writer = getWriter(directory);
ReferenceManager<IndexSearcher> mgr = new SearcherManager(writer, new SearcherFactory());
IndexSearcher searcher = mgr.acquire();
TopDocs topDocs = searcher.search(new TermQuery(new Term("foo", "0")), 100);
assertEquals(1, topDocs.totalHits.value);
long result;
if (random().nextBoolean()) {
IndexReader r = DirectoryReader.open(writer);
result = writer.tryDeleteDocument(r, 0);
r.close();
} else {
result = writer.tryDeleteDocument(searcher.getIndexReader(), 0);
}
// The tryDeleteDocument should have succeeded:
assertTrue(result != -1);
assertTrue(writer.hasDeletions());
if (random().nextBoolean()) {
writer.commit();
}
assertTrue(writer.hasDeletions());
mgr.maybeRefresh();
searcher = mgr.acquire();
topDocs = searcher.search(new TermQuery(new Term("foo", "0")), 100);
assertEquals(0, topDocs.totalHits.value);
}
public Searcher(FullTextIndexShared index, Analyzer analyzer) throws IOException {
this.index = index;
this.searcherManager = new SearcherManager(index.open(), new SearcherFactory());
}
public void testDeleteDocuments() throws IOException {
Directory directory = createIndex();
IndexWriter writer = getWriter(directory);
ReferenceManager<IndexSearcher> mgr = new SearcherManager(writer, new SearcherFactory());
IndexSearcher searcher = mgr.acquire();
TopDocs topDocs = searcher.search(new TermQuery(new Term("foo", "0")), 100);
assertEquals(1, topDocs.totalHits.value);
long result = writer.deleteDocuments(new TermQuery(new Term("foo", "0")));
assertTrue(result != -1);
// writer.commit();
assertTrue(writer.hasDeletions());
mgr.maybeRefresh();
searcher = mgr.acquire();
topDocs = searcher.search(new TermQuery(new Term("foo", "0")), 100);
assertEquals(0, topDocs.totalHits.value);
}