下面列出了 org.apache.lucene.index.AtomicReaderContext#reader() 的实例代码,或者点击链接到 GitHub 查看源代码,也可以在右侧发表评论。
/**
 * Looks up the row with the given rowId across every index segment and, when at
 * least one segment contains it, returns an {@link IterableRow} over its records.
 *
 * @param rowId    the row identifier to seek in the {@code ROW_ID} field.
 * @param searcher supplies the {@link IndexReader} whose leaves are scanned.
 * @return an IterableRow for the matching segments, or {@code null} when no
 *         segment contains the rowId.
 * @throws IOException on any underlying index read failure.
 */
private IterableRow getIterableRow(String rowId, IndexSearcherCloseable searcher) throws IOException {
  BytesRef rowIdBytes = new BytesRef(rowId);
  List<AtomicReaderTermsEnum> matches = new ArrayList<AtomicReaderTermsEnum>();
  for (AtomicReaderContext leaf : searcher.getIndexReader().leaves()) {
    AtomicReader leafReader = leaf.reader();
    Fields fields = leafReader.fields();
    // A segment may expose no fields at all, or lack the ROW_ID field entirely.
    Terms terms = (fields == null) ? null : fields.terms(BlurConstants.ROW_ID);
    if (terms == null) {
      continue;
    }
    TermsEnum termsEnum = terms.iterator(null);
    // Only segments that actually contain this exact rowId term contribute records.
    if (termsEnum.seekExact(rowIdBytes, true)) {
      matches.add(new AtomicReaderTermsEnum(leafReader, termsEnum));
    }
  }
  return matches.isEmpty() ? null : new IterableRow(rowId, getRecords(matches));
}
@Test
public void testDocValuesFormat() throws IOException {
  RAMDirectory directory = new RAMDirectory();
  IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_43, new WhitespaceAnalyzer(Version.LUCENE_43));
  conf.setCodec(new Blur024Codec());
  // Close the writer even if addDocument throws, so the directory is not left
  // with a held write lock (the original leaked the writer on failure).
  IndexWriter writer = new IndexWriter(directory, conf);
  try {
    Document doc = new Document();
    doc.add(new StringField("f", "v", Store.YES));
    doc.add(new SortedDocValuesField("f", new BytesRef("v")));
    writer.addDocument(doc);
  } finally {
    writer.close();
  }
  // Close the reader even when the assertion fails (the original leaked the
  // reader on assertion failure).
  DirectoryReader reader = DirectoryReader.open(directory);
  try {
    AtomicReaderContext context = reader.leaves().get(0);
    AtomicReader atomicReader = context.reader();
    SortedDocValues sortedDocValues = atomicReader.getSortedDocValues("f");
    // The Blur024Codec is expected to back sorted doc values with the disk-based producer.
    assertTrue(sortedDocValues.getClass().getName().startsWith(DiskDocValuesProducer.class.getName()));
  } finally {
    reader.close();
  }
}
/**
 * Builds a searcher that wraps the given context in a security-aware reader and
 * indexes each secure leaf by the core cache key of the reader it wraps.
 *
 * @param accessControlFactory   creates the access-control reader used for filtering.
 * @param readAuthorizations     authorizations granted for reading.
 * @param discoverAuthorizations authorizations granted for discovery.
 * @param discoverableFields     fields visible under discovery-only access.
 * @param defaultReadMaskMessage message substituted for masked values.
 * @throws IOException if the secure reader cannot be constructed.
 */
public SecureIndexSearcher(IndexReaderContext context, ExecutorService executor,
    AccessControlFactory accessControlFactory, Collection<String> readAuthorizations,
    Collection<String> discoverAuthorizations, Set<String> discoverableFields, String defaultReadMaskMessage)
    throws IOException {
  super(context, executor);
  _accessControlFactory = accessControlFactory;
  _readAuthorizations = readAuthorizations;
  _discoverAuthorizations = discoverAuthorizations;
  _discoverableFields = discoverableFields;
  _defaultReadMaskMessage = defaultReadMaskMessage;
  _accessControlReader = _accessControlFactory.getReader(readAuthorizations, discoverAuthorizations,
      discoverableFields, _defaultReadMaskMessage);
  _secureIndexReader = getSecureIndexReader(context);
  // Map each original (unsecured) reader's core cache key to its secure leaf
  // context so lookups by cache key find the secured view.
  _leaveMap = new HashMap<Object, AtomicReaderContext>();
  for (AtomicReaderContext secureLeaf : _secureIndexReader.leaves()) {
    SecureAtomicReader secureReader = (SecureAtomicReader) secureLeaf.reader();
    Object cacheKey = secureReader.getOriginalReader().getCoreCacheKey();
    _leaveMap.put(cacheKey, secureLeaf);
  }
}
/**
 * Registers the scorers for a reader context, creating an {@code Info} entry
 * keyed by the context. If an entry already exists the call is a no-op apart
 * from a warning.
 *
 * @param scorers must have exactly {@code _length} entries.
 * @throws IOException when the scorer array has the wrong length.
 */
public void addScorers(AtomicReaderContext context, Scorer[] scorers) throws IOException {
  LOG.debug(getPrefix("adding scorers context [{0}] [{1}]"), context, Arrays.asList(scorers));
  if (scorers.length != _length) {
    throw new IOException("Scorer length is not correct expecting [" + _length + "] actual [" + scorers.length + "]");
  }
  Object contextKey = getKey(context);
  Info existing = _infoMap.get(contextKey);
  if (existing != null) {
    // NOTE(review): get-then-put is not atomic; if _infoMap is shared across
    // threads two callers could both miss and both put — confirm callers serialize.
    LOG.warn(getPrefix("Info about reader context [{0}] already created, existing Info [{1}] current reader [{2}]."),
        context, existing, context.reader());
    return;
  }
  _infoMap.put(contextKey, new Info(context, scorers, _locks, _instance));
}
/**
 * Sums the number of distinct rowId terms across all segments of the reader.
 *
 * @return the total term count, or {@code -1} when any segment cannot report
 *         its term count (Terms.size() returns a negative value in that case).
 * @throws IOException on index read failure.
 */
private long getTotalNumberOfRowIds(DirectoryReader reader) throws IOException {
  long total = 0;
  List<AtomicReaderContext> leaves = reader.leaves();
  for (AtomicReaderContext context : leaves) {
    AtomicReader atomicReader = context.reader();
    Terms terms = atomicReader.terms(BlurConstants.ROW_ID);
    // Fix: terms() returns null when a segment has no ROW_ID field; the
    // original dereferenced it unconditionally and would NPE. A segment
    // without the field simply contributes zero rows.
    if (terms == null) {
      continue;
    }
    long expectedInsertions = terms.size();
    if (expectedInsertions < 0) {
      // Segment cannot report an exact count — propagate "unknown".
      return -1;
    }
    total += expectedInsertions;
  }
  return total;
}
/**
 * Applies row-level deletes to the live index for every rowId found in the
 * freshly written segments of {@code directory}. Opens a throw-away reader
 * over the new data, then for each new leaf either runs the fast rowId
 * delete path (when supported) or falls back to a merge-sort style check
 * driven by the {@code Action} callback below.
 *
 * @param directory    directory holding the newly written segments to merge in.
 * @param indexWriter  writer on the live index; receives deleteDocuments calls.
 * @param searcher     searcher over the live index, used by the fallback path.
 * @param shard        shard name; used to derive the shard id for partitioning.
 * @param emitDeletes  whether delete counters/records should be emitted.
 * @param configuration Hadoop configuration (currently unused in this body —
 *                      TODO confirm whether callers rely on it).
 * @throws IOException on any index read/write failure.
 */
private void applyDeletes(Directory directory, IndexWriter indexWriter, IndexSearcherCloseable searcher,
    String shard, boolean emitDeletes, Configuration configuration) throws IOException {
  DirectoryReader newReader = DirectoryReader.open(directory);
  try {
    List<AtomicReaderContext> newLeaves = newReader.getContext().leaves();
    BlurPartitioner blurPartitioner = new BlurPartitioner();
    Text key = new Text();
    int numberOfShards = _shardContext.getTableContext().getDescriptor().getShardCount();
    int shardId = ShardUtil.getShardIndex(shard);
    // Callback used by the fallback path: when a rowId present in the new
    // data also exists (with at least one live doc) in the live index,
    // delete that row from the live index so the new version replaces it.
    Action action = new Action() {
      @Override
      public void found(AtomicReader reader, Bits liveDocs, TermsEnum termsEnum) throws IOException {
        DocsEnum docsEnum = termsEnum.docs(liveDocs, null);
        if (docsEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
          // deepCopyOf: termsEnum.term() is reused by the enum, so the Term
          // must own its own copy of the bytes.
          indexWriter.deleteDocuments(new Term(BlurConstants.ROW_ID, BytesRef.deepCopyOf(termsEnum.term())));
        }
      }
    };
    LOG.info("Applying deletes for table [{0}] shard [{1}] new reader [{2}]", _table, shard, newReader);
    // Internally-produced data is trusted to be partitioned correctly, so the
    // per-rowId shard check can be skipped.
    boolean skipCheckRowIds = isInternal(newReader);
    LOG.info("Skip rowid check [{0}] for table [{1}] shard [{2}] new reader [{3}]", skipCheckRowIds, _table, shard,
        newReader);
    for (AtomicReaderContext context : newLeaves) {
      AtomicReader newAtomicReader = context.reader();
      if (isFastRowIdDeleteSupported(newAtomicReader)) {
        runNewRowIdCheckAndDelete(indexWriter, emitDeletes, blurPartitioner, key, numberOfShards, shardId,
            newAtomicReader, skipCheckRowIds);
      } else {
        runOldMergeSortRowIdCheckAndDelete(emitDeletes, searcher.getIndexReader(), blurPartitioner, key,
            numberOfShards, shardId, action, newAtomicReader);
      }
    }
  } finally {
    // Always release the temporary reader over the new segments.
    newReader.close();
  }
}
/**
 * Endlessly scans every term of every field in the index, printing a running
 * hash of all term bytes per pass. This loops forever by design (apparently a
 * stress/soak tool) and therefore never returns normally — it only exits by
 * throwing.
 *
 * @throws IOException          propagated from index access.
 * @throws InterruptedException propagated when the underlying work is interrupted.
 */
@Override
public Boolean execute(IndexContext context) throws IOException, InterruptedException {
  try {
    IndexReader indexReader = context.getIndexReader();
    while (true) {
      long hash = 0;
      for (AtomicReaderContext atomicReaderContext : indexReader.leaves()) {
        AtomicReader reader = atomicReaderContext.reader();
        for (String field : reader.fields()) {
          Terms terms = reader.terms(field);
          // Fix: terms(field) can return null (e.g. a docvalues-only field);
          // the original dereferenced it unconditionally.
          if (terms == null) {
            continue;
          }
          BytesRef bytesRef;
          TermsEnum iterator = terms.iterator(null);
          while ((bytesRef = iterator.next()) != null) {
            hash += bytesRef.hashCode();
          }
        }
      }
      System.out.println("hashcode = " + hash);
    }
  } catch (IOException e) {
    e.printStackTrace();
    throw e;
  } catch (Throwable t) {
    t.printStackTrace();
    if (t instanceof InterruptedException) {
      // Fix: explicit cast — rethrowing a variable typed Throwable relied on
      // fragile precise-rethrow analysis against the declared throws clause.
      throw (InterruptedException) t;
    } else if (t instanceof RuntimeException) {
      throw (RuntimeException) t;
    }
    throw new RuntimeException(t);
  }
}
/**
 * Builds the set of documents visible under the configured authorizations by
 * OR-ing a cached DocIdSet for every visibility term of {@code _fieldName}
 * that the evaluator accepts.
 *
 * @param context    the segment to evaluate.
 * @param acceptDocs deliberately NOT applied to the per-term caches (see
 *                   comment below) so cache entries stay version-agnostic.
 * @return the logical OR of all visible terms' doc id sets; empty when the
 *         field is absent.
 * @throws IOException on index read failure.
 */
@Override
public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
  AtomicReader reader = context.reader();
  List<DocIdSet> list = new ArrayList<DocIdSet>();
  Fields fields = reader.fields();
  // Fix: fields() may return null for a segment with no postings at all
  // (this file already guards for that elsewhere); treat it like a missing field.
  if (fields == null) {
    return DocIdSet.EMPTY_DOCIDSET;
  }
  Terms terms = fields.terms(_fieldName);
  if (terms == null) {
    // if field is not present then show nothing.
    return DocIdSet.EMPTY_DOCIDSET;
  }
  TermsEnum iterator = terms.iterator(null);
  BytesRef bytesRef;
  DocumentVisibilityEvaluator visibilityEvaluator = new DocumentVisibilityEvaluator(_authorizations);
  while ((bytesRef = iterator.next()) != null) {
    if (isVisible(visibilityEvaluator, bytesRef)) {
      DocIdSet docIdSet = _filterCacheStrategy.getDocIdSet(_fieldName, bytesRef, reader);
      if (docIdSet != null) {
        list.add(docIdSet);
      } else {
        // Do not use acceptDocs because we want the acl cache to be version
        // agnostic.
        DocsEnum docsEnum = iterator.docs(null, null);
        list.add(buildCache(reader, docsEnum, bytesRef));
      }
    }
  }
  return getLogicalOr(list);
}
/**
 * Captures per-segment scoring state: the segment's reader, its scorers, a
 * bit set sized to the segment's maxDoc, and the locks guarding updates.
 */
Info(AtomicReaderContext context, Scorer[] scorers, Lock[] locks, String instance) {
  _instance = instance;
  _scorers = scorers;
  _locks = locks;
  _reader = context.reader();
  // Cache derived values up front so later access never touches the context.
  _readerStr = _reader.toString();
  _maxDoc = _reader.maxDoc();
  _bitSet = new OpenBitSet(_maxDoc);
}
/**
 * Caching wrapper around {@code _filter}: computes the filter's DocIdSet once
 * per reader core and serves subsequent calls from {@code _cache}.
 * Falls back to the uncached filter when the reader cannot be resolved to a
 * SegmentReader/Directory (non-segment readers cannot be keyed safely).
 *
 * @param context    segment to filter.
 * @param acceptDocs applied only when wrapping the result, never baked into
 *                   the cached set, so the cache stays deletes-agnostic.
 * @throws IOException on index read failure or filter evaluation failure.
 */
@Override
public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
  AtomicReader reader = context.reader();
  Object key = reader.getCoreCacheKey();
  // Fast path: already cached for this reader core.
  DocIdSet docIdSet = _cache.get(key);
  if (docIdSet != null) {
    _hits.incrementAndGet();
    return BitsFilteredDocIdSet.wrap(docIdSet, acceptDocs);
  }
  // This will only allow a single instance be created per reader per filter
  Object lock = getLock(key);
  synchronized (lock) {
    // NOTE(review): the cache is not re-checked inside the lock, so a thread
    // that raced past the fast path recomputes and re-puts — presumably
    // acceptable (idempotent overwrite); confirm docIdSetToCache is cheap enough.
    SegmentReader segmentReader = getSegmentReader(reader);
    if (segmentReader == null) {
      LOG.warn("Could not find SegmentReader from [{0}]", reader);
      return _filter.getDocIdSet(context, acceptDocs);
    }
    Directory directory = getDirectory(segmentReader);
    if (directory == null) {
      LOG.warn("Could not find Directory from [{0}]", segmentReader);
      return _filter.getDocIdSet(context, acceptDocs);
    }
    _misses.incrementAndGet();
    String segmentName = segmentReader.getSegmentName();
    // Compute without acceptDocs so the cached set is independent of deletes,
    // then apply acceptDocs only on the returned wrapper.
    docIdSet = docIdSetToCache(_filter.getDocIdSet(context, null), reader, segmentName, directory);
    _cache.put(key, docIdSet);
    return BitsFilteredDocIdSet.wrap(docIdSet, acceptDocs);
  }
}
/**
 * Test helper: asserts that no terms are visible for {@code fieldName} in any
 * segment of the searcher's reader. Fails (printing the offending term) if a
 * term is found.
 *
 * @throws IOException on index read failure.
 */
private void checkTerms(IndexSearcher searcher, String fieldName) throws IOException {
  IndexReader reader = searcher.getIndexReader();
  for (AtomicReaderContext context : reader.leaves()) {
    AtomicReader atomicReader = context.reader();
    Fields fields = atomicReader.fields();
    // Fix: fields()/terms() can be null for a segment that has no postings or
    // lacks the field entirely; the original NPE'd instead of treating that
    // as "no visible terms", which is exactly what this check hopes to see.
    if (fields == null) {
      continue;
    }
    Terms terms = fields.terms(fieldName);
    if (terms == null) {
      continue;
    }
    TermsEnum iterator = terms.iterator(null);
    BytesRef bytesRef = iterator.next();
    if (bytesRef != null) {
      System.out.println(bytesRef.utf8ToString());
      fail("There are only restricted terms for this field [" + fieldName + "]");
    }
  }
}
/**
 * Creates the per-segment scorer for this weight.
 *
 * @param acceptDocs live-doc filter passed straight through to the scorer.
 */
@Override
public Scorer scorer(AtomicReaderContext context, Bits acceptDocs) throws IOException {
  AtomicReader segmentReader = context.reader();
  return new ImageScorer(segmentReader, acceptDocs, this);
}
/**
 * Collector callback: remember the reader of the segment about to be scored.
 */
@Override
public void setNextReader(AtomicReaderContext context) throws IOException {
  this.reader = context.reader();
}
/**
 * Registers every segment reader with the leak detector, tagged with the
 * given message plus this table/shard, so lingering readers can be reported.
 */
private void checkForMemoryLeaks(List<AtomicReaderContext> leaves, String message) {
  String table = _tableContext.getTable();
  String shard = _shardContext.getShard();
  for (AtomicReaderContext leaf : leaves) {
    MemoryLeakDetector.record(leaf.reader(), message, table, shard);
  }
}
/**
 * Collector callback: track both the current segment context and its reader.
 */
@Override
public void setNextReader(AtomicReaderContext context) throws IOException {
  this.currentReader = context.reader();
  this.arc = context;
}
/**
 * Collector callback: track both the current segment context and its reader.
 */
@Override
public void setNextReader(AtomicReaderContext context) throws IOException {
  this.currentReader = context.reader();
  this.arc = context;
}