org.apache.lucene.index.FilterLeafReader Code Examples

The following examples show how to use the org.apache.lucene.index.FilterLeafReader API. Each snippet is taken from an open-source project; you can also follow the project links to view the full source files on GitHub.
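FilterLeafReader is a delegating reader: every method forwards to the wrapped reader, exposed to subclasses as the field `in`. Since Lucene 7 the two cache-helper accessors are left abstract, so even a pass-through subclass must implement them. A minimal sketch before the real examples (the class name PassThroughLeafReader is ours, not from the projects below):

import org.apache.lucene.index.FilterLeafReader;
import org.apache.lucene.index.LeafReader;

// A pass-through wrapper: all reads are delegated to `in`.
public class PassThroughLeafReader extends FilterLeafReader {
  public PassThroughLeafReader(LeafReader in) {
    super(in);
  }

  @Override
  public CacheHelper getCoreCacheHelper() {
    // Forwarding the helpers is only safe because this wrapper
    // does not change the content of the index.
    return in.getCoreCacheHelper();
  }

  @Override
  public CacheHelper getReaderCacheHelper() {
    return in.getReaderCacheHelper();
  }
}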

Example 1 - Project: lucene-solr, File: TestQueryBitSetProducer.java
public DummyDirectoryReader(DirectoryReader in) throws IOException {
  super(in, new SubReaderWrapper() {
    @Override
    public LeafReader wrap(LeafReader reader) {
      return new FilterLeafReader(reader) {

        @Override
        public CacheHelper getCoreCacheHelper() {
          return null;
        }

        @Override
        public CacheHelper getReaderCacheHelper() {
          return null;
        }};
    }
  });
}
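A hedged usage sketch for the wrapper above (it assumes DummyDirectoryReader also implements the remaining abstract FilterDirectoryReader methods, which the excerpt omits): because every leaf reports null cache helpers, queries run against this view are never cached.

import java.io.IOException;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.store.Directory;

// Open a view of the index whose leaves expose no cache helpers.
static DirectoryReader openUncachedView(Directory dir) throws IOException {
  return new DummyDirectoryReader(DirectoryReader.open(dir));
}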
 
Example 2 - Project: lucene-solr, File: TestLRUQueryCache.java
public DummyDirectoryReader(DirectoryReader in) throws IOException {
  super(in, new SubReaderWrapper() {
    @Override
    public LeafReader wrap(LeafReader reader) {
      return new FilterLeafReader(reader) {
        @Override
        public CacheHelper getCoreCacheHelper() {
          return null;
        }
        @Override
        public CacheHelper getReaderCacheHelper() {
          return null;
        }
      };
    }
  });
}
 
Example 3 - Project: lucene-solr, File: LukeRequestHandler.java
/** Returns the sum of RAM bytes used by each segment */
private static long getIndexHeapUsed(DirectoryReader reader) {
  return reader.leaves().stream()
      .map(LeafReaderContext::reader)
      .map(FilterLeafReader::unwrap)
      .map(leafReader -> {
        if (leafReader instanceof Accountable) {
          return ((Accountable) leafReader).ramBytesUsed();
        } else {
          return -1L; // unsupported
        }
      })
      .mapToLong(Long::longValue)
      .reduce(0, (left, right) -> left == -1 || right == -1 ? -1 : left + right);
  // if any leaves are unsupported (-1), we ultimately return -1.
}
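The reduce step is the subtle part: its accumulator propagates -1, so a single leaf that is not Accountable poisons the whole sum. A standalone illustration (not project code):

import java.util.stream.LongStream;

// op(0, 1024) = 1024; op(1024, -1) = -1; op(-1, 2048) = -1
long total = LongStream.of(1024, -1, 2048)
    .reduce(0, (left, right) -> left == -1 || right == -1 ? -1 : left + right);
// total == -1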
 
Example 4 - Project: Elasticsearch, File: Engine.java
/**
 * Tries to extract a segment reader from the given index reader.
 * If no SegmentReader can be extracted an {@link IllegalStateException} is thrown.
 */
protected static SegmentReader segmentReader(LeafReader reader) {
    if (reader instanceof SegmentReader) {
        return (SegmentReader) reader;
    } else if (reader instanceof FilterLeafReader) {
        final FilterLeafReader fReader = (FilterLeafReader) reader;
        return segmentReader(FilterLeafReader.unwrap(fReader));
    }
    // hard fail - we can't get a SegmentReader
    throw new IllegalStateException("Can not extract segment reader from given index reader [" + reader + "]");
}
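A hedged usage sketch: since segmentReader unwraps filter layers before recursing, it works on arbitrarily wrapped leaves and hard-fails on anything that is not ultimately a SegmentReader.

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.SegmentReader;

// Print the segment backing each leaf; throws IllegalStateException if a
// leaf cannot be unwrapped to a SegmentReader.
static void printSegments(DirectoryReader reader) {
  for (LeafReaderContext ctx : reader.leaves()) {
    SegmentReader sr = segmentReader(ctx.reader());
    System.out.println(sr.getSegmentName() + " maxDoc=" + sr.maxDoc());
  }
}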
 
Example 5 - Project: lucene-solr
@Override
public LeafReader wrap(LeafReader reader) {
  return new FilterLeafReader(reader) {
    BitSet seenDocIDs = new BitSet();

    @Override
    public Fields getTermVectors(int docID) throws IOException {
      // if we're invoked by ParallelLeafReader then we can't do our assertion. TODO see LUCENE-6868
      if (callStackContains(ParallelLeafReader.class) == false
          && callStackContains(CheckIndex.class) == false) {
        assertFalse("Should not request TVs for doc more than once.", seenDocIDs.get(docID));
        seenDocIDs.set(docID);
      }

      return super.getTermVectors(docID);
    }

    @Override
    public CacheHelper getCoreCacheHelper() {
      return null;
    }

    @Override
    public CacheHelper getReaderCacheHelper() {
      return null;
    }
  };
}
 
Example 6 - Project: lucene-solr, File: AnalyzingInfixSuggester.java
@Override
public long ramBytesUsed() {
  long mem = RamUsageEstimator.shallowSizeOf(this);
  try {
    if (searcherMgr != null) {
      SearcherManager mgr;
      IndexSearcher searcher;
      synchronized (searcherMgrLock) {
        mgr = searcherMgr; // acquire & release on same SearcherManager, via local reference
        searcher = mgr.acquire();
      }
      try {
        for (LeafReaderContext context : searcher.getIndexReader().leaves()) {
          LeafReader reader = FilterLeafReader.unwrap(context.reader());
          if (reader instanceof SegmentReader) {
            mem += ((SegmentReader) reader).ramBytesUsed(); // cast the unwrapped reader, not the possibly wrapped context.reader()
          }
        }
      } finally {
        mgr.release(searcher);
      }
    }
    return mem;
  } catch (IOException ioe) {
    throw new RuntimeException(ioe);
  }
}
 
Example 7 - Project: lucene-solr, File: AnalyzingInfixSuggester.java
@Override
public Collection<Accountable> getChildResources() {
  List<Accountable> resources = new ArrayList<>();
  try {
    if (searcherMgr != null) {
      SearcherManager mgr;
      IndexSearcher searcher;
      synchronized (searcherMgrLock) {
        mgr = searcherMgr; // acquire & release on same SearcherManager, via local reference
        searcher = mgr.acquire();
      }
      try {
        for (LeafReaderContext context : searcher.getIndexReader().leaves()) {
          LeafReader reader = FilterLeafReader.unwrap(context.reader());
          if (reader instanceof SegmentReader) {
            resources.add(Accountables.namedAccountable("segment", (SegmentReader)reader));
          }
        }
      } finally {
        mgr.release(searcher);
      }
    }
    return Collections.unmodifiableList(resources);
  } catch (IOException ioe) {
    throw new RuntimeException(ioe);
  }
}
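Both methods above follow the same discipline: capture searcherMgr into a local under the lock, acquire from that local, and release on the same instance in a finally block, so a concurrent swap of the field cannot mismatch the acquire/release pair. Distilled into a sketch (not project code):

import java.io.IOException;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.SearcherManager;

// Acquire and release against the same SearcherManager reference.
static int docCount(SearcherManager mgr) throws IOException {
  IndexSearcher searcher = mgr.acquire();
  try {
    return searcher.getIndexReader().numDocs();
  } finally {
    mgr.release(searcher);
  }
}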
 
Example 8 - Project: lucene-solr, File: TestTermScorer.java
public void testDoesNotLoadNorms() throws IOException {
  Term allTerm = new Term(FIELD, "all");
  TermQuery termQuery = new TermQuery(allTerm);
  
  LeafReader forbiddenNorms = new FilterLeafReader(indexReader) {
    @Override
    public NumericDocValues getNormValues(String field) throws IOException {
      fail("Norms should not be loaded");
      // unreachable
      return null;
    }

    @Override
    public CacheHelper getCoreCacheHelper() {
      return in.getCoreCacheHelper();
    }

    @Override
    public CacheHelper getReaderCacheHelper() {
      return in.getReaderCacheHelper();
    }
  };
  // We don't use newSearcher because it sometimes runs checkIndex which loads norms
  IndexSearcher indexSearcher = new IndexSearcher(forbiddenNorms);
  
  Weight weight = indexSearcher.createWeight(termQuery, ScoreMode.COMPLETE, 1);
  expectThrows(AssertionError.class, () -> {
    weight.scorer(forbiddenNorms.getContext()).iterator().nextDoc();
  });
  
  Weight weight2 = indexSearcher.createWeight(termQuery, ScoreMode.COMPLETE_NO_SCORES, 1);
  // should not fail this time since norms are not necessary
  weight2.scorer(forbiddenNorms.getContext()).iterator().nextDoc();
}
 
Example 9 - Project: lucene-solr, File: TestSearcherManager.java
public MyFilterDirectoryReader(DirectoryReader in) throws IOException {
  super(in, 
        new FilterDirectoryReader.SubReaderWrapper() {
          @Override
          public LeafReader wrap(LeafReader reader) {
            FilterLeafReader wrapped = new MyFilterLeafReader(reader);
            assertEquals(reader, wrapped.getDelegate());
            return wrapped;
          }
        });
}
 
Example 10 - Project: lucene-solr, File: LegacyNumericUtils.java
private static Terms intTerms(Terms terms) {
  return new FilterLeafReader.FilterTerms(terms) {
      @Override
      public TermsEnum iterator() throws IOException {
        return filterPrefixCodedInts(in.iterator());
      }
    };
}
 
Example 11 - Project: lucene-solr, File: LegacyNumericUtils.java
private static Terms longTerms(Terms terms) {
  return new FilterLeafReader.FilterTerms(terms) {
      @Override
      public TermsEnum iterator() throws IOException {
        return filterPrefixCodedLongs(in.iterator());
      }
    };
}
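Both helpers use the same pattern: FilterLeafReader.FilterTerms delegates every Terms method to the wrapped `in`, so overriding iterator() alone is enough to intercept term enumeration. A generic sketch (LoggingTerms is our own name):

import java.io.IOException;
import org.apache.lucene.index.FilterLeafReader;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;

// All Terms methods delegate to `in`; only iterator() is intercepted.
final class LoggingTerms extends FilterLeafReader.FilterTerms {
  LoggingTerms(Terms in) {
    super(in);
  }

  @Override
  public TermsEnum iterator() throws IOException {
    System.out.println("terms iterated");
    return in.iterator();
  }
}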
 
Example 12 - Project: crate, File: ElasticsearchLeafReader.java
public static ElasticsearchLeafReader getElasticsearchLeafReader(LeafReader reader) {
    if (reader instanceof FilterLeafReader) {
        if (reader instanceof ElasticsearchLeafReader) {
            return (ElasticsearchLeafReader) reader;
        } else {
            // We need to use FilterLeafReader#getDelegate and not FilterLeafReader#unwrap, because
            // if there are multiple levels of filtered leaf readers, unwrap() immediately returns
            // the innermost leaf reader, skipping over any intermediate filtered leaf reader that
            // may be an instance of ElasticsearchLeafReader. That could cause us to miss the shardId.
            return getElasticsearchLeafReader(((FilterLeafReader) reader).getDelegate());
        }
    }
    return null;
}
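The distinction drawn in the comment is easy to demonstrate. In this sketch (the wrapper names Outer and Middle are hypothetical, and ByteBuffersDirectory assumes Lucene 8+), unwrap() skips Middle entirely, while a single getDelegate() call still surfaces it:

import java.io.IOException;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.FilterLeafReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.store.ByteBuffersDirectory;
import org.apache.lucene.store.Directory;

public class UnwrapVsGetDelegate {
  // Two trivial pass-through wrappers standing in for filter layers.
  static class Middle extends FilterLeafReader {
    Middle(LeafReader in) { super(in); }
    @Override public CacheHelper getCoreCacheHelper() { return null; }
    @Override public CacheHelper getReaderCacheHelper() { return null; }
  }
  static class Outer extends FilterLeafReader {
    Outer(LeafReader in) { super(in); }
    @Override public CacheHelper getCoreCacheHelper() { return null; }
    @Override public CacheHelper getReaderCacheHelper() { return null; }
  }

  public static void main(String[] args) throws IOException {
    Directory dir = new ByteBuffersDirectory();
    try (IndexWriter w = new IndexWriter(dir, new IndexWriterConfig())) {
      w.addDocument(new Document()); // one empty doc so the index has a segment
    }
    try (DirectoryReader r = DirectoryReader.open(dir)) {
      LeafReader wrapped = new Outer(new Middle(r.leaves().get(0).reader()));
      // unwrap() jumps straight to the innermost (segment) reader:
      System.out.println(FilterLeafReader.unwrap(wrapped).getClass().getSimpleName());
      // getDelegate() peels exactly one layer, so Middle is still visible:
      System.out.println(((FilterLeafReader) wrapped).getDelegate().getClass().getSimpleName());
    }
  }
}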
 
Example 13 - Project: crate, File: FieldMaskingReader.java
public FieldMaskingReader(String field, DirectoryReader in) throws IOException {
    super(in, new FilterDirectoryReader.SubReaderWrapper() {
        @Override
        public LeafReader wrap(LeafReader reader) {
            return new FilterLeafReader(new FieldFilterLeafReader(reader, Collections.singleton(field), true)) {

                // FieldFilterLeafReader does not forward cache helpers
                // since it considers it is illegal because of the fact
                // that it changes the content of the index. However we
                // want this behavior for tests, and security plugins
                // are careful to only use the cache when it's valid

                @Override
                public CacheHelper getReaderCacheHelper() {
                    return reader.getReaderCacheHelper();
                }

                @Override
                public CacheHelper getCoreCacheHelper() {
                    return reader.getCoreCacheHelper();
                }
            };
        }
    });
    this.field = field;

}
 
Example 14 - Project: lucene-solr, File: SegmentsInfoRequestHandler.java
private SimpleOrderedMap<Object> getSegmentInfo(
    SegmentCommitInfo segmentCommitInfo, boolean withSizeInfo, boolean withFieldInfos,
    List<LeafReaderContext> leafContexts, IndexSchema schema) throws IOException {
  SimpleOrderedMap<Object> segmentInfoMap = new SimpleOrderedMap<>();

  segmentInfoMap.add(NAME, segmentCommitInfo.info.name);
  segmentInfoMap.add("delCount", segmentCommitInfo.getDelCount());
  segmentInfoMap.add("softDelCount", segmentCommitInfo.getSoftDelCount());
  segmentInfoMap.add("hasFieldUpdates", segmentCommitInfo.hasFieldUpdates());
  segmentInfoMap.add("sizeInBytes", segmentCommitInfo.sizeInBytes());
  segmentInfoMap.add("size", segmentCommitInfo.info.maxDoc());
  long timestamp = Long.parseLong(segmentCommitInfo.info.getDiagnostics()
      .get("timestamp"));
  segmentInfoMap.add("age", new Date(timestamp));
  segmentInfoMap.add("source",
      segmentCommitInfo.info.getDiagnostics().get("source"));
  segmentInfoMap.add("version", segmentCommitInfo.info.getVersion().toString());
  // don't open a new SegmentReader - try to find the right one from the leaf contexts
  SegmentReader seg = null;
  for (LeafReaderContext lrc : leafContexts) {
    LeafReader leafReader = lrc.reader();
    leafReader = FilterLeafReader.unwrap(leafReader);
    if (leafReader instanceof SegmentReader) {
      SegmentReader sr = (SegmentReader)leafReader;
      if (sr.getSegmentInfo().info.equals(segmentCommitInfo.info)) {
        seg = sr;
        break;
      }
    }
  }
  if (seg != null) {
    LeafMetaData metaData = seg.getMetaData();
    if (metaData != null) {
      segmentInfoMap.add("createdVersionMajor", metaData.getCreatedVersionMajor());
      segmentInfoMap.add("minVersion", metaData.getMinVersion().toString());
      if (metaData.getSort() != null) {
        segmentInfoMap.add("sort", metaData.getSort().toString());
      }
    }
  }
  if (!segmentCommitInfo.info.getDiagnostics().isEmpty()) {
    segmentInfoMap.add("diagnostics", segmentCommitInfo.info.getDiagnostics());
  }
  if (!segmentCommitInfo.info.getAttributes().isEmpty()) {
    segmentInfoMap.add("attributes", segmentCommitInfo.info.getAttributes());
  }
  if (withSizeInfo) {
    Directory dir = segmentCommitInfo.info.dir;
    List<Pair<String, Long>> files = segmentCommitInfo.files().stream()
        .map(f -> {
          long size = -1;
          try {
            size = dir.fileLength(f);
          } catch (IOException e) {
            // ignore: leave size as -1 (unknown) if the file length cannot be read
          }
          return new Pair<String, Long>(f, size);
        }).sorted((p1, p2) -> {
          if (p1.second() > p2.second()) {
            return -1;
          } else if (p1.second() < p2.second()) {
            return 1;
          } else {
            return 0;
          }
        }).collect(Collectors.toList());
    if (!files.isEmpty()) {
      SimpleOrderedMap<Object> topFiles = new SimpleOrderedMap<>();
      for (int i = 0; i < Math.min(files.size(), 5); i++) {
        Pair<String, Long> p = files.get(i);
        topFiles.add(p.first(), RamUsageEstimator.humanReadableUnits(p.second()));
      }
      segmentInfoMap.add("largestFiles", topFiles);
    }
  }
  if (seg != null && withSizeInfo) {
    SimpleOrderedMap<Object> ram = new SimpleOrderedMap<>();
    ram.add("total", seg.ramBytesUsed());
    for (Accountable ac : seg.getChildResources()) {
      accountableToMap(ac, ram::add);
    }
    segmentInfoMap.add("ramBytesUsed", ram);
  }
  if (withFieldInfos) {
    if (seg == null) {
      log.debug("Skipping segment info - not available as a SegmentReader: {}", segmentCommitInfo);
    } else {
      FieldInfos fis = seg.getFieldInfos();
      SimpleOrderedMap<Object> fields = new SimpleOrderedMap<>();
      for (FieldInfo fi : fis) {
        fields.add(fi.name, getFieldInfo(seg, fi, schema));
      }
      segmentInfoMap.add("fields", fields);
    }
  }

  return segmentInfoMap;
}
 