Source code examples for class org.apache.lucene.search.MatchAllDocsQuery

The following examples show how to use the org.apache.lucene.search.MatchAllDocsQuery API in real projects; you can also follow the links to view the full source code on GitHub.

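Before the project examples, here is a minimal self-contained sketch of the class's core behavior: MatchAllDocsQuery matches every non-deleted document in the index with a constant score. This demo class is illustrative only (it assumes a Lucene 8.x API, where ByteBuffersDirectory replaces the older RAMDirectory) and is not taken from any of the projects below.

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.store.ByteBuffersDirectory;
import org.apache.lucene.store.Directory;

public class MatchAllDocsDemo {
  public static void main(String[] args) throws Exception {
    Directory dir = new ByteBuffersDirectory(); // in-memory index
    try (IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()))) {
      writer.addDocument(new Document());
      writer.addDocument(new Document());
    }
    try (DirectoryReader reader = DirectoryReader.open(dir)) {
      IndexSearcher searcher = new IndexSearcher(reader);
      // counts all non-deleted documents without scoring overhead
      int totalHits = searcher.count(new MatchAllDocsQuery());
      System.out.println("total hits: " + totalHits); // prints 2
    }
  }
}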
Example 1  Project: lucene-solr  File: RecoveryStrategy.java
final private void cloudDebugLog(SolrCore core, String op) {
  if (!log.isDebugEnabled()) {
    return;
  }
  try {
    RefCounted<SolrIndexSearcher> searchHolder = core.getNewestSearcher(false);
    SolrIndexSearcher searcher = searchHolder.get();
    try {
      final int totalHits = searcher.count(new MatchAllDocsQuery());
      final String nodeName = core.getCoreContainer().getZkController().getNodeName();
      log.debug("[{}] {} [{} total hits]", nodeName, op, totalHits);
    } finally {
      searchHolder.decref();
    }
  } catch (Exception e) {
    log.debug("Error in solrcloud_debug block", e);
  }
}
 
Example 2  Project: lucene-solr  File: AssociationsFacetsExample.java
/** User runs a query and aggregates facets by summing their association values. */
private List<FacetResult> sumAssociations() throws IOException {
  DirectoryReader indexReader = DirectoryReader.open(indexDir);
  IndexSearcher searcher = new IndexSearcher(indexReader);
  TaxonomyReader taxoReader = new DirectoryTaxonomyReader(taxoDir);
  
  FacetsCollector fc = new FacetsCollector();
  
  // MatchAllDocsQuery is for "browsing" (counts facets
  // for all non-deleted docs in the index); normally
  // you'd use a "normal" query:
  FacetsCollector.search(searcher, new MatchAllDocsQuery(), 10, fc);
  
  Facets tags = new TaxonomyFacetSumIntAssociations("$tags", taxoReader, config, fc);
  Facets genre = new TaxonomyFacetSumFloatAssociations("$genre", taxoReader, config, fc);

  // Retrieve results
  List<FacetResult> results = new ArrayList<>();
  results.add(tags.getTopChildren(10, "tags"));
  results.add(genre.getTopChildren(10, "genre"));

  indexReader.close();
  taxoReader.close();
  
  return results;
}
 
Example 3  Project: lucene-solr  File: TestFunctionScoreQuery.java
public void testTruncateNegativeScores() throws IOException {
  Directory dir = newDirectory();
  IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
  Document doc = new Document();
  doc.add(new NumericDocValuesField("foo", -2));
  w.addDocument(doc);
  IndexReader reader = DirectoryReader.open(w);
  w.close();
  IndexSearcher searcher = newSearcher(reader);
  Query q = new FunctionScoreQuery(new MatchAllDocsQuery(), DoubleValuesSource.fromLongField("foo"));
  QueryUtils.check(random(), q, searcher);
  Explanation expl = searcher.explain(q, 0);
  assertEquals(0, expl.getValue().doubleValue(), 0f);
  assertTrue(expl.toString(), expl.getDetails()[0].getDescription().contains("truncated score"));
  reader.close();
  dir.close();
}
 
Example 4  Project: lucene-solr  File: TestFunctionScoreQuery.java
public void testNaN() throws IOException {
  Directory dir = newDirectory();
  IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
  Document doc = new Document();
  doc.add(new NumericDocValuesField("foo", Double.doubleToLongBits(Double.NaN)));
  w.addDocument(doc);
  IndexReader reader = DirectoryReader.open(w);
  w.close();
  IndexSearcher searcher = newSearcher(reader);
  Query q = new FunctionScoreQuery(new MatchAllDocsQuery(), DoubleValuesSource.fromDoubleField("foo"));
  QueryUtils.check(random(), q, searcher);
  Explanation expl = searcher.explain(q, 0);
  assertEquals(0, expl.getValue().doubleValue(), 0f);
  assertTrue(expl.toString(), expl.getDetails()[0].getDescription().contains("NaN is an illegal score"));
  reader.close();
  dir.close();
}
 
Example 5  Project: lucene-solr  File: TestFieldCacheSort.java
/** Tests that we throw an exception on a multi-valued field, which creates a corrupt reader; use SORTED_SET instead. */
public void testMultiValuedField() throws IOException {
  Directory indexStore = newDirectory();
  IndexWriter writer = new IndexWriter(indexStore, newIndexWriterConfig(new MockAnalyzer(random())));
  for(int i=0; i<5; i++) {
      Document doc = new Document();
      doc.add(new StringField("string", "a"+i, Field.Store.NO));
      doc.add(new StringField("string", "b"+i, Field.Store.NO));
      writer.addDocument(doc);
  }
  writer.forceMerge(1); // enforce one segment to have a higher unique term count in all cases
  writer.close();
  Sort sort = new Sort(
      new SortField("string", SortField.Type.STRING),
      SortField.FIELD_DOC);
  IndexReader reader = UninvertingReader.wrap(DirectoryReader.open(indexStore),
                       Collections.singletonMap("string", Type.SORTED));
  IndexSearcher searcher = new IndexSearcher(reader);
  expectThrows(IllegalStateException.class, () -> {
    searcher.search(new MatchAllDocsQuery(), 500, sort);
  });
  reader.close();
  indexStore.close();
}
 
Example 6  Project: lucene-solr  File: QueryUtils.java
/**
 * Combines a scoring query with a non-scoring (filter) query.
 * If both parameters are null then return a {@link MatchAllDocsQuery}.
 * If only {@code scoreQuery} is present then return it.
 * If only {@code filterQuery} is present then return it wrapped with constant scoring.
 * If neither is null then we combine them with a BooleanQuery.
 */
public static Query combineQueryAndFilter(Query scoreQuery, Query filterQuery) {
  // checking for *:* is simple and avoids a needless BooleanQuery wrapper, even though BQ.rewrite would optimize it away
  if (scoreQuery == null || scoreQuery instanceof MatchAllDocsQuery) {
    if (filterQuery == null) {
      return new MatchAllDocsQuery(); // default if nothing -- match everything
    } else {
      return new ConstantScoreQuery(filterQuery);
    }
  } else {
    if (filterQuery == null || filterQuery instanceof MatchAllDocsQuery) {
      return scoreQuery;
    } else {
      return new BooleanQuery.Builder()
          .add(scoreQuery, Occur.MUST)
          .add(filterQuery, Occur.FILTER)
          .build();
    }
  }
}
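For reference, the four branches above can be summarized with an illustrative sketch (the TermQuery instances below are placeholders, not part of the Solr source):

// hypothetical placeholder queries for illustration
Query score  = new TermQuery(new Term("f", "v"));
Query filter = new TermQuery(new Term("type", "doc"));

combineQueryAndFilter(null,  null);   // -> new MatchAllDocsQuery(): match everything
combineQueryAndFilter(score, null);   // -> score, returned unchanged
combineQueryAndFilter(null,  filter); // -> new ConstantScoreQuery(filter)
combineQueryAndFilter(score, filter); // -> BooleanQuery: score as MUST, filter as FILTER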
 
Example 7
private static Facets getAllFacets(IndexSearcher searcher, SortedSetDocValuesReaderState state,
                                   ExecutorService exec) throws IOException, InterruptedException {
  if (random().nextBoolean()) {
    FacetsCollector c = new FacetsCollector();
    searcher.search(new MatchAllDocsQuery(), c);
    if (exec != null) {
      return new ConcurrentSortedSetDocValuesFacetCounts(state, c, exec);
    } else {
      return new SortedSetDocValuesFacetCounts(state, c);
    }
  } else if (exec != null) {
    return new ConcurrentSortedSetDocValuesFacetCounts(state, exec);
  } else {
    return new SortedSetDocValuesFacetCounts(state);
  }
}
 
Example 8  Project: liresolr  File: LireRequestHandler.java
/**
 * Returns a random set of documents from the index. Mainly for testing purposes.
 *
 * @param req the current Solr request
 * @param rsp the response to write the documents to
 * @throws IOException if the index cannot be read
 */
private void handleRandomSearch(SolrQueryRequest req, SolrQueryResponse rsp) throws IOException {
    SolrIndexSearcher searcher = req.getSearcher();
    Query query = new MatchAllDocsQuery();
    DocList docList = searcher.getDocList(query, getFilterQueries(req), Sort.RELEVANCE, 0, numberOfCandidateResults, 0);
    int paramRows = Math.min(req.getParams().getInt("rows", defaultNumberOfResults), docList.size());
    if (docList.size() < 1) {
        rsp.add("Error", "No documents in index");
    } else {
        List<Document> list = new LinkedList<>();
        while (list.size() < paramRows) {
            DocList auxList = docList.subset((int) (Math.random() * docList.size()), 1);
            Document doc = null;
            for (DocIterator it = auxList.iterator(); it.hasNext(); ) {
                doc = searcher.doc(it.nextDoc());
            }
            if (!list.contains(doc)) {
                list.add(doc);
            }
        }
        rsp.addResponse(list);
    }
}
 
Example 9  Project: crate  File: GroupingLongCollectorBenchmark.java
@Benchmark
public LongObjectHashMap<Long> measureGroupingOnSortedNumericDocValues() throws Exception {
    var weight = searcher.createWeight(new MatchAllDocsQuery(), ScoreMode.COMPLETE_NO_SCORES, 1.0f);
    var leaf = searcher.getTopReaderContext().leaves().get(0);
    var scorer = weight.scorer(leaf);
    var docValues = DocValues.getSortedNumeric(leaf.reader(), "y");
    var docIt = scorer.iterator();
    LongObjectHashMap<Long> sumByKey = new LongObjectHashMap<>();
    for (int docId = docIt.nextDoc(); docId != DocIdSetIterator.NO_MORE_DOCS; docId = docIt.nextDoc()) {
        if (docValues.advanceExact(docId)) {
            if (docValues.docValueCount() == 1) {
                long number = docValues.nextValue();
                sumByKey.compute(number, (key, oldValue) -> {
                    if (oldValue == null) {
                        return number;
                    } else {
                        return oldValue + number;
                    }
                });
            }
        }
    }
    return sumByKey;
}
 
Example 10  Project: crate  File: IpColumnReferenceTest.java
@Test
public void testIpExpression() throws Exception {
    IpColumnReference columnReference = new IpColumnReference(IP_COLUMN);
    columnReference.startCollect(ctx);
    columnReference.setNextReader(readerContext);
    IndexSearcher searcher = new IndexSearcher(readerContext.reader());
    TopDocs topDocs = searcher.search(new MatchAllDocsQuery(), 21);
    assertThat(topDocs.scoreDocs.length, is(21));

    int i = 0;
    for (ScoreDoc doc : topDocs.scoreDocs) {
        columnReference.setNextDocId(doc.doc);
        if (i == 20) {
            assertThat(columnReference.value(), is(nullValue()));
        } else if (i < 10) {
            assertThat(columnReference.value(), is("192.168.0." + i));
        } else {
            assertThat(columnReference.value(),
                is("7bd0:8082:2df8:487e:e0df:e7b5:9362:" + Integer.toHexString(i)));
        }
        i++;
    }
}
 
Example 11  Project: lucene-solr  File: TestLongValueFacetCounts.java
public void testOnlyBigLongs() throws Exception {
  Directory d = newDirectory();
  RandomIndexWriter w = new RandomIndexWriter(random(), d);
  for (long l = 0; l < 3; l++) {
    Document doc = new Document();
    doc.add(new NumericDocValuesField("field", Long.MAX_VALUE - l));
    w.addDocument(doc);
  }

  IndexReader r = w.getReader();
  w.close();

  FacetsCollector fc = new FacetsCollector();
  IndexSearcher s = newSearcher(r);
  s.search(new MatchAllDocsQuery(), fc);

  LongValueFacetCounts facets = new LongValueFacetCounts("field", fc, false);

  FacetResult result = facets.getAllChildrenSortByValue();
  assertEquals("dim=field path=[] value=3 childCount=3\n  9223372036854775805 (1)\n  " +
               "9223372036854775806 (1)\n  9223372036854775807 (1)\n",
               result.toString());
  r.close();
  d.close();
}
 
Example 12  Project: lucene-solr  File: TestBlockJoinValidation.java
public void testAdvanceValidationForToChildBjq() throws Exception {
  Query parentQuery = new MatchAllDocsQuery();
  ToChildBlockJoinQuery blockJoinQuery = new ToChildBlockJoinQuery(parentQuery, parentsFilter);

  final LeafReaderContext context = indexSearcher.getIndexReader().leaves().get(0);
  Weight weight = indexSearcher.createWeight(indexSearcher.rewrite(blockJoinQuery), org.apache.lucene.search.ScoreMode.COMPLETE, 1);
  Scorer scorer = weight.scorer(context);
  final Bits parentDocs = parentsFilter.getBitSet(context);

  int target;
  do {
    // make the parent scorer advance to a doc ID which is not a parent
    target = TestUtil.nextInt(random(), 0, context.reader().maxDoc() - 2);
  } while (parentDocs.get(target + 1));

  final int illegalTarget = target;
  IllegalStateException expected = expectThrows(IllegalStateException.class, () -> {
    scorer.iterator().advance(illegalTarget);
  });
  assertTrue(expected.getMessage() != null && expected.getMessage().contains(ToChildBlockJoinQuery.INVALID_QUERY_MESSAGE));
}
 
Example 13  Project: lucene-solr  File: TestQueryBitSetProducer.java
public void testSimple() throws Exception {
  Directory dir = newDirectory();
  IndexWriterConfig iwc = newIndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE);
  RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
  w.addDocument(new Document());
  DirectoryReader reader = w.getReader();

  QueryBitSetProducer producer = new QueryBitSetProducer(new MatchNoDocsQuery());
  assertNull(producer.getBitSet(reader.leaves().get(0)));
  assertEquals(1, producer.cache.size());

  producer = new QueryBitSetProducer(new MatchAllDocsQuery());
  BitSet bitSet = producer.getBitSet(reader.leaves().get(0));
  assertEquals(1, bitSet.length());
  assertEquals(true, bitSet.get(0));
  assertEquals(1, producer.cache.size());

  IOUtils.close(reader, w, dir);
}
 
Example 14  Project: jstarcraft-example  File: MovieService.java
/**
 * Scores candidate movies for a user with the given model, optionally restricted
 * by a title query and filtered by the user's click history.
 *
 * @param userIndex     index of the user to predict for
 * @param modelKey      key of the prediction model to use
 * @param queryKey      title query; blank means match all movies
 * @param filterClicked whether to skip movies the user has already clicked
 * @return a movie-to-score map
 * @throws Exception if retrieval or prediction fails
 */
@LockableMethod(strategy = HashLockableStrategy.class)
public Object2FloatMap<MovieItem> getItems(@LockableParameter int userIndex, String modelKey, String queryKey, boolean filterClicked) throws Exception {
    // item-to-score map
    Object2FloatMap<MovieItem> item2ScoreMap = new Object2FloatOpenHashMap<>();

    long current = System.currentTimeMillis();
    Model model = models.get(modelKey);
    ArrayInstance instance = new ArrayInstance(qualityOrder, quantityOrder);
    MovieUser user = users.get(userIndex);
    Query query = StringUtility.isBlank(queryKey) ? new MatchAllDocsQuery() : queryParser.parse(queryKey, MovieItem.TITLE);
    KeyValue<List<Document>, FloatList> retrieve = engine.retrieveDocuments(query, null, 0, 1000);
    List<Document> documents = retrieve.getKey();
    for (int index = 0, size = documents.size(); index < size; index++) {
        Document document = documents.get(index);
        MovieItem item = items.get(document.getField(MovieItem.INDEX).numericValue().intValue());
        int itemIndex = item.getIndex();
        // skip movies the user has already clicked
        if (filterClicked && user.isClicked(itemIndex)) {
            continue;
        }
        instance.setQualityFeature(userDimension, userIndex);
        instance.setQualityFeature(itemDimension, itemIndex);
        model.predict(instance);
        float score = instance.getQuantityMark();
        item2ScoreMap.put(item, score);
    }
    String message = StringUtility.format("model:{}, predicted:{}, elapsed:{}ms", modelKey, documents.size(), System.currentTimeMillis() - current);
    logger.info(message);

    return item2ScoreMap;
}
 
Example 15  Project: HongsCORE  File: LuceneRecord.java
public Query padQry(Map rd) throws HongsException {
    BooleanQuery.Builder qr = new BooleanQuery.Builder();

    padQry(qr, rd);

    BooleanQuery qu = qr.build();
    if (! qu.clauses().isEmpty()) {
        return qu ;
    }

    return new MatchAllDocsQuery( );
}
 
Example 16  Project: lucene-solr  File: TestMonitor.java
public void testCanClearTheMonitor() throws IOException {
  try (Monitor monitor = newMonitor()) {
    monitor.register(
        new MonitorQuery("query1", new MatchAllDocsQuery()),
        new MonitorQuery("query2", new MatchAllDocsQuery()),
        new MonitorQuery("query3", new MatchAllDocsQuery()));
    assertEquals(3, monitor.getQueryCount());

    monitor.clear();
    assertEquals(0, monitor.getQueryCount());
  }
}
 
Example 17  Project: webdsl  File: AbstractIndexManager.java
protected static Query mustNotNamespaceQuery(String namespace) {
    BooleanQuery q = new BooleanQuery();
    // the MatchAllDocsQuery clause is needed to perform a must-not query
    q.add(new MatchAllDocsQuery(), Occur.SHOULD);
    q.add(new TermQuery(new Term(SearchHelper.NAMESPACEFIELD, namespace)), Occur.MUST_NOT);
    return q;
}
 
Example 18  Project: crate  File: EngineTestCase.java
protected static void assertVisibleCount(InternalEngine engine, int numDocs, boolean refresh) throws IOException {
    if (refresh) {
        engine.refresh("test");
    }
    try (Engine.Searcher searcher = engine.acquireSearcher("test")) {
        final TotalHitCountCollector collector = new TotalHitCountCollector();
        searcher.searcher().search(new MatchAllDocsQuery(), collector);
        assertThat(collector.getTotalHits(), equalTo(numDocs));
    }
}
 
Example 19  Project: lucene-solr  File: TestFeatureSort.java
public void testFeatureMissingFeatureNameInSegment() throws IOException {
  Directory dir = newDirectory();
  IndexWriterConfig config = newIndexWriterConfig().setMergePolicy(newLogMergePolicy(random().nextBoolean()));
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir, config);
  Document doc = new Document();
  doc.add(new FeatureField("field", "different_name", 0.5F));
  writer.addDocument(doc);
  writer.commit();
  doc = new Document();
  doc.add(new FeatureField("field", "name", 1.3F));
  doc.add(newStringField("value", "1.3", Field.Store.YES));
  writer.addDocument(doc);
  doc = new Document();
  doc.add(new FeatureField("field", "name", 4.2F));
  doc.add(newStringField("value", "4.2", Field.Store.YES));
  writer.addDocument(doc);
  IndexReader ir = writer.getReader();
  writer.close();

  IndexSearcher searcher = newSearcher(ir);
  Sort sort = new Sort(FeatureField.newFeatureSort("field", "name"));

  TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
  assertEquals(3, td.totalHits.value);
  // null is treated as 0
  assertEquals("4.2", searcher.doc(td.scoreDocs[0].doc).get("value"));
  assertEquals("1.3", searcher.doc(td.scoreDocs[1].doc).get("value"));
  assertNull(searcher.doc(td.scoreDocs[2].doc).get("value"));

  ir.close();
  dir.close();
}
 
Example 20  Project: lucene-solr  File: DocToDoubleVectorUtilsTest.java
@Test
public void testDenseFreqDoubleArrayConversion() throws Exception {
  IndexSearcher indexSearcher = new IndexSearcher(index);
  for (ScoreDoc scoreDoc : indexSearcher.search(new MatchAllDocsQuery(), Integer.MAX_VALUE).scoreDocs) {
    Terms docTerms = index.getTermVector(scoreDoc.doc, "text");
    Double[] vector = DocToDoubleVectorUtils.toDenseLocalFreqDoubleArray(docTerms);
    assertNotNull(vector);
    assertTrue(vector.length > 0);
  }
}
 
Example 21  Project: Elasticsearch  File: MatchAllQueryParser.java
@Override
public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
    XContentParser parser = parseContext.parser();

    float boost = 1.0f;
    String currentFieldName = null;

    XContentParser.Token token;
    while (((token = parser.nextToken()) != XContentParser.Token.END_OBJECT && token != XContentParser.Token.END_ARRAY)) {
        if (token == XContentParser.Token.FIELD_NAME) {
            currentFieldName = parser.currentName();
        } else if (token.isValue()) {
            if ("boost".equals(currentFieldName)) {
                boost = parser.floatValue();
            } else {
                throw new QueryParsingException(parseContext, "[match_all] query does not support [" + currentFieldName + "]");
            }
        }
    }

    if (boost == 1.0f) {
        return Queries.newMatchAllQuery();
    }

    MatchAllDocsQuery query = new MatchAllDocsQuery();
    query.setBoost(boost);
    return query;
}
 
Example 22
public void testSubsetFeaturesTermQ() throws IOException {
    //     public LambdaMART(List<RankList> samples, int[] features, MetricScorer scorer) {
    String userQuery = "brown cow";

    Query baseQuery = new MatchAllDocsQuery();

    List<Query> features = Arrays.asList(
            new TermQuery(new Term("field",  userQuery.split(" ")[0])),
            new PhraseQuery("field", userQuery.split(" ")),
            new PhraseQuery(1, "field", userQuery.split(" ") ));
    checkModelWithFeatures(toPrebuildFeatureWithNoName(features), new int[] {1}, null);
}
 
Example 23  Project: lucene-solr  File: TestUnifiedHighlighterMTQ.java
public void testOneRegexp() throws Exception {
  RandomIndexWriter iw = new RandomIndexWriter(random(), dir, indexAnalyzer);

  Field body = new Field("body", "", fieldType);
  Document doc = new Document();
  doc.add(body);

  body.setStringValue("This is a test.");
  iw.addDocument(doc);
  body.setStringValue("Test a one sentence document.");
  iw.addDocument(doc);

  IndexReader ir = iw.getReader();
  iw.close();

  IndexSearcher searcher = newSearcher(ir);
  UnifiedHighlighter highlighter = randomUnifiedHighlighter(searcher, indexAnalyzer);
  Query query = new RegexpQuery(new Term("body", "te.*"));
  TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER);
  assertEquals(2, topDocs.totalHits.value);
  String snippets[] = highlighter.highlight("body", query, topDocs);
  assertEquals(2, snippets.length);
  assertEquals("This is a <b>test</b>.", snippets[0]);
  assertEquals("<b>Test</b> a one sentence document.", snippets[1]);

  // wrong field
  highlighter.setFieldMatcher(null);//default
  BooleanQuery bq = new BooleanQuery.Builder()
      .add(new MatchAllDocsQuery(), BooleanClause.Occur.SHOULD)
      .add(new RegexpQuery(new Term("bogus", "te.*")), BooleanClause.Occur.SHOULD)
      .build();
  topDocs = searcher.search(bq, 10, Sort.INDEXORDER);
  assertEquals(2, topDocs.totalHits.value);
  snippets = highlighter.highlight("body", bq, topDocs);
  assertEquals(2, snippets.length);
  assertEquals("This is a test.", snippets[0]);
  assertEquals("Test a one sentence document.", snippets[1]);

  ir.close();
}
 
Example 24  Project: lucene-solr  File: PresearcherTestBase.java
public void testNegativeQueryHandling() throws IOException {
  Query q = new BooleanQuery.Builder()
      .add(new MatchAllDocsQuery(), BooleanClause.Occur.SHOULD)
      .add(new TermQuery(new Term("f", "foo")), BooleanClause.Occur.MUST_NOT)
      .build();
  try (Monitor monitor = newMonitor()) {
    monitor.register(new MonitorQuery("1", q));

    MultiMatchingQueries<QueryMatch> matches = monitor.match(new Document[]{
        buildDoc("f", "bar"), buildDoc("f", "foo")
    }, QueryMatch.SIMPLE_MATCHER);
    assertEquals(1, matches.getMatchCount(0));
    assertEquals(0, matches.getMatchCount(1));
  }
}
 
Example 25
/**
 * This test maintains the extensibility of the FieldOffsetStrategy
 * for customizations outside the package.
 */
@Test
public void testFieldOffsetStrategyExtensibility() {
  final UnifiedHighlighter.OffsetSource offsetSource = UnifiedHighlighter.OffsetSource.NONE_NEEDED;
  FieldOffsetStrategy strategy = new FieldOffsetStrategy(new UHComponents("field",
      (s) -> false,
      new MatchAllDocsQuery(), new BytesRef[0],
      PhraseHelper.NONE,
      new LabelledCharArrayMatcher[0], false, Collections.emptySet())) {
    @Override
    public UnifiedHighlighter.OffsetSource getOffsetSource() {
      return offsetSource;
    }

    @Override
    public OffsetsEnum getOffsetsEnum(LeafReader reader, int docId, String content) throws IOException {
      return OffsetsEnum.EMPTY;
    }

    @Override
    protected OffsetsEnum createOffsetsEnumFromReader(LeafReader leafReader, int doc) throws IOException {
      return super.createOffsetsEnumFromReader(leafReader, doc);
    }

  };
  assertEquals(offsetSource, strategy.getOffsetSource());
}
 
Example 26  Project: lucene-solr  File: TestMemoryIndex.java
@Test
public void testFreezeAPI() {

  MemoryIndex mi = new MemoryIndex();
  mi.addField("f1", "some text", analyzer);

  assertThat(mi.search(new MatchAllDocsQuery()), not(is(0.0f)));
  assertThat(mi.search(new TermQuery(new Term("f1", "some"))), not(is(0.0f)));

  // check we can add a new field after searching
  mi.addField("f2", "some more text", analyzer);
  assertThat(mi.search(new TermQuery(new Term("f2", "some"))), not(is(0.0f)));

  // freeze!
  mi.freeze();

  RuntimeException expected = expectThrows(RuntimeException.class, () -> {
    mi.addField("f3", "and yet more", analyzer);
  });
  assertThat(expected.getMessage(), containsString("frozen"));

  expected = expectThrows(RuntimeException.class, () -> {
    mi.setSimilarity(new BM25Similarity(1, 1));
  });
  assertThat(expected.getMessage(), containsString("frozen"));

  assertThat(mi.search(new TermQuery(new Term("f1", "some"))), not(is(0.0f)));

  mi.reset();
  mi.addField("f1", "wibble", analyzer);
  assertThat(mi.search(new TermQuery(new Term("f1", "some"))), is(0.0f));
  assertThat(mi.search(new TermQuery(new Term("f1", "wibble"))), not(is(0.0f)));

  // check we can set the Similarity again
  mi.setSimilarity(new ClassicSimilarity());

}
 
Example 27  Project: lucene-solr  File: TestPointVectorStrategy.java
@Test
public void testFieldOptions() throws IOException, ParseException {
  // It's not stored; test it isn't.
  this.strategy = PointVectorStrategy.newInstance(ctx, getClass().getSimpleName());
  adoc("99", "POINT(-5.0 8.2)");
  commit();
  SearchResults results = executeQuery(new MatchAllDocsQuery(), 1);
  Document document = results.results.get(0).document;
  assertNull("not stored", document.getField(strategy.getFieldName() + PointVectorStrategy.SUFFIX_X));
  assertNull("not stored", document.getField(strategy.getFieldName() + PointVectorStrategy.SUFFIX_Y));
  deleteAll();

  // Now we mark it stored.  We also disable pointvalues...
  FieldType fieldType = new FieldType(PointVectorStrategy.DEFAULT_FIELDTYPE);
  fieldType.setStored(true);
  fieldType.setDimensions(0, 0);//disable point values
  this.strategy = new PointVectorStrategy(ctx, getClass().getSimpleName(), fieldType);
  adoc("99", "POINT(-5.0 8.2)");
  commit();
  results = executeQuery(new MatchAllDocsQuery(), 1);
  document = results.results.get(0).document;
  assertEquals("stored", -5.0, document.getField(strategy.getFieldName() + PointVectorStrategy.SUFFIX_X).numericValue());
  assertEquals("stored", 8.2,  document.getField(strategy.getFieldName() + PointVectorStrategy.SUFFIX_Y).numericValue());

  // Test a query fails without point values
  expectThrows(UnsupportedOperationException.class, () -> {
    SpatialArgs args = new SpatialArgs(SpatialOperation.Intersects, ctx.makeRectangle(-10.0, 10.0, -5.0, 5.0));
    this.strategy.makeQuery(args);
  });
}
 
Example 28
/** User runs a query and aggregates facets. */
private FacetResult search() throws IOException, ParseException {
  DirectoryReader indexReader = DirectoryReader.open(indexDir);
  IndexSearcher searcher = new IndexSearcher(indexReader);
  TaxonomyReader taxoReader = new DirectoryTaxonomyReader(taxoDir);

  // Aggregate categories by an expression that combines the document's score
  // and its popularity field
  Expression expr = JavascriptCompiler.compile("_score * sqrt(popularity)");
  SimpleBindings bindings = new SimpleBindings();
  bindings.add("_score", DoubleValuesSource.SCORES); // the score of the document
  bindings.add("popularity", DoubleValuesSource.fromLongField("popularity")); // the value of the 'popularity' field

  // Aggregates the facet values
  FacetsCollector fc = new FacetsCollector(true);

  // MatchAllDocsQuery is for "browsing" (counts facets
  // for all non-deleted docs in the index); normally
  // you'd use a "normal" query:
  FacetsCollector.search(searcher, new MatchAllDocsQuery(), 10, fc);

  // Retrieve results
  Facets facets = new TaxonomyFacetSumValueSource(taxoReader, config, fc, expr.getDoubleValuesSource(bindings));
  FacetResult result = facets.getTopChildren(10, "A");
  
  indexReader.close();
  taxoReader.close();
  
  return result;
}
 
Example 29  Project: lucene-solr  File: TestFeatureSort.java
public void testFeatureMissingFieldInSegment() throws IOException {
  Directory dir = newDirectory();
  IndexWriterConfig config = newIndexWriterConfig().setMergePolicy(newLogMergePolicy(random().nextBoolean()));
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir, config);
  Document doc = new Document();
  writer.addDocument(doc);
  writer.commit();
  doc = new Document();
  doc.add(new FeatureField("field", "name", 1.3F));
  doc.add(newStringField("value", "1.3", Field.Store.YES));
  writer.addDocument(doc);
  doc = new Document();
  doc.add(new FeatureField("field", "name", 4.2F));
  doc.add(newStringField("value", "4.2", Field.Store.YES));
  writer.addDocument(doc);
  IndexReader ir = writer.getReader();
  writer.close();

  IndexSearcher searcher = newSearcher(ir);
  Sort sort = new Sort(FeatureField.newFeatureSort("field", "name"));

  TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
  assertEquals(3, td.totalHits.value);
  // null is treated as 0
  assertEquals("4.2", searcher.doc(td.scoreDocs[0].doc).get("value"));
  assertEquals("1.3", searcher.doc(td.scoreDocs[1].doc).get("value"));
  assertNull(searcher.doc(td.scoreDocs[2].doc).get("value"));

  ir.close();
  dir.close();
}
 
Example 30
public void testFilteringOnMatchAllQueries() throws IOException {
  try (Monitor monitor = newMonitor()) {
    monitor.register(new MonitorQuery("1", new MatchAllDocsQuery(), null, Collections.singletonMap("language", "de")));

    Document enDoc = new Document();
    enDoc.add(newTextField(TEXTFIELD, "this is a test", Field.Store.NO));
    enDoc.add(newTextField("language", "en", Field.Store.NO));
    MatchingQueries<QueryMatch> matches = monitor.match(enDoc, QueryMatch.SIMPLE_MATCHER);
    assertEquals(0, matches.getMatchCount());
    assertEquals(0, matches.getQueriesRun());
  }
}
 