Class org.apache.lucene.search.Sort: source code examples

The examples below show how the org.apache.lucene.search.Sort API is used in real projects; follow the links to view the full source on GitHub.
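Before the project examples, here is a minimal sketch of how a Sort is typically constructed. The field name "price" is illustrative and not taken from any example below.

import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;

public class SortConstructionDemo {
    public static void main(String[] args) {
        // Sort by a numeric field in descending order (reverse = true),
        // breaking ties by document ID.
        Sort sort = new Sort(
            new SortField("price", SortField.Type.LONG, true),
            SortField.FIELD_DOC);
        System.out.println(sort); // e.g. <long: "price">!,<doc>

        // Built-in singletons cover the two most common cases:
        System.out.println(Sort.RELEVANCE);  // sort by descending score
        System.out.println(Sort.INDEXORDER); // sort by ascending doc ID
    }
}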

Example 1   Project: tutorials   File: InMemoryLuceneIndex.java
public List<Document> searchIndex(Query query, Sort sort) {
    try {
        IndexReader indexReader = DirectoryReader.open(memoryIndex);
        IndexSearcher searcher = new IndexSearcher(indexReader);
        TopDocs topDocs = searcher.search(query, 10, sort);
        List<Document> documents = new ArrayList<>();
        for (ScoreDoc scoreDoc : topDocs.scoreDocs) {
            documents.add(searcher.doc(scoreDoc.doc));
        }

        return documents;
    } catch (IOException e) {
        e.printStackTrace();
    }
    return null;

}
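Note that the snippet above never closes the IndexReader it opens and returns null on failure. A hedged variant of the same method (same API calls, only the resource and error handling changed) could use try-with-resources, since IndexReader is Closeable:

public List<Document> searchIndex(Query query, Sort sort) {
    // Close the reader deterministically when the search completes or fails.
    try (IndexReader indexReader = DirectoryReader.open(memoryIndex)) {
        IndexSearcher searcher = new IndexSearcher(indexReader);
        TopDocs topDocs = searcher.search(query, 10, sort);
        List<Document> documents = new ArrayList<>();
        for (ScoreDoc scoreDoc : topDocs.scoreDocs) {
            documents.add(searcher.doc(scoreDoc.doc));
        }
        return documents;
    } catch (IOException e) {
        // Surface the failure instead of returning null.
        throw new java.io.UncheckedIOException(e);
    }
}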
 
Example 2   Project: lucene-solr   File: TestUnifiedHighlighter.java
public void testMatchesSlopBug() throws IOException {
  IndexReader ir = indexSomeFields();
  IndexSearcher searcher = newSearcher(ir);
  UnifiedHighlighter highlighter = new UnifiedHighlighter(searcher, indexAnalyzer);
  Query query = new PhraseQuery(2, "title", "this", "is", "the", "field");
  TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER);
  assertEquals(1, topDocs.totalHits.value);
  String[] snippets = highlighter.highlight("title", query, topDocs, 10);
  assertEquals(1, snippets.length);
  if (highlighter.getFlags("title").contains(HighlightFlag.WEIGHT_MATCHES)) {
    assertEquals("<b>This is the title field</b>.", snippets[0]);
  } else {
    assertEquals("<b>This</b> <b>is</b> <b>the</b> title <b>field</b>.", snippets[0]);
  }
  ir.close();
}
 
Example 3   Project: artifact-listener   File: ProjectDaoImpl.java
@SuppressWarnings("unchecked")
@Override
public List<Project> searchByName(String searchTerm, Integer limit, Integer offset) {
	FullTextQuery query = getSearchByNameQuery(searchTerm);
	
	// Sort
	List<SortField> sortFields = ImmutableList.<SortField>builder()
			.add(new SortField(Project.NAME_SORT_FIELD_NAME, SortField.Type.STRING))
			.build();
	query.setSort(new Sort(sortFields.toArray(new SortField[sortFields.size()])));
	
	if (offset != null) {
		query.setFirstResult(offset);
	}
	if (limit != null) {
		query.setMaxResults(limit);
	}
	
	return (List<Project>) query.getResultList();
}
 
Example 4   Project: lucene-solr   File: TestDemoExpressions.java
/** tests expression referring to another expression */
public void testExpressionRefersToExpression() throws Exception {
  Expression expr1 = JavascriptCompiler.compile("_score");
  Expression expr2 = JavascriptCompiler.compile("2*expr1");
  
  SimpleBindings bindings = new SimpleBindings();
  bindings.add("_score", DoubleValuesSource.SCORES);
  bindings.add("expr1", expr1);
  
  Sort sort = new Sort(expr2.getSortField(bindings, true));
  Query query = new TermQuery(new Term("body", "contents"));
  TopFieldDocs td = searcher.search(query, 3, sort, true);
  for (int i = 0; i < 3; i++) {
    FieldDoc d = (FieldDoc) td.scoreDocs[i];
    float expected = 2*d.score;
    float actual = ((Double)d.fields[0]).floatValue();
    assertEquals(expected, actual, 0d);
  }
}
 
Example 5   Project: lucene-solr   File: TestDrillSideways.java
public void testEmptyIndex() throws Exception {
  // LUCENE-5045: make sure DrillSideways works with an empty index
  Directory dir = newDirectory();
  Directory taxoDir = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
  DirectoryTaxonomyWriter taxoWriter = new DirectoryTaxonomyWriter(taxoDir, IndexWriterConfig.OpenMode.CREATE);
  IndexSearcher searcher = newSearcher(writer.getReader());
  TaxonomyReader taxoReader = new DirectoryTaxonomyReader(taxoWriter);

  // Count "Author"
  FacetsConfig config = new FacetsConfig();
  DrillSideways ds = getNewDrillSideways(searcher, config, taxoReader);
  DrillDownQuery ddq = new DrillDownQuery(config);
  ddq.add("Author", "Lisa");

  DrillSidewaysResult r = ds.search(ddq, 10); // this used to fail on IllegalArgEx
  assertEquals(0, r.hits.totalHits.value);

  r = ds.search(ddq, null, null, 10, new Sort(new SortField("foo", SortField.Type.INT)), false); // this used to fail on IllegalArgEx
  assertEquals(0, r.hits.totalHits.value);

  writer.close();
  IOUtils.close(taxoWriter, searcher.getIndexReader(), taxoReader, dir, taxoDir);
}
 
Example 6   Project: SearchServices   File: AlfrescoSolrDataModel.java
public Query getCMISQuery(CMISQueryMode mode, Pair<SearchParameters, Boolean> searchParametersAndFilter, SolrQueryRequest req, org.alfresco.repo.search.impl.querymodel.Query queryModelQuery, CmisVersion cmisVersion, String alternativeDictionary) throws ParseException
{
    SearchParameters searchParameters = searchParametersAndFilter.getFirst();
    Boolean isFilter = searchParametersAndFilter.getSecond();

    CmisFunctionEvaluationContext functionContext = getCMISFunctionEvaluationContext(mode, cmisVersion, alternativeDictionary);

    Set<String> selectorGroup = queryModelQuery.getSource().getSelectorGroups(functionContext).get(0);

    LuceneQueryBuilderContext<Query, Sort, ParseException> luceneContext = getLuceneQueryBuilderContext(searchParameters, req, alternativeDictionary, FTSQueryParser.RerankPhase.SINGLE_PASS);
    @SuppressWarnings("unchecked")
    LuceneQueryBuilder<Query, Sort, ParseException> builder = (LuceneQueryBuilder<Query, Sort, ParseException>) queryModelQuery;
    org.apache.lucene.search.Query luceneQuery = builder.buildQuery(selectorGroup, luceneContext, functionContext);

    return new ContextAwareQuery(luceneQuery, Boolean.TRUE.equals(isFilter) ? null : searchParameters);
}
 
Example 7   Project: lucene-solr   File: ReRankCollector.java
public ReRankCollector(int reRankDocs,
                       int length,
                       Query reRankQuery,
                       double reRankWeight,
                       QueryCommand cmd,
                       IndexSearcher searcher,
                       Map<BytesRef, Integer> boostedPriority,
                       boolean scale) throws IOException {
    super(null);
    this.reRankQuery = reRankQuery;
    this.reRankDocs = reRankDocs;
    this.length = length;
    this.boostedPriority = boostedPriority;
    this.scale = scale;
    Sort sort = cmd.getSort();
    if(sort == null) {
        this.mainCollector = TopScoreDocCollector.create(Math.max(this.reRankDocs, length), null);
    } else {
        sort = sort.rewrite(searcher);
        this.mainCollector = TopFieldCollector.create(sort, Math.max(this.reRankDocs, length), null, false, true, true);
    }
    this.searcher = searcher;
    this.reRankWeight = reRankWeight;
}
 
Example 8   Project: lucene-solr   File: TestFieldCacheSort.java
/** test that we throw exception on multi-valued field, creates corrupt reader, use SORTED_SET instead */
public void testMultiValuedField() throws IOException {
  Directory indexStore = newDirectory();
  IndexWriter writer = new IndexWriter(indexStore, newIndexWriterConfig(new MockAnalyzer(random())));
  for(int i=0; i<5; i++) {
      Document doc = new Document();
      doc.add(new StringField("string", "a"+i, Field.Store.NO));
      doc.add(new StringField("string", "b"+i, Field.Store.NO));
      writer.addDocument(doc);
  }
  writer.forceMerge(1); // enforce one segment to have a higher unique term count in all cases
  writer.close();
  Sort sort = new Sort(
      new SortField("string", SortField.Type.STRING),
      SortField.FIELD_DOC);
  IndexReader reader = UninvertingReader.wrap(DirectoryReader.open(indexStore),
                       Collections.singletonMap("string", Type.SORTED));
  IndexSearcher searcher = new IndexSearcher(reader);
  expectThrows(IllegalStateException.class, () -> {
    searcher.search(new MatchAllDocsQuery(), 500, sort);
  });
  reader.close();
  indexStore.close();
}
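The comment above notes that sorting a multi-valued field this way is unsupported and that SORTED_SET doc values should be used instead. A minimal sketch of that supported path, reusing the field and value names from the test for illustration only:

import org.apache.lucene.document.Document;
import org.apache.lucene.document.SortedSetDocValuesField;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortedSetSortField;
import org.apache.lucene.util.BytesRef;

// Index both values of the multi-valued field as SORTED_SET doc values.
Document doc = new Document();
doc.add(new SortedSetDocValuesField("string", new BytesRef("a0")));
doc.add(new SortedSetDocValuesField("string", new BytesRef("b0")));
// ... writer.addDocument(doc) ...

// Sort on the per-document minimum value (the default selector).
Sort sort = new Sort(new SortedSetSortField("string", false));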
 
Example 9   Project: lucene-solr   File: ExpandComponent.java
public GroupExpandCollector(SortedDocValues docValues, FixedBitSet groupBits, IntHashSet collapsedSet, int limit, Sort sort) throws IOException {
  int numGroups = collapsedSet.size();
  groups = new LongObjectHashMap<>(numGroups);
  DocIdSetIterator iterator = new BitSetIterator(groupBits, 0); // cost is not useful here
  int group;
  while ((group = iterator.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
    groups.put(group, getCollector(limit, sort));
  }

  this.collapsedSet = collapsedSet;
  this.groupBits = groupBits;
  this.docValues = docValues;
  if(docValues instanceof MultiDocValues.MultiSortedDocValues) {
    this.multiSortedDocValues = (MultiDocValues.MultiSortedDocValues)docValues;
    this.ordinalMap = multiSortedDocValues.mapping;
  }
}
 
Example 10   Project: lucene-solr   File: Grouping.java
@Override
protected Collector createFirstPassCollector() throws IOException {
  DocSet groupFilt = searcher.getDocSet(query);
  int groupDocsToCollect = getMax(groupOffset, docsPerGroup, maxDoc);
  Collector subCollector;
  if (withinGroupSort == null || withinGroupSort.equals(Sort.RELEVANCE)) {
    subCollector = topCollector = TopScoreDocCollector.create(groupDocsToCollect, Integer.MAX_VALUE);
  } else {
    topCollector = TopFieldCollector.create(searcher.weightSort(withinGroupSort), groupDocsToCollect, Integer.MAX_VALUE);
    if (needScores) {
      maxScoreCollector = new MaxScoreCollector();
      subCollector = MultiCollector.wrap(topCollector, maxScoreCollector);
    } else {
      subCollector = topCollector;
    }
  }
  collector = new FilterCollector(groupFilt, subCollector);
  return collector;
}
 
Example 11   Project: lucene-solr   File: TestDemoExpressions.java
/** tests same binding used more than once in an expression */
public void testTwoOfSameBinding() throws Exception {
  Expression expr = JavascriptCompiler.compile("_score + _score");
  
  SimpleBindings bindings = new SimpleBindings();
  bindings.add("_score", DoubleValuesSource.SCORES);
  
  Sort sort = new Sort(expr.getSortField(bindings, true));
  Query query = new TermQuery(new Term("body", "contents"));
  TopFieldDocs td = searcher.search(query, 3, sort, true);
  for (int i = 0; i < 3; i++) {
    FieldDoc d = (FieldDoc) td.scoreDocs[i];
    float expected = 2*d.score;
    float actual = ((Double)d.fields[0]).floatValue();
    assertEquals(expected, actual, 0d);
  }
}
 
Example 12   Project: dremio-oss   File: TestLuceneIndexer.java
@Test
public void testSearcherCache() throws Exception {
  try (LuceneSearchIndex index = new LuceneSearchIndex(null, "searcher-cache", true, CommitWrapper.NO_OP)) {
    for (int i = 0; i < 10; ++i) {
      final Document doc = new Document();
      doc.add(
        new StringField(CoreIndexedStore.ID_FIELD_NAME, new BytesRef(Integer.toString(i).getBytes()), Store.YES));
      doc.add(new StringField("user", "u1", Field.Store.YES));
      index.add(doc);
    }

    LuceneSearchIndex.SearchHandle searchHandle = index.createSearchHandle();

    // search without limit, returns 10 docs.
    Query query = new TermQuery(new Term("user", "u1"));
    List<Doc> docs = index.search(searchHandle, query, 1000, new Sort(), 0);
    assertEquals(10, docs.size());

    // no more docs, search should return empty.
    docs = index.searchAfter(searchHandle, query, 1000, new Sort(), docs.get(9));
    assertEquals(0, docs.size());

    searchHandle.close();
  }
}
 
Example 13   Project: lucene-solr   File: TestNumericTerms64.java
private void testSorting(int precisionStep) throws Exception {
  String field="field"+precisionStep;
  // 10 random tests, the index order is ascending,
  // so using a reverse sort field should return descending documents
  int num = TestUtil.nextInt(random(), 10, 20);
  for (int i = 0; i < num; i++) {
    long lower=(long)(random().nextDouble()*noDocs*distance)+startOffset;
    long upper=(long)(random().nextDouble()*noDocs*distance)+startOffset;
    if (lower>upper) {
      long a=lower; lower=upper; upper=a;
    }
    Query tq= LegacyNumericRangeQuery.newLongRange(field, precisionStep, lower, upper, true, true);
    TopDocs topDocs = searcher.search(tq, noDocs, new Sort(new SortField(field, SortField.Type.LONG, true)));
    if (topDocs.totalHits.value==0) continue;
    ScoreDoc[] sd = topDocs.scoreDocs;
    assertNotNull(sd);
    long last=searcher.doc(sd[0].doc).getField(field).numericValue().longValue();
    for (int j=1; j<sd.length; j++) {
      long act=searcher.doc(sd[j].doc).getField(field).numericValue().longValue();
      assertTrue("Docs should be sorted backwards", last>act );
      last=act;
    }
  }
}
 
Example 14   Project: crate   File: LuceneOrderedDocCollectorTest.java
private LuceneOrderedDocCollector collector(IndexSearcher searcher,
                                            List<LuceneCollectorExpression<?>> columnReferences,
                                            Query query,
                                            @Nullable Float minScore, boolean doDocScores) {
    return new LuceneOrderedDocCollector(
        new ShardId("dummy", UUIDs.base64UUID(), 0),
        searcher,
        query,
        minScore,
        doDocScores,
        2,
        RamAccounting.NO_ACCOUNTING,
        new CollectorContext(),
        f -> null,
        new Sort(SortField.FIELD_SCORE),
        columnReferences,
        columnReferences
    );
}
 
Example 15   Project: lucene-solr
public void testBasics() throws IOException {
  indexWriter.addDocument(newDoc("Yin yang, filter")); // filter out. test getTermToSpanLists reader 1-doc filter
  indexWriter.addDocument(newDoc("yin alone, Yin yang, yin gap yang"));
  initReaderSearcherHighlighter();

  //query:  -filter +"yin yang"
  BooleanQuery query = new BooleanQuery.Builder()
      .add(new TermQuery(new Term("body", "filter")), BooleanClause.Occur.MUST_NOT)
      .add(newPhraseQuery("body", "yin yang"), BooleanClause.Occur.MUST)
      .build();


  TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER);
  String[] snippets = highlighter.highlight("body", query, topDocs);
  if (highlighter.getFlags("body").contains(HighlightFlag.WEIGHT_MATCHES)) {
    assertArrayEquals(new String[]{"yin alone, <b>Yin yang</b>, yin gap yang"}, snippets);
  } else {
    assertArrayEquals(new String[]{"yin alone, <b>Yin</b> <b>yang</b>, yin gap yang"}, snippets);
  }
}
 
Example 16   Project: lucene-solr
public void testWithSameTermQuery() throws IOException {
  indexWriter.addDocument(newDoc("Yin yang, yin gap yang"));
  initReaderSearcherHighlighter();

  BooleanQuery query = new BooleanQuery.Builder()
      .add(new TermQuery(new Term("body", "yin")), BooleanClause.Occur.MUST)
      .add(newPhraseQuery("body", "yin yang"), BooleanClause.Occur.MUST)
      // add queries for other fields; we shouldn't highlight these because of that.
      .add(new TermQuery(new Term("title", "yang")), BooleanClause.Occur.SHOULD)
      .build();

  TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER);
  dupMatchAllowed.set(false); // We don't want duplicates from "Yin" being in TermQuery & PhraseQuery.
  String[] snippets = highlighter.highlight("body", query, topDocs);
  if (highlighter.getFlags("body").contains(HighlightFlag.WEIGHT_MATCHES)) {
    assertArrayEquals(new String[]{"<b>Yin yang</b>, <b>yin</b> gap yang"}, snippets);
  } else {
    assertArrayEquals(new String[]{"<b>Yin</b> <b>yang</b>, <b>yin</b> gap yang"}, snippets);
  }
}
 
Example 17   Project: lucene-solr   File: TestAddIndexes.java
public void testIllegalIndexSortChange2() throws Exception {
  Directory dir1 = newDirectory();
  IndexWriterConfig iwc1 = newIndexWriterConfig(new MockAnalyzer(random()));
  iwc1.setIndexSort(new Sort(new SortField("foo", SortField.Type.INT)));
  RandomIndexWriter w1 = new RandomIndexWriter(random(), dir1, iwc1);
  w1.addDocument(new Document());
  w1.commit();
  w1.addDocument(new Document());
  w1.commit();
  // so the index sort is in fact burned into the index:
  w1.forceMerge(1);
  w1.close();

  Directory dir2 = newDirectory();
  IndexWriterConfig iwc2 = newIndexWriterConfig(new MockAnalyzer(random()));
  iwc2.setIndexSort(new Sort(new SortField("foo", SortField.Type.STRING)));
  RandomIndexWriter w2 = new RandomIndexWriter(random(), dir2, iwc2);
  IndexReader r1 = DirectoryReader.open(dir1);
  String message = expectThrows(IllegalArgumentException.class, () -> {
      w2.addIndexes((SegmentReader) getOnlyLeafReader(r1));
    }).getMessage();
  assertEquals("cannot change index sort from <int: \"foo\"> to <string: \"foo\">", message);
  IOUtils.close(r1, dir1, w2, dir2);
}
 
Example 18   Project: lucene-solr   File: MergeState.java
private DocMap[] buildDocMaps(List<CodecReader> readers, Sort indexSort) throws IOException {

    if (indexSort == null) {
      // no index sort ... we only must map around deletions, and rebase to the merged segment's docID space
      return buildDeletionDocMaps(readers);
    } else {
      // do a merge sort of the incoming leaves:
      long t0 = System.nanoTime();
      DocMap[] result = MultiSorter.sort(indexSort, readers);
      if (result == null) {
        // already sorted so we can switch back to map around deletions
        return buildDeletionDocMaps(readers);
      } else {
        needsIndexSort = true;
      }
      long t1 = System.nanoTime();
      if (infoStream.isEnabled("SM")) {
        infoStream.message("SM", String.format(Locale.ROOT, "%.2f msec to build merge sorted DocMaps", (t1-t0)/1000000.0));
      }
      return result;
    }
  }
 
Example 19   Project: semantic-knowledge-graph   File: NodeNormalizer.java
private List<AggregationWaitable> buildWaitables(NodeContext context, RequestNode request) throws IOException {
    List<AggregationWaitable> runners = new LinkedList<>();
    FacetFieldAdapter adapter = new FacetFieldAdapter(context, request.type);
    if(request.values != null && adapter.hasExtension())
    {
        for (int k = 0; k < request.values.length; ++k)
        {
            // load required docListAndSet once and only if necessary
            if (context.queryDomainList == null)
            {
                context.queryDomainList =
                        context.req.getSearcher().getDocListAndSet(new MatchAllDocsQuery(),
                                context.queryDomain, Sort.INDEXORDER, 0, 0);
            }
            String facetQuery = buildFacetQuery(adapter.baseField, request.values[k].toLowerCase());
            runners.add(new AggregationWaitable(context, adapter, facetQuery, adapter.field, k, DEFAULT_NORM_LIMIT));
        }
    }
    return runners;
}
 
Example 20   Project: lucene-solr   File: TestBackwardsCompatibility.java
public void testSortedIndex() throws Exception {
  for(String name : oldSortedNames) {
    Path path = createTempDir("sorted");
    InputStream resource = TestBackwardsCompatibility.class.getResourceAsStream(name + ".zip");
    assertNotNull("Sorted index index " + name + " not found", resource);
    TestUtil.unzip(resource, path);

    // TODO: more tests
    Directory dir = newFSDirectory(path);

    DirectoryReader reader = DirectoryReader.open(dir);
    assertEquals(1, reader.leaves().size());
    Sort sort = reader.leaves().get(0).reader().getMetaData().getSort();
    assertNotNull(sort);
    assertEquals("<long: \"dateDV\">!", sort.toString());
    reader.close();

    // this will confirm the docs really are sorted:
    TestUtil.checkIndex(dir);
    dir.close();
  }
}
 
Example 21   Project: lucene-solr   File: TestDemoExpressions.java
private void doTestLotsOfBindings(int n) throws Exception {
  SimpleBindings bindings = new SimpleBindings();    
  StringBuilder sb = new StringBuilder();
  for (int i = 0; i < n; i++) {
    if (i > 0) {
      sb.append("+");
    }
    sb.append("x" + i);
    bindings.add("x" + i, DoubleValuesSource.SCORES);
  }
  
  Expression expr = JavascriptCompiler.compile(sb.toString());
  Sort sort = new Sort(expr.getSortField(bindings, true));
  Query query = new TermQuery(new Term("body", "contents"));
  TopFieldDocs td = searcher.search(query, 3, sort, true);
  for (int i = 0; i < 3; i++) {
    FieldDoc d = (FieldDoc) td.scoreDocs[i];
    float expected = n*d.score;
    float actual = ((Double)d.fields[0]).floatValue();
    assertEquals(expected, actual, 0d);
  }
}
 
Example 22   Project: lucene-solr
public void testBasics() throws Exception {
  RandomIndexWriter iw = new RandomIndexWriter(random(), dir, indexAnalyzer);

  Field body = new Field("body", "", fieldType);
  Document doc = new Document();
  doc.add(body);

  body.setStringValue("This is a test. Just a test highlighting from postings. Feel free to ignore.");
  iw.addDocument(doc);
  body.setStringValue("Highlighting the first term. Hope it works.");
  iw.addDocument(doc);

  IndexReader ir = iw.getReader();
  iw.close();

  IndexSearcher searcher = newSearcher(ir);
  UnifiedHighlighter highlighter = randomUnifiedHighlighter(searcher, indexAnalyzer);
  Query query = new IntervalQuery("body", Intervals.term("highlighting"));
  TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER);
  assertEquals(2, topDocs.totalHits.value);
  String snippets[] = highlighter.highlight("body", query, topDocs);
  assertEquals(2, snippets.length);
  assertEquals("Just a test <b>highlighting</b> from postings. ", snippets[0]);
  assertEquals("<b>Highlighting</b> the first term. ", snippets[1]);
  ir.close();
}
 
源代码23 项目: lucene-solr   文件: TestAddIndexes.java
public void testIllegalIndexSortChange1() throws Exception {
  Directory dir1 = newDirectory();
  IndexWriterConfig iwc1 = newIndexWriterConfig(new MockAnalyzer(random()));
  iwc1.setIndexSort(new Sort(new SortField("foo", SortField.Type.INT)));
  RandomIndexWriter w1 = new RandomIndexWriter(random(), dir1, iwc1);
  w1.addDocument(new Document());
  w1.commit();
  w1.addDocument(new Document());
  w1.commit();
  // so the index sort is in fact burned into the index:
  w1.forceMerge(1);
  w1.close();

  Directory dir2 = newDirectory();
  IndexWriterConfig iwc2 = newIndexWriterConfig(new MockAnalyzer(random()));
  iwc2.setIndexSort(new Sort(new SortField("foo", SortField.Type.STRING)));
  RandomIndexWriter w2 = new RandomIndexWriter(random(), dir2, iwc2);
  String message = expectThrows(IllegalArgumentException.class, () -> {
      w2.addIndexes(dir1);
    }).getMessage();
  assertEquals("cannot change index sort from <int: \"foo\"> to <string: \"foo\">", message);
  IOUtils.close(dir1, w2, dir2);
}
 
Example 24   Project: lucene-solr
public void testMultiplePassages() throws Exception {
  RandomIndexWriter iw = new RandomIndexWriter(random(), dir, indexAnalyzer);

  Field body = new Field("body", "", fieldType);
  Document doc = new Document();
  doc.add(body);

  body.setStringValue("This is a test. Just a test highlighting from postings. Feel free to ignore.");
  iw.addDocument(doc);
  body.setStringValue("This test is another test. Not a good sentence. Test test test test.");
  iw.addDocument(doc);

  IndexReader ir = iw.getReader();
  iw.close();

  IndexSearcher searcher = newSearcher(ir);
  UnifiedHighlighter highlighter = randomUnifiedHighlighter(searcher, indexAnalyzer);
  Query query = new IntervalQuery("body", Intervals.term("test"));
  TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER);
  assertEquals(2, topDocs.totalHits.value);
  String snippets[] = highlighter.highlight("body", query, topDocs, 2);
  assertEquals(2, snippets.length);
  assertEquals("This is a <b>test</b>. Just a <b>test</b> highlighting from postings. ", snippets[0]);
  assertEquals("This <b>test</b> is another <b>test</b>. ... <b>Test</b> <b>test</b> <b>test</b> <b>test</b>.", snippets[1]);
  ir.close();
}
 
Example 25   Project: lucene-solr   File: DefaultSolrCoreState.java
public Sort getMergePolicySort() throws IOException {
  lock(iwLock.readLock());
  try {
    if (indexWriter != null) {
      final MergePolicy mergePolicy = indexWriter.getConfig().getMergePolicy();
      if (mergePolicy instanceof SortingMergePolicy) {
        return ((SortingMergePolicy)mergePolicy).getSort();
      }
    }
  } finally {
    iwLock.readLock().unlock();
  }
  return null;
}
 
Example 26   Project: lucene-solr   File: TopGroupsResultTransformer.java
protected ScoreDoc[] transformToNativeShardDoc(List<NamedList<Object>> documents, Sort groupSort, String shard,
                                               IndexSchema schema) {
  ScoreDoc[] scoreDocs = new ScoreDoc[documents.size()];
  int j = 0;
  for (NamedList<Object> document : documents) {
    Object docId = document.get(ID);
    if (docId != null) {
      docId = docId.toString();
    } else {
      log.error("doc {} has null 'id'", document);
    }
    Float score = (Float) document.get("score");
    if (score == null) {
      score = Float.NaN;
    }
    Object[] sortValues = null;
    Object sortValuesVal = document.get("sortValues");
    if (sortValuesVal != null) {
      sortValues = ((List) sortValuesVal).toArray();
      for (int k = 0; k < sortValues.length; k++) {
        SchemaField field = groupSort.getSort()[k].getField() != null
            ? schema.getFieldOrNull(groupSort.getSort()[k].getField()) : null;
        sortValues[k] = ShardResultTransformerUtils.unmarshalSortValue(sortValues[k], field);
      }
    } else {
      log.debug("doc {} has null 'sortValues'", document);
    }
    scoreDocs[j++] = new ShardDoc(score, sortValues, docId, shard);
  }
  return scoreDocs;
}
 
Example 27   Project: lucene-solr   File: QueryElevationComponent.java
private SortSpec modifySortSpec(SortSpec current, boolean forceElevation, ElevationComparatorSource comparator) {
  boolean modify = false;
  SortField[] currentSorts = current.getSort().getSort();
  List<SchemaField> currentFields = current.getSchemaFields();

  ArrayList<SortField> sorts = new ArrayList<>(currentSorts.length + 1);
  List<SchemaField> fields = new ArrayList<>(currentFields.size() + 1);

  // Perhaps force it to always sort by score
  if (forceElevation && currentSorts[0].getType() != SortField.Type.SCORE) {
    sorts.add(new SortField("_elevate_", comparator, true));
    fields.add(null);
    modify = true;
  }
  for (int i = 0; i < currentSorts.length; i++) {
    SortField sf = currentSorts[i];
    if (sf.getType() == SortField.Type.SCORE) {
      sorts.add(new SortField("_elevate_", comparator, !sf.getReverse()));
      fields.add(null);
      modify = true;
    }
    sorts.add(sf);
    fields.add(currentFields.get(i));
  }
  return modify ?
          new SortSpec(new Sort(sorts.toArray(new SortField[0])),
                  fields,
                  current.getCount(),
                  current.getOffset())
          : null;
}
 
Example 28   Project: lucene-solr   File: Grouping.java
@Override
protected Collector createFirstPassCollector() throws IOException {
  // Ok we don't want groups, but do want a total count
  if (actualGroupsToFind <= 0) {
    fallBackCollector = new TotalHitCountCollector();
    return fallBackCollector;
  }

  groupSort = groupSort == null ? Sort.RELEVANCE : groupSort;
  firstPass = new FirstPassGroupingCollector<>(newSelector(), searcher.weightSort(groupSort), actualGroupsToFind);
  return firstPass;
}
 
Example 29   Project: lucene-solr   File: BlockGroupingTest.java
public void testTopLevelSort() throws IOException {

    Shard shard = new Shard();
    indexRandomDocs(shard.writer);
    IndexSearcher searcher = shard.getIndexSearcher();

    Sort sort = new Sort(new SortField("length", SortField.Type.LONG));

    Query blockEndQuery = new TermQuery(new Term("blockEnd", "true"));
    GroupingSearch grouper = new GroupingSearch(blockEndQuery);
    grouper.setGroupDocsLimit(10);
    grouper.setGroupSort(sort);     // groups returned sorted by length, chapters within group sorted by relevancy

    Query topLevel = new TermQuery(new Term("text", "grandmother"));
    TopGroups<?> tg = grouper.search(searcher, topLevel, 0, 5);

    // The sort value of the top doc in the top group should be the same as the sort value
    // of the top result from the same search done with no grouping
    TopDocs topDoc = searcher.search(topLevel, 1, sort);
    assertEquals(((FieldDoc)topDoc.scoreDocs[0]).fields[0], tg.groups[0].groupSortValues[0]);

    for (int i = 0; i < tg.groups.length; i++) {
      String bookName = searcher.doc(tg.groups[i].scoreDocs[0].doc).get("book");
      // The contents of each group should be equal to the results of a search for
      // that group alone, sorted by score
      Query filtered = new BooleanQuery.Builder()
          .add(topLevel, BooleanClause.Occur.MUST)
          .add(new TermQuery(new Term("book", bookName)), BooleanClause.Occur.FILTER)
          .build();
      TopDocs td = searcher.search(filtered, 10);
      assertScoreDocsEquals(td.scoreDocs, tg.groups[i].scoreDocs);
      if (i > 1) {
        assertSortsBefore(tg.groups[i - 1], tg.groups[i]);
      }
    }

    shard.close();

  }
 
Example 30   Project: lucene-solr   File: TestUnifiedHighlighter.java
public void testOneSentence() throws Exception {
  RandomIndexWriter iw = new RandomIndexWriter(random(), dir, indexAnalyzer);

  Field body = new Field("body", "", fieldType);
  Document doc = new Document();
  doc.add(body);

  body.setStringValue("This is a test.");
  iw.addDocument(doc);
  body.setStringValue("Test a one sentence document.");
  iw.addDocument(doc);

  IndexReader ir = iw.getReader();
  iw.close();

  IndexSearcher searcher = newSearcher(ir);
  UnifiedHighlighter highlighter = randomUnifiedHighlighter(searcher, indexAnalyzer);
  Query query = new TermQuery(new Term("body", "test"));
  TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER);
  assertEquals(2, topDocs.totalHits.value);
  String snippets[] = highlighter.highlight("body", query, topDocs);
  assertEquals(2, snippets.length);
  assertEquals("This is a <b>test</b>.", snippets[0]);
  assertEquals("<b>Test</b> a one sentence document.", snippets[1]);

  ir.close();
}
 