org.apache.lucene.search.TotalHitCountCollector#getTotalHits() Code Examples

The following are example usages of org.apache.lucene.search.TotalHitCountCollector#getTotalHits(), collected from open-source projects; the full source of each example is available on GitHub in the listed project.
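 
Before the project examples, a minimal sketch of the pattern they all share: a TotalHitCountCollector is passed to IndexSearcher#search, and getTotalHits() then returns the number of matching documents. The countHits helper and its reader/query parameters are illustrative assumptions rather than code from any of the projects below, and the sketch assumes a Lucene version in which search(Query, Collector) is still available.

import java.io.IOException;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TotalHitCountCollector;

/** Illustrative helper: counts documents matching the query without scoring or ranking them. */
static int countHits(IndexReader reader, Query query) throws IOException {
  IndexSearcher searcher = new IndexSearcher(reader);
  TotalHitCountCollector collector = new TotalHitCountCollector();
  searcher.search(query, collector);      // the collector only increments a counter per matching document
  return collector.getTotalHits();        // exact hit count seen during the search
}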

Example 1   Project: lucene-solr   File: SimpleNaiveBayesClassifier.java
/**
 * Counts the number of documents in the index that have at least one value for the 'class' field.
 *
 * @return the number of documents having a value for the 'class' field
 * @throws IOException if accessing term vectors or searching fails
 */
protected int countDocsWithClass() throws IOException {
  Terms terms = MultiTerms.getTerms(this.indexReader, this.classFieldName);
  int docCount;
  if (terms == null || terms.getDocCount() == -1) { // in case codec doesn't support getDocCount
    TotalHitCountCollector classQueryCountCollector = new TotalHitCountCollector();
    BooleanQuery.Builder q = new BooleanQuery.Builder();
    q.add(new BooleanClause(new WildcardQuery(new Term(classFieldName, String.valueOf(WildcardQuery.WILDCARD_STRING))), BooleanClause.Occur.MUST));
    if (query != null) {
      q.add(query, BooleanClause.Occur.MUST);
    }
    indexSearcher.search(q.build(),
        classQueryCountCollector);
    docCount = classQueryCountCollector.getTotalHits();
  } else {
    docCount = terms.getDocCount();
  }
  return docCount;
}
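 
A note on the fallback above: Terms#getDocCount() returns -1 when the codec does not store a per-field document count, so the classifier counts the documents itself by running a search with a TotalHitCountCollector. On Lucene versions that offer IndexSearcher#count(Query), the same fallback could be written without an explicit collector; a sketch assuming the same indexSearcher, classFieldName and query fields as the method above:

BooleanQuery.Builder q = new BooleanQuery.Builder();
q.add(new WildcardQuery(new Term(classFieldName, String.valueOf(WildcardQuery.WILDCARD_STRING))), BooleanClause.Occur.MUST);
if (query != null) {
  q.add(query, BooleanClause.Occur.MUST);
}
int docCount = indexSearcher.count(q.build());   // counts hits internally, equivalent to the collector-based version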
 
Example 2   Project: lucene-solr   File: SimpleNaiveBayesClassifier.java
/**
 * Returns the number of documents of the input class (from the whole index or from a subset)
 * that contain the word (in a specific field, or in all the fields if none is selected).
 * @param word the token produced by the analyzer
 * @param term the term representing the class
 * @return the number of documents of the input class
 * @throws IOException if a low-level I/O problem occurs
 */
private int getWordFreqForClass(String word, Term term) throws IOException {
  BooleanQuery.Builder booleanQuery = new BooleanQuery.Builder();
  BooleanQuery.Builder subQuery = new BooleanQuery.Builder();
  for (String textFieldName : textFieldNames) {
    subQuery.add(new BooleanClause(new TermQuery(new Term(textFieldName, word)), BooleanClause.Occur.SHOULD));
  }
  booleanQuery.add(new BooleanClause(subQuery.build(), BooleanClause.Occur.MUST));
  booleanQuery.add(new BooleanClause(new TermQuery(term), BooleanClause.Occur.MUST));
  if (query != null) {
    booleanQuery.add(query, BooleanClause.Occur.MUST);
  }
  TotalHitCountCollector totalHitCountCollector = new TotalHitCountCollector();
  indexSearcher.search(booleanQuery.build(), totalHitCountCollector);
  return totalHitCountCollector.getTotalHits();
}
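 
Note how the query is assembled: the per-field SHOULD clauses are built in subQuery and then added to the outer query as a single MUST clause, so a counted document must contain the word in at least one of the text fields, must carry the class term, and must also match the optional user query. Roughly, the final query reads as (textField1:word OR textField2:word OR ...) AND classField:class AND userQuery.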
 
Example 3   Project: chronix.server   File: ChronixRetentionHandler.java
/**
 * Searches the index to check whether older documents exist and updates the Solr query response.
 *
 * @param queryString - the query used to find older documents
 * @param req - the Solr query request information
 * @param rsp - the Solr query response information
 * @return true if the hit count is greater than zero, otherwise false
 * @throws SyntaxError if the query cannot be parsed
 * @throws IOException if searching the index fails
 */
private boolean olderDocumentsExists(String queryString, SolrQueryRequest req, SolrQueryResponse rsp) throws SyntaxError, IOException {
    String defType = req.getParams().get(QueryParsing.DEFTYPE, QParserPlugin.DEFAULT_QTYPE);

    QParser queryParser = QParser.getParser(queryString, defType, req);
    Query query = queryParser.getQuery();

    TotalHitCountCollector totalHitCountCollector = new TotalHitCountCollector();
    req.getSearcher().search(query, totalHitCountCollector);

    rsp.add("query", String.format("%s:[* TO NOW-%s]", queryField, timeSeriesAge));
    rsp.add("queryTechnical", queryString);
    rsp.add("removedDocuments", totalHitCountCollector.getTotalHits());

    return totalHitCountCollector.getTotalHits() != 0;
}
 
Example 4   Project: lucene-solr   File: HeatmapFacetCounterTest.java
private int countMatchingDocsAtLevel(Point pt, int facetLevel) throws IOException {
  // we use IntersectsPrefixTreeQuery directly so that we can specify the exact level to go to.
  RecursivePrefixTreeStrategy strategy = (RecursivePrefixTreeStrategy) this.strategy;
  Query filter = new IntersectsPrefixTreeQuery(
      pt, strategy.getFieldName(), grid, facetLevel, grid.getMaxLevels());
  final TotalHitCountCollector collector = new TotalHitCountCollector();
  indexSearcher.search(filter, collector);
  cellsValidated++;
  if (collector.getTotalHits() > 0) {
    cellValidatedNonZero++;
  }
  return collector.getTotalHits();
}
 
Example 5
/**
 * Returns the number of documents of the input class (from the whole index or from a subset)
 * that contain the word (in a specific field, or in all the fields if none is selected).
 *
 * @param word      the token produced by the analyzer
 * @param fieldName the field the word is coming from
 * @param term      the class term
 * @return the number of documents of the input class
 * @throws java.io.IOException if there is a low-level I/O error
 */
private int getWordFreqForClass(String word, String fieldName, Term term) throws IOException {
  BooleanQuery.Builder booleanQuery = new BooleanQuery.Builder();
  BooleanQuery.Builder subQuery = new BooleanQuery.Builder();
  subQuery.add(new BooleanClause(new TermQuery(new Term(fieldName, word)), BooleanClause.Occur.SHOULD));
  booleanQuery.add(new BooleanClause(subQuery.build(), BooleanClause.Occur.MUST));
  booleanQuery.add(new BooleanClause(new TermQuery(term), BooleanClause.Occur.MUST));
  if (query != null) {
    booleanQuery.add(query, BooleanClause.Occur.MUST);
  }
  TotalHitCountCollector totalHitCountCollector = new TotalHitCountCollector();
  indexSearcher.search(booleanQuery.build(), totalHitCountCollector);
  return totalHitCountCollector.getTotalHits();
}
 
Example 6   Project: lucene-solr   File: CommandHandler.java
/**
 * Invokes search with the specified filter and collector.
 * If a time limit has been specified, the collector is wrapped in a TimeLimitingCollector.
 */
private void searchWithTimeLimiter(Query query, 
                                   ProcessedFilter filter, 
                                   Collector collector) throws IOException {
  if (queryCommand.getTimeAllowed() > 0 ) {
    collector = new TimeLimitingCollector(collector, TimeLimitingCollector.getGlobalCounter(), queryCommand.getTimeAllowed());
  }

  TotalHitCountCollector hitCountCollector = new TotalHitCountCollector();
  if (includeHitCount) {
    collector = MultiCollector.wrap(collector, hitCountCollector);
  }

  query = QueryUtils.combineQueryAndFilter(query, filter.filter);

  if (filter.postFilter != null) {
    filter.postFilter.setLastDelegate(collector);
    collector = filter.postFilter;
  }

  try {
    searcher.search(query, collector);
  } catch (TimeLimitingCollector.TimeExceededException | ExitableDirectoryReader.ExitingReaderException x) {
    partialResults = true;
    log.warn("Query: {}; {}", query, x.getMessage());
  }

  if (includeHitCount) {
    totalHitCount = hitCountCollector.getTotalHits();
  }
}
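 
The MultiCollector.wrap call in this example is what lets the hit count be gathered in the same pass as the primary collector. A standalone sketch of that idea, with illustrative searcher and query variables that are not part of the CommandHandler code:

TotalHitCountCollector hitCount = new TotalHitCountCollector();
TopScoreDocCollector topDocs = TopScoreDocCollector.create(10, Integer.MAX_VALUE);   // top 10 hits, exact counting
searcher.search(query, MultiCollector.wrap(topDocs, hitCount));
// topDocs.topDocs() holds the ranked page; hitCount.getTotalHits() holds the exact total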
 
Example 7   Project: lucene-solr   File: CachingNaiveBayesClassifier.java
private Map<BytesRef, Integer> getWordFreqForClassess(String word) throws IOException {

    Map<BytesRef, Integer> insertPoint;
    insertPoint = termCClassHitCache.get(word);

    // if we get the answer from the cache
    if (insertPoint != null) {
      if (!insertPoint.isEmpty()) {
        return insertPoint;
      }
    }

    Map<BytesRef, Integer> searched = new ConcurrentHashMap<>();

    // if we don't get the answer but the term is still relevant, we must run the search and insert the result into the cache
    if (insertPoint != null || !justCachedTerms) {
      for (BytesRef cclass : cclasses) {
        BooleanQuery.Builder booleanQuery = new BooleanQuery.Builder();
        BooleanQuery.Builder subQuery = new BooleanQuery.Builder();
        for (String textFieldName : textFieldNames) {
          subQuery.add(new BooleanClause(new TermQuery(new Term(textFieldName, word)), BooleanClause.Occur.SHOULD));
        }
        booleanQuery.add(new BooleanClause(subQuery.build(), BooleanClause.Occur.MUST));
        booleanQuery.add(new BooleanClause(new TermQuery(new Term(classFieldName, cclass)), BooleanClause.Occur.MUST));
        if (query != null) {
          booleanQuery.add(query, BooleanClause.Occur.MUST);
        }
        TotalHitCountCollector totalHitCountCollector = new TotalHitCountCollector();
        indexSearcher.search(booleanQuery.build(), totalHitCountCollector);

        int ret = totalHitCountCollector.getTotalHits();
        if (ret != 0) {
          searched.put(cclass, ret);
        }
      }
      if (insertPoint != null) {
        // threadsafe and concurrent write
        termCClassHitCache.put(word, searched);
      }
    }

    return searched;
  }
 
Example 8
private static int getTotalDocs(NodeContext context) throws IOException {
    TotalHitCountCollector collector = new TotalHitCountCollector();
    context.req.getSearcher().search(new MatchAllDocsQuery(), collector);
    return collector.getTotalHits();
}
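 
Because this last helper counts every document via a MatchAllDocsQuery, the same number is also available without running a search at all: IndexReader#numDocs() reports the number of live (non-deleted) documents, which is exactly what the match-all hit count returns. A sketch assuming the same NodeContext type; the method name is illustrative:

private static int getTotalDocsWithoutSearch(NodeContext context) {
    return context.req.getSearcher().getIndexReader().numDocs();   // live documents = match-all hit count
}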