Code Examples for Class com.mongodb.client.model.Projections

The following examples show how to use the com.mongodb.client.model.Projections API, or you can follow each project link to view the full source code on GitHub.
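
Before the project examples, here is a minimal self-contained sketch of the most common Projections builders. It assumes a local mongod on the default port and a hypothetical users collection whose documents carry name, email, and tags fields:

import static com.mongodb.client.model.Projections.*;

import com.mongodb.client.MongoClients;
import com.mongodb.client.MongoCollection;
import org.bson.Document;
import org.bson.conversions.Bson;

public class ProjectionsSketch {
    public static void main(String[] args) {
        MongoCollection<Document> users = MongoClients.create("mongodb://localhost:27017")
                .getDatabase("test").getCollection("users");

        // fields() merges several projection clauses into one Bson document
        Bson projection = fields(
                excludeId(),              // {_id: 0}
                include("name", "email"), // {name: 1, email: 1}
                slice("tags", 3));        // first three elements of the tags array

        for (Document d : users.find().projection(projection)) {
            System.out.println(d.toJson());
        }
    }
}

computed(), used heavily in the aggregation examples below, builds a {field: expression} clause for Aggregates.project stages.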

Example 1  Project: biliob_backend   File: AuthorServiceImpl.java
@Override
public ResponseEntity getTopAuthor() {
    Calendar c = Calendar.getInstance();
    c.setTimeZone(TimeZone.getTimeZone("CTT")); // "CTT" is a legacy three-letter ID that maps to Asia/Shanghai (UTC+8)
    c.add(Calendar.HOUR, 7);
    Date cDate = c.getTime();
    AggregateIterable<Document> r = mongoClient.getDatabase("biliob").getCollection("author")
            .aggregate(Arrays.asList(sort(descending("cFans")), limit(2),
                    project(Projections.fields(Projections.excludeId(),
                            Projections.include("name", "face", "official"),
                            // keep only the array elements whose datetime is after cDate
                            Projections.computed("data",
                                    new Document().append("$filter",
                                            new Document().append("input", "$data")
                                                    .append("as", "eachData")
                                                    .append("cond", new Document().append("$gt",
                                                            Arrays.asList("$$eachData.datetime",
                                                                    cDate)))))))));
    ArrayList<Document> result = new ArrayList<>(2);
    for (Document document : r) {
        result.add(document);
    }

    return ResponseEntity.ok(result);
}
 
Example 2  Project: biliob_backend   File: AuthorServiceImpl.java
@Override
public ResponseEntity getLatestTopAuthorData() {
    AggregateIterable<Document> r = mongoClient.getDatabase("biliob").getCollection("author")
            .aggregate(Arrays.asList(sort(descending("cFans")), limit(2),
                    project(Projections.fields(Projections.excludeId(),
                            Projections.include("name", "face", "official"),
                            Projections.computed("data",
                                    new Document("$slice", Arrays.asList("$data", 1)))))));
    ArrayList<Document> result = new ArrayList<>(2);
    for (Document document : r) {
        result.add(document);
    }
    return ResponseEntity.ok(result);
}
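
A note on the $slice usage above: inside an aggregation $project stage, $slice must take its aggregation-expression form, {$slice: ["$data", 1]}, which is why the snippet builds the Document by hand. The driver's built-in helper Projections.slice("data", 1) produces the find-projection form {data: {$slice: 1}} instead, so it fits find().projection(...) but not Aggregates.project.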
 
Example 3  Project: ditto   File: MongoThingsSearchPersistence.java
@Override
public Source<Metadata, NotUsed> sudoStreamMetadata(final EntityId lowerBound) {
    final Bson notDeletedFilter = Filters.exists(FIELD_DELETE_AT, false);
    final Bson filter = lowerBound.isDummy()
            ? notDeletedFilter
            : Filters.and(notDeletedFilter, Filters.gt(FIELD_ID, lowerBound.toString()));
    final Bson relevantFieldsProjection =
            Projections.include(FIELD_ID, FIELD_REVISION, FIELD_POLICY_ID, FIELD_POLICY_REVISION,
                    FIELD_PATH_MODIFIED);
    final Bson sortById = Sorts.ascending(FIELD_ID);
    final Publisher<Document> publisher = collection.find(filter)
            .projection(relevantFieldsProjection)
            .sort(sortById);
    return Source.fromPublisher(publisher).map(MongoThingsSearchPersistence::readAsMetadata);
}
 
Example 4  Project: kafka-connect-mongodb   File: DatabaseReader.java
private FindIterable<Document> find(int page){
    final FindIterable<Document> documents = oplog
            .find(query)
            .sort(new Document("$natural", 1))
            .skip(page * batchSize)
            .limit(batchSize)
            .projection(Projections.include("ts", "op", "ns", "o"))
            .cursorType(CursorType.TailableAwait);
    return documents;
}
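
This example tails the replica-set oplog: sorting on $natural returns entries in insertion order, the projection trims each entry down to its timestamp (ts), operation type (op), namespace (ns), and operation body (o), and CursorType.TailableAwait keeps the cursor open so iteration blocks until new entries arrive.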
 
Example 5  Project: EDDI   File: DifferConversationStore.java
List<String> getAllDifferConversationIds() {
    List<String> ret = new LinkedList<>();

    var includeConversationIdField = Projections.include(CONVERSATION_ID_FIELD);
    var documents = collection.find().projection(includeConversationIdField);
    for (var conversationInfo : documents) {
        ret.add(conversationInfo.getConversationId());
    }

    return ret;
}
 
Example 6  Project: beam   File: FindQuery.java
@Override
public MongoCursor<Document> apply(MongoCollection<Document> collection) {
  return collection
      .find()
      .filter(filters())
      .limit(limit())
      .projection(Projections.include(projection()))
      .iterator();
}
 
Example 7  Project: repositoryminer   File: SnapshotAnalysisPlugin.java
/**
 * This method is responsible for preparing the repository to run the plugin,
 * and should be called only once.
 * 
 * @param repositoryKey
 *            the repository key
 * @throws IOException
 */
public void init(String repositoryKey) throws IOException {
	Document repoDoc = new RepositoryDAO().findByKey(repositoryKey, Projections.include("_id", "path", "scm"));
	if (repoDoc == null) {
		throw new RepositoryMinerException("Repository with the key " + repositoryKey + " does not exist");
	}

	scm = SCMFactory.getSCM(SCMType.valueOf(repoDoc.getString("scm")));
	repositoryId = repoDoc.getObjectId("_id");
	tmpRepository = RMFileUtils.copyFolderToTmp(repoDoc.getString("path"),
			StringUtils.encodeToSHA1(repositoryId.toHexString()));

	scm.open(tmpRepository);
}
 
Example 8  Project: repositoryminer
private static void updateCommits() {
	LOG.info("Start commits extraction process.");
	Set<String> commits = new HashSet<String>(scm.getCommitsNames());
	List<String> commitsDb = new ArrayList<>();
	
	CommitDAO commitDao = new CommitDAO();
	for (Document doc : commitDao.findByRepository(repository.getId(), Projections.include("hash"))) {
		commitsDb.add(doc.getString("hash"));
	}

	// removes the commits already saved and delete the commits that were modified.
	for (String commitName : commitsDb) {
		if (!commits.remove(commitName)) {
			commitDao.delete(commitName, repository.getId());
		}
	}
	
	// saves the new/modified commits
	if (commits.size() > 0) {
		List<Document> documents = new ArrayList<>();
		for (Commit commit : scm.getCommits(commits)) {
			documents.add(commit.toDocument());
			contributos.add(commit.getAuthor());
			contributos.add(commit.getCommitter());
		}
		commitDao.insertMany(documents);
	}
	LOG.info("Commits extraction process finished.");
}
 
Example 9  Project: repositoryminer   File: RepositoryMinerMetrics.java
private void checkDuplicatedAnalysis(String hash) {
	CodeAnalysisReportDAO configDao = new CodeAnalysisReportDAO();
	Document doc = configDao.findByCommitHash(hash, Projections.include("_id"));
	if (doc != null) {
		configDao.deleteById(doc.getObjectId("_id"));
		new CodeAnalysisDAO().deleteByReport(doc.getObjectId("_id"));
	}
}
 
Example 10  Project: repositoryminer
private void checkDuplicatedAnalysis(String hash) {
    TechnicalDebtReportDAO configDao = new TechnicalDebtReportDAO();
    Document doc = configDao.findByCommitHash(hash, Projections.include("_id"));
    if (doc != null) {
        configDao.deleteById(doc.getObjectId("_id"));
        new TechnicalDebtDAO().deleteByReport(doc.getObjectId("_id"));
    }
}
 
Example 11  Project: repositoryminer   File: FindBugsCheker.java
@SuppressWarnings("unchecked")
@Override
public void check(String commit) {
	FindBugsDAO dao = new FindBugsDAO();
	List<Document> analysisDoc = dao.findByCommit(commit, Projections.include("filename", "bugs.category"));

	for (Document fileDoc : analysisDoc) {
		TDItem tdItem = searchFile(fileDoc.getString("filename"));
		List<Document> bugs = (List<Document>) fileDoc.get("bugs");

		int specificBugs = 0;
		for (Document bug : bugs) {
			String category = bug.getString("category");
			if (category.equals("MT_CORRECTNESS")) {
				addTDIndicator(tdItem, MULTITHREAD_CORRECTNESS, 1);
				specificBugs++;
			} else if (category.equals("PERFORMANCE")) {
				addTDIndicator(tdItem, SLOW_ALGORITHM, 1);
				specificBugs++;
			}
		}

		if ((bugs.size() - specificBugs) > 0) {
			addTDIndicator(tdItem, AUTOMATIC_STATIC_ANALYSIS_ISSUES, bugs.size() - specificBugs);
		}
	}
}
 
Example 12  Project: repositoryminer   File: CheckStyleChecker.java
@SuppressWarnings("unchecked")
@Override
public void check(String commit) {
	CheckstyleAuditDAO dao = new CheckstyleAuditDAO();
	List<Document> analysisDoc = dao.findByCommit(commit, Projections.include("filename", "style_problems.line"));

	for (Document fileDoc : analysisDoc) {
		TDItem tdItem = searchFile(fileDoc.getString("filename"));
		addTDIndicator(tdItem, CODE_WITHOUT_STANDARDS, ((List<Document>) fileDoc.get("style_problems")).size());
	}
}
 
Example 13  Project: repositoryminer   File: CPDChecker.java
@SuppressWarnings("unchecked")
@Override
public void check(String commit) {
	CPDDAO dao = new CPDDAO();
	List<Document> analysisDoc = dao.findByCommit(commit, Projections.include("filename", "occurrences.filename"));

	for (Document doc : analysisDoc) {
		for (Document occurrence : (List<Document>) doc.get("occurrences", List.class)) {
			TDItem tdItem = searchFile(occurrence.getString("filename"));
			addTDIndicator(tdItem, DUPLICATED_CODE, 1);
		}
	}
}
 
Example 14  Project: repositoryminer   File: ExCommentChecker.java
@SuppressWarnings("unchecked")
@Override
public void check(String commit) {
	ExCommentDAO dao = new ExCommentDAO();
	List<Document> analysisDoc = dao.findByCommit(commit, Projections.include("filename",
			"comments.patterns.tdtype"));
	
	for (Document fileDoc : analysisDoc) {
		TDItem tdItem = searchFile(fileDoc.getString("filename"));
	
		for (Document comment : (List<Document>) fileDoc.get("comments")) {
			for (Document pattern : (List<Document>) comment.get("patterns")) {
				String tdtype = pattern.getString("tdtype").replace(' ', '_').toUpperCase();

				if (tdtype.length() == 0) {
					addTDIndicator(tdItem, COMMENT_ANALYSIS_UNKNOWN_DEBT, 1);
				} else {
					addTDIndicator(tdItem, TDIndicator.getTDIndicator("COMMENT_ANALYSIS_"+tdtype), 1);
				}
			}
		}
	}
}
 
Example 15  Project: tutorials   File: AggregationLiveTest.java
@Test
public void givenCountryCollection_whenNeighborsCalculated_thenMaxIsFifteenInChina() {
    Bson borderingCountriesCollection = project(Projections.fields(
            Projections.excludeId(),
            Projections.include("name"),
            Projections.computed("borderingCountries", Projections.computed("$size", "$borders"))));

    int maxValue = collection.aggregate(Arrays.asList(borderingCountriesCollection, group(null, Accumulators.max("max", "$borderingCountries"))))
        .first()
        .getInteger("max");

    assertEquals(15, maxValue);

    Document maxNeighboredCountry = collection.aggregate(Arrays.asList(borderingCountriesCollection, match(Filters.eq("borderingCountries", maxValue))))
        .first();
    assertTrue(maxNeighboredCountry.containsValue("China"));

}
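
One detail worth noting in this test: the inner Projections.computed("$size", "$borders") is just a terse way of building the expression document {$size: "$borders"}; new Document("$size", "$borders") would produce the same BSON and is arguably clearer.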
 
Example 16  Project: rya   File: AggregationPipelineQueryNode.java
/**
 * Add a join with an individual {@link StatementPattern} to the pipeline.
 * @param sp The statement pattern to join with
 * @return true if the join was successfully added to the pipeline.
 */
public boolean joinWith(final StatementPattern sp) {
    Preconditions.checkNotNull(sp);
    // 1. Determine shared variables and new variables
    final StatementVarMapping spMap = new StatementVarMapping(sp, varToOriginalName);
    final NavigableSet<String> sharedVars = new ConcurrentSkipListSet<>(spMap.varNames());
    sharedVars.retainAll(assuredBindingNames);
    // 2. Join on one shared variable
    final String joinKey = sharedVars.pollFirst();
    final String collectionName = collection.getNamespace().getCollectionName();
    Bson join;
    if (joinKey == null) {
        return false;
    }
    else {
        join = Aggregates.lookup(collectionName,
                HASHES + "." + joinKey,
                spMap.hashField(joinKey),
                JOINED_TRIPLE);
    }
    pipeline.add(join);
    // 3. Unwind the joined triples so each document represents a binding
    //   set (solution) from the base branch and a triple that may match.
    pipeline.add(Aggregates.unwind("$" + JOINED_TRIPLE));
    // 4. (Optional) If there are any shared variables that weren't used as
    //   the join key, project all existing fields plus a new field that
    //   tests the equality of those shared variables.
    final Document matchOpts = getMatchExpression(sp, JOINED_TRIPLE);
    if (!sharedVars.isEmpty()) {
        final List<Bson> eqTests = new LinkedList<>();
        for (final String varName : sharedVars) {
            final String oldField = valueFieldExpr(varName);
            final String newField = joinFieldExpr(spMap.valueField(varName));
            final Bson eqTest = new Document("$eq", Arrays.asList(oldField, newField));
            eqTests.add(eqTest);
        }
        final Bson eqProjectOpts = Projections.fields(
                Projections.computed(FIELDS_MATCH, Filters.and(eqTests)),
                Projections.include(JOINED_TRIPLE, VALUES, HASHES, TYPES, LEVEL, TIMESTAMP));
        pipeline.add(Aggregates.project(eqProjectOpts));
        matchOpts.put(FIELDS_MATCH, true);
    }
    // 5. Filter for solutions whose triples match the joined statement
    //  pattern, and, if applicable, whose additional shared variables
    //  match the current solution.
    pipeline.add(Aggregates.match(matchOpts));
    // 6. Project the results to include variables from the new SP (with
    // appropriate renaming) and variables referenced only in the base
    // pipeline (with previous names).
    final Bson finalProjectOpts = new StatementVarMapping(sp, varToOriginalName)
            .getProjectExpression(assuredBindingNames,
                    str -> joinFieldExpr(str));
    assuredBindingNames.addAll(spMap.varNames());
    bindingNames.addAll(spMap.varNames());
    pipeline.add(Aggregates.project(finalProjectOpts));
    return true;
}
 
Example 17  Project: rya   File: AggregationPipelineQueryNode.java
/**
 * Add a SPARQL projection or multi-projection operation to the pipeline.
 * The number of documents produced by the pipeline after this operation
 * will be the number of documents entering this stage (the number of
 * intermediate results) multiplied by the number of
 * {@link ProjectionElemList}s supplied here. Empty projections are
 * unsupported; if any of the supplied projections binds zero variables,
 * the pipeline will be unchanged and the method will return false.
 * @param projections One or more projections, i.e. mappings from the result
 *  at this stage of the query into a set of variables.
 * @return true if the projection(s) were added to the pipeline.
 */
public boolean project(final Iterable<ProjectionElemList> projections) {
    if (projections == null || !projections.iterator().hasNext()) {
        return false;
    }
    final List<Bson> projectOpts = new LinkedList<>();
    final Set<String> bindingNamesUnion = new HashSet<>();
    Set<String> bindingNamesIntersection = null;
    for (final ProjectionElemList projection : projections) {
        if (projection.getElements().isEmpty()) {
            // Empty projections are unsupported -- fail when seen
            return false;
        }
        final Document valueDoc = new Document();
        final Document hashDoc = new Document();
        final Document typeDoc = new Document();
        final Set<String> projectionBindingNames = new HashSet<>();
        for (final ProjectionElem elem : projection.getElements()) {
            String to = elem.getTargetName();
            // If the 'to' name is invalid, replace it internally
            if (!isValidFieldName(to)) {
                to = replace(to);
            }
            String from = elem.getSourceName();
            // If the 'from' name is invalid, use the internal substitute
            if (varToOriginalName.containsValue(from)) {
                from = varToOriginalName.inverse().get(from);
            }
            projectionBindingNames.add(to);
            if (to.equals(from)) {
                valueDoc.append(to, 1);
                hashDoc.append(to, 1);
                typeDoc.append(to, 1);
            }
            else {
                valueDoc.append(to, valueFieldExpr(from));
                hashDoc.append(to, hashFieldExpr(from));
                typeDoc.append(to, typeFieldExpr(from));
            }
        }
        bindingNamesUnion.addAll(projectionBindingNames);
        if (bindingNamesIntersection == null) {
            bindingNamesIntersection = new HashSet<>(projectionBindingNames);
        }
        else {
            bindingNamesIntersection.retainAll(projectionBindingNames);
        }
        projectOpts.add(new Document()
                .append(VALUES, valueDoc)
                .append(HASHES, hashDoc)
                .append(TYPES, typeDoc)
                .append(LEVEL, "$" + LEVEL)
                .append(TIMESTAMP, "$" + TIMESTAMP));
    }
    if (projectOpts.size() == 1) {
        pipeline.add(Aggregates.project(projectOpts.get(0)));
    }
    else {
        final String listKey = "PROJECTIONS";
        final Bson projectIndividual = Projections.fields(
                Projections.computed(VALUES, "$" + listKey + "." + VALUES),
                Projections.computed(HASHES, "$" + listKey + "." + HASHES),
                Projections.computed(TYPES, "$" + listKey + "." + TYPES),
                Projections.include(LEVEL),
                Projections.include(TIMESTAMP));
        pipeline.add(Aggregates.project(Projections.computed(listKey, projectOpts)));
        pipeline.add(Aggregates.unwind("$" + listKey));
        pipeline.add(Aggregates.project(projectIndividual));
    }
    assuredBindingNames.clear();
    bindingNames.clear();
    assuredBindingNames.addAll(bindingNamesIntersection);
    bindingNames.addAll(bindingNamesUnion);
    return true;
}
 
Example 18  Project: rya   File: AggregationPipelineQueryNode.java
/**
 * Add a SPARQL filter to the pipeline, if possible. A filter eliminates
 * results that don't satisfy a given condition. Not all conditional
 * expressions are supported. If unsupported expressions are used in the
 * filter, the pipeline will remain unchanged and this method will return
 * false. Currently only supports binary {@link Compare} conditions among
 * variables and/or literals.
 * @param condition The filter condition
 * @return True if the filter was successfully converted into a pipeline
 *  step, false otherwise.
 */
public boolean filter(final ValueExpr condition) {
    if (condition instanceof Compare) {
        final Compare compare = (Compare) condition;
        final Compare.CompareOp operator = compare.getOperator();
        final Object leftArg = valueFieldExpr(compare.getLeftArg());
        final Object rightArg = valueFieldExpr(compare.getRightArg());
        if (leftArg == null || rightArg == null) {
            // unsupported value expression, can't convert filter
            return false;
        }
        final String opFunc;
        switch (operator) {
        case EQ:
            opFunc = "$eq";
            break;
        case NE:
            opFunc = "$ne";
            break;
        case LT:
            opFunc = "$lt";
            break;
        case LE:
            opFunc = "$lte"; // MongoDB's "less than or equal" operator is $lte
            break;
        case GT:
            opFunc = "$gt";
            break;
        case GE:
            opFunc = "$gte"; // and "greater than or equal" is $gte
            break;
        default:
            // unrecognized comparison operator, can't convert filter
            return false;
        }
        final Document compareDoc = new Document(opFunc, Arrays.asList(leftArg, rightArg));
        pipeline.add(Aggregates.project(Projections.fields(
                Projections.computed("FILTER", compareDoc),
                Projections.include(VALUES, HASHES, TYPES, LEVEL, TIMESTAMP))));
        pipeline.add(Aggregates.match(new Document("FILTER", true)));
        pipeline.add(Aggregates.project(Projections.fields(
                Projections.include(VALUES, HASHES, TYPES, LEVEL, TIMESTAMP))));
        return true;
    }
    return false;
}
 
Example 19  Project: rya   File: AggregationPipelineQueryNode.java
/**
 * Given that the current state of the pipeline produces data that can be
 * interpreted as triples, add a project step to map each result from the
 * intermediate result structure to a structure that can be stored in the
 * triple store. Does not modify the internal pipeline, which will still
 * produce intermediate results suitable for query evaluation.
 * @param timestamp Attach this timestamp to the resulting triples.
 * @param requireNew If true, add an additional step to check constructed
 *  triples against existing triples and only include new ones in the
 *  result. Adds a potentially expensive $lookup step.
 * @throws IllegalStateException if the results produced by the current
 *  pipeline do not have variable names allowing them to be interpreted as
 *  triples (i.e. "subject", "predicate", and "object").
 */
public List<Bson> getTriplePipeline(final long timestamp, final boolean requireNew) {
    if (!assuredBindingNames.contains(SUBJECT)
            || !assuredBindingNames.contains(PREDICATE)
            || !assuredBindingNames.contains(OBJECT)) {
        throw new IllegalStateException("Current pipeline does not produce "
                + "records that can be converted into triples.\n"
                + "Required variable names: <" + SUBJECT + ", " + PREDICATE
                + ", " + OBJECT + ">\nCurrent variable names: "
                + assuredBindingNames);
    }
    final List<Bson> triplePipeline = new LinkedList<>(pipeline);
    final List<Bson> fields = new LinkedList<>();
    fields.add(Projections.computed(SUBJECT, valueFieldExpr(SUBJECT)));
    fields.add(Projections.computed(SUBJECT_HASH, hashFieldExpr(SUBJECT)));
    fields.add(Projections.computed(PREDICATE, valueFieldExpr(PREDICATE)));
    fields.add(Projections.computed(PREDICATE_HASH, hashFieldExpr(PREDICATE)));
    fields.add(Projections.computed(OBJECT, valueFieldExpr(OBJECT)));
    fields.add(Projections.computed(OBJECT_HASH, hashFieldExpr(OBJECT)));
    fields.add(Projections.computed(OBJECT_TYPE,
            ConditionalOperators.ifNull(typeFieldExpr(OBJECT), DEFAULT_TYPE)));
    fields.add(Projections.computed(CONTEXT, DEFAULT_CONTEXT));
    fields.add(Projections.computed(STATEMENT_METADATA, DEFAULT_METADATA));
    fields.add(DEFAULT_DV);
    fields.add(Projections.computed(TIMESTAMP, new Document("$literal", timestamp)));
    fields.add(Projections.computed(LEVEL, new Document("$add", Arrays.asList("$" + LEVEL, 1))));
    triplePipeline.add(Aggregates.project(Projections.fields(fields)));
    if (requireNew) {
        // Prune any triples that already exist in the data store
        final String collectionName = collection.getNamespace().getCollectionName();
        final Bson includeAll = Projections.include(SUBJECT, SUBJECT_HASH,
                PREDICATE, PREDICATE_HASH, OBJECT, OBJECT_HASH,
                OBJECT_TYPE, CONTEXT, STATEMENT_METADATA,
                DOCUMENT_VISIBILITY, TIMESTAMP, LEVEL);
        final List<Bson> eqTests = new LinkedList<>();
        eqTests.add(new Document("$eq", Arrays.asList("$$this." + PREDICATE_HASH, "$" + PREDICATE_HASH)));
        eqTests.add(new Document("$eq", Arrays.asList("$$this." + OBJECT_HASH, "$" + OBJECT_HASH)));
        final Bson redundantFilter = new Document("$filter", new Document("input", "$" + JOINED_TRIPLE)
                .append("as", "this").append("cond", new Document("$and", eqTests)));
        triplePipeline.add(Aggregates.lookup(collectionName, SUBJECT_HASH,
                SUBJECT_HASH, JOINED_TRIPLE));
        final String numRedundant = "REDUNDANT";
        triplePipeline.add(Aggregates.project(Projections.fields(includeAll,
                Projections.computed(numRedundant, new Document("$size", redundantFilter)))));
        triplePipeline.add(Aggregates.match(Filters.eq(numRedundant, 0)));
        triplePipeline.add(Aggregates.project(Projections.fields(includeAll)));
    }
    return triplePipeline;
}
 
Example 20
State stateInDb(Process process) {
    return State.valueOf(client.getDatabase(DB_NAME).getCollection("processes")
            .find(Filters.eq("_id", process.getId()))
            .projection(Projections.include("state"))
            .first()
            .get("state", String.class));
}
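
This final snippet shows the common pattern of projecting a single field and reading it straight off the first matching document. Keep in mind that first() returns null when the filter matches nothing, so production code should null-check before calling get(...).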
 