java.util.TreeSet#forEach() 源码实例 Demo

下面列出了 java.util.TreeSet#forEach() 的实例代码,或者点击链接到 GitHub 查看源代码,也可以在右侧发表评论。

/**
 * Flattens a map of per-dimension attribute maps into a single map.
 *
 * <p>The "application" dimension is applied first so that entries from all other
 * dimensions can override it. The remaining dimensions are applied in order of
 * ascending key length (ties broken alphabetically), so longer / more specific
 * dimension keys win on conflicting attribute names.
 *
 * @param items per-dimension attribute maps, keyed by dimension name
 * @return a new flat map containing the merged attributes
 */
private Map<String, Object> mergeDimensionItems(Map<String, Map<String, Object>> items) {
  Map<String, Object> flatMap = new HashMap<>();
  // Apply "application" first so every other dimension can override its entries.
  Map<String, Object> application = items.get("application");
  if (application != null) {
    flatMap.putAll(application);
  }
  // BUG FIX: the original comparator was (o1.length() - o2.length()). TreeSet
  // treats comparator-equal elements as duplicates, so any two distinct keys of
  // the same length collided and all but one were silently dropped (their
  // attributes never merged). Breaking ties with natural ordering keeps the
  // shortest-first intent while guaranteeing no key is lost. Using
  // Integer.compare semantics via comparingInt also avoids int-subtraction
  // overflow.
  TreeSet<String> sortedKeys =
      new TreeSet<>(Comparator.comparingInt(String::length).thenComparing(Comparator.naturalOrder()));
  for (String key : items.keySet()) {
    // Skip "application" here instead of keySet().remove(...): the original
    // removed the entry from the CALLER's map as a side effect.
    if (!"application".equals(key)) {
      sortedKeys.add(key);
    }
  }
  sortedKeys.forEach(key -> flatMap.putAll(items.get(key)));
  return flatMap;
}
 
源代码2 项目: lucene-solr   文件: SimUtils.java
/**
 * Prepare collection and node / host names for redaction.
 * @param clusterState cluster state
 */
/**
 * Prepare collection and node / host names for redaction.
 * @param clusterState cluster state
 */
public static RedactionUtils.RedactionContext getRedactionContext(ClusterState clusterState) {
  RedactionUtils.RedactionContext ctx = new RedactionUtils.RedactionContext();
  // Sorted copy of the live nodes for a deterministic redaction mapping.
  TreeSet<String> liveNodes = new TreeSet<>(clusterState.getLiveNodes());
  for (String nodeName : liveNodes) {
    String baseUrl = Utils.getBaseUrlForNodeName(nodeName, "http");
    try {
      URL url = new URL(baseUrl);
      // protocol format
      String hostColonPort = url.getHost() + ":" + url.getPort();
      ctx.addName(hostColonPort, RedactionUtils.NODE_REDACTION_PREFIX);
      // node name format
      String hostUnderscorePort = url.getHost() + "_" + url.getPort() + "_";
      ctx.addEquivalentName(hostColonPort, hostUnderscorePort, RedactionUtils.NODE_REDACTION_PREFIX);
    } catch (MalformedURLException e) {
      // Fall back to redacting the raw URL strings (both protocols).
      log.warn("Invalid URL for node name {}, replacing including protocol and path", nodeName, e);
      ctx.addName(baseUrl, RedactionUtils.NODE_REDACTION_PREFIX);
      ctx.addEquivalentName(baseUrl,
          Utils.getBaseUrlForNodeName(nodeName, "https"),
          RedactionUtils.NODE_REDACTION_PREFIX);
    }
  }
  // Collection names, likewise in sorted order.
  TreeSet<String> collectionNames = new TreeSet<>(clusterState.getCollectionStates().keySet());
  for (String collection : collectionNames) {
    ctx.addName(collection, RedactionUtils.COLL_REDACTION_PREFIX);
  }
  return ctx;
}
 
源代码3 项目: picard   文件: FindMendelianViolations.java
/**
 * Writes one VCF per family containing that family's mendelian violations,
 * sorted by the input's contig order. No-op unless {@code VCF_DIR} is set.
 *
 * @param result the detector result holding the per-family violation records
 */
private void writeAllViolations(final MendelianViolationDetector.Result result) {
    if (VCF_DIR != null) {
        LOG.info(String.format("Writing family violation VCFs to %s/", VCF_DIR.getAbsolutePath()));

        // Comparator based on the input header's contig ordering, shared by all families.
        final VariantContextComparator vcComparator = new VariantContextComparator(inputHeader.get().getContigLines());
        final Set<VCFHeaderLine> headerLines = new LinkedHashSet<>(inputHeader.get().getMetaDataInInputOrder());

        // Extra INFO fields describing the violation and the original allele counts.
        headerLines.add(new VCFInfoHeaderLine(MendelianViolationDetector.MENDELIAN_VIOLATION_KEY, 1, VCFHeaderLineType.String, "Type of mendelian violation."));
        headerLines.add(new VCFInfoHeaderLine(MendelianViolationDetector.ORIGINAL_AC, VCFHeaderLineCount.A, VCFHeaderLineType.Integer, "Original AC"));
        headerLines.add(new VCFInfoHeaderLine(MendelianViolationDetector.ORIGINAL_AF, VCFHeaderLineCount.A, VCFHeaderLineType.Float, "Original AF"));
        headerLines.add(new VCFInfoHeaderLine(MendelianViolationDetector.ORIGINAL_AN, 1, VCFHeaderLineType.Integer, "Original AN"));

        for (final PedFile.PedTrio trio : pedFile.get().values()) {
            final File outputFile = new File(VCF_DIR, IOUtil.makeFileNameSafe(trio.getFamilyId() + IOUtil.VCF_FILE_EXTENSION));
            LOG.info(String.format("Writing %s violation VCF to %s", trio.getFamilyId(), outputFile.getAbsolutePath()));

            // BUG FIX: the original never closed the writer on an exception thrown by
            // writeHeader()/add(), leaking the file handle and potentially leaving a
            // truncated VCF. try-with-resources guarantees close() for each family.
            try (final VariantContextWriter out = new VariantContextWriterBuilder()
                    .setOutputFile(outputFile)
                    .unsetOption(INDEX_ON_THE_FLY)
                    .build()) {

                final VCFHeader newHeader = new VCFHeader(headerLines, CollectionUtil.makeList(trio.getMaternalId(), trio.getPaternalId(), trio.getIndividualId()));
                // TreeSet orders the violations by contig position before writing.
                final TreeSet<VariantContext> orderedViolations = new TreeSet<>(vcComparator);

                orderedViolations.addAll(result.violations().get(trio.getFamilyId()));
                out.writeHeader(newHeader);
                orderedViolations.forEach(out::add);
            }
        }
    }
}
 
源代码4 项目: che   文件: TopologicalSort.java
/**
 * Given the function for determining the predecessors of the nodes, return the list of the nodes
 * in topological order. I.e. all predecessors will be placed sooner in the list than their
 * successors. Note that the input collection is assumed to contain no duplicate entries as
 * determined by the equality of the {@code ID} type. If such duplicates are present in the input
 * collection, the output list will only contain the first instance of the duplicates from the
 * input collection.
 *
 * <p>The implemented sort algorithm is stable. If there is no relationship between 2 nodes, they
 * retain the relative position to each other as they had in the provided collection (e.g. if "a"
 * preceded "b" in the original collection and there is no relationship between them (as
 * determined by the predecessor function), the "a" will still precede "b" in the resulting list.
 * Other nodes may be inserted in between them though in the result).
 *
 * <p>The cycles in the graph determined by the predecessor function are ignored and nodes in the
 * cycle are placed into the output list in the source order.
 *
 * @param nodes the collection of nodes
 * @return the list of nodes sorted in topological order
 */
public List<N> sort(Collection<N> nodes) {
  // the linked hashmap is important to retain the original order of elements unless required
  // by the dependencies between nodes
  LinkedHashMap<ID, NodeInfo<ID, N>> nodeInfos = newLinkedHashMapWithExpectedSize(nodes.size());
  List<NodeInfo<ID, N>> results = new ArrayList<>(nodes.size());

  // First pass: build a NodeInfo per node (keyed by its ID) recording its source
  // position, its predecessor set, and — on each predecessor's info — a successor link.
  int pos = 0;
  boolean needsSorting = false;
  for (N node : nodes) {
    ID nodeID = identityExtractor.apply(node);
    // we need the set to be modifiable, so let's make our own
    Set<ID> preds = new HashSet<>(directPredecessorsExtractor.apply(node));
    // any node with at least one predecessor forces the full topological pass
    needsSorting = needsSorting || !preds.isEmpty();

    // computeIfAbsent: the info may already exist if this node was seen earlier
    // as some other node's predecessor; fill in its details now.
    NodeInfo<ID, N> nodeInfo = nodeInfos.computeIfAbsent(nodeID, __ -> new NodeInfo<>());
    nodeInfo.id = nodeID;
    nodeInfo.predecessors = preds;
    nodeInfo.sourcePosition = pos++;
    nodeInfo.node = node;

    for (ID pred : preds) {
      // note that this means that we're inserting the nodeinfos into the map in an incorrect
      // order and will have to sort them in the source order before we do the actual topo sort.
      // We take that cost because we gamble on there being no dependencies in the nodes as a
      // common case.
      NodeInfo<ID, N> predNode = nodeInfos.computeIfAbsent(pred, __ -> new NodeInfo<>());
      if (predNode.successors == null) {
        predNode.successors = new HashSet<>();
      }
      predNode.successors.add(nodeID);
    }
  }

  if (needsSorting) {
    // because of the predecessors, we have put the nodeinfos in the map in an incorrect order.
    // we need to correct that before we try to sort...
    // NOTE(review): a predecessor ID that never appears in `nodes` keeps the default
    // sourcePosition of 0; two such nodes would compare equal here and the TreeSet
    // would keep only one — presumably the predecessor function only yields IDs that
    // are present in the input. TODO confirm against callers.
    TreeSet<NodeInfo<ID, N>> tmp = new TreeSet<>(Comparator.comparingInt(a -> a.sourcePosition));
    tmp.addAll(nodeInfos.values());
    nodeInfos.clear();
    tmp.forEach(ni -> nodeInfos.put(ni.id, ni));

    // now we're ready to produce the results
    // (delegates to the private sort(map, list) overload, defined elsewhere in this class)
    sort(nodeInfos, results);
  } else {
    // we don't need to sort, but we need to keep the expected behavior of removing the duplicates
    results = new ArrayList<>(nodeInfos.values());
  }

  // unwrap the NodeInfos back to the caller's node type, preserving order
  return results.stream().map(ni -> ni.node).collect(Collectors.toList());
}