java.util.SortedMap#forEach() Source Code Examples

The following lists example code for java.util.SortedMap#forEach(), collected from open-source projects; each entry names the project and file where the original source can be found on GitHub.
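
Before the project examples, here is a minimal, self-contained sketch (names are illustrative) of the behaviour these snippets rely on: forEach on a SortedMap such as TreeMap visits entries in ascending key order.

import java.util.SortedMap;
import java.util.TreeMap;

public class SortedMapForEachDemo {
    public static void main(String[] args) {
        SortedMap<String, Integer> wordCounts = new TreeMap<>();
        wordCounts.put("banana", 2);
        wordCounts.put("apple", 5);
        wordCounts.put("cherry", 1);

        // Entries are visited in ascending key order: apple=5, banana=2, cherry=1
        wordCounts.forEach((word, count) -> System.out.println(word + "=" + count));
    }
}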

Example 1   Project: gpmall   File: WeChatBuildRequest.java
/**
 * Converts the request parameters into an XML-formatted string.
 *
 * @param parameters the request parameters
 * @return the XML string
 */
@SuppressWarnings("rawtypes")
public static String getRequestXml(SortedMap<Object, Object> parameters) {
    StringBuffer sb = new StringBuffer();
    sb.append("<xml>");
    parameters.forEach((k,v) -> {
        if ("attach".equalsIgnoreCase((String) k) || "body".equalsIgnoreCase((String) k)
                || "sign".equalsIgnoreCase((String) k)) {
            sb.append("<" + k + ">" + "<![CDATA[" + v + "]]></" + k + ">");
        } else {
            sb.append("<" + k + ">" + v + "</" + k + ">");
        }
    });
    sb.append("</xml>");
    // StringBuffer#toString does not throw, so no try/catch is needed here
    return sb.toString();
}
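
For illustration, a hypothetical call to the method above (all parameter names and values are made up) shows that the XML elements are emitted in the map's ascending key order:

import java.util.SortedMap;
import java.util.TreeMap;

public class GetRequestXmlUsage {
    public static void main(String[] args) {
        // Hypothetical parameters, assuming the WeChatBuildRequest class above is on the classpath
        SortedMap<Object, Object> params = new TreeMap<>();
        params.put("appid", "wx0123456789");
        params.put("body", "test order");
        params.put("nonce_str", "ibuaiVcKdpRxkhJA");

        String xml = WeChatBuildRequest.getRequestXml(params);
        System.out.println(xml);
        // <xml><appid>wx0123456789</appid><body><![CDATA[test order]]></body><nonce_str>ibuaiVcKdpRxkhJA</nonce_str></xml>
    }
}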
 
public static Table<String, String, String> assignmentsToTable(
        SortedMap<String, SortedSet<SingleWorkerAssignment<Step2bGoldReasonAnnotator.SentenceLabel>>> assignments)
{
    TreeBasedTable<String, String, String> result = TreeBasedTable.create();

    assignments.forEach((unitID, singleWorkerAssignments) -> {
        singleWorkerAssignments.forEach(sentenceLabelSingleWorkerAssignment -> {
            String workerID = sentenceLabelSingleWorkerAssignment.getWorkerID();
            String label = sentenceLabelSingleWorkerAssignment.getLabel().toString();

            // update the table
            result.put(unitID, workerID, label);
        });
    });

    return result;
}
 
Example 3   Project: styx   File: PlainCliOutput.java
@Override
public void printStates(RunStateDataPayload runStateDataPayload) {
  SortedMap<WorkflowId, SortedSet<RunStateDataPayload.RunStateData>> groupedStates =
      CliUtil.groupStates(runStateDataPayload.activeStates());

  groupedStates.forEach((workflowId, value) -> value.forEach(runStateData -> {
    final StateData stateData = runStateData.stateData();
    System.out.println(String.format(
        "%s %s %s %s %s %d %s",
        workflowId.componentId(),
        workflowId.id(),
        runStateData.workflowInstance().parameter(),
        runStateData.state(),
        stateData.executionId().orElse("<no-execution-id>"),
        stateData.tries(),
        stateData.message().map(Message::line).orElse("No info")
    ));
  }));
}
 
Example 4   Project: hudi   File: DatadogReporter.java
@Override
public void report(
    SortedMap<String, Gauge> gauges,
    SortedMap<String, Counter> counters,
    SortedMap<String, Histogram> histograms,
    SortedMap<String, Meter> meters,
    SortedMap<String, Timer> timers) {
  final long now = clock.getTime() / 1000;
  final PayloadBuilder builder = new PayloadBuilder();

  builder.withMetricType(MetricType.gauge);
  gauges.forEach((metricName, metric) -> {
    builder.addGauge(prefix(metricName), now, (long) metric.getValue());
  });

  host.ifPresent(builder::withHost);
  tags.ifPresent(builder::withTags);

  client.send(builder.build());
}
 
Example 5   Project: onos   File: GroupsListCommand.java
@Override
protected void doExecute() {
    DeviceService deviceService = get(DeviceService.class);
    GroupService groupService = get(GroupService.class);
    SortedMap<Device, List<Group>> sortedGroups =
            getSortedGroups(deviceService, groupService);

    if (referencedOnly && unreferencedOnly) {
        print("Options -r and -u cannot be used at the same time");
        return;
    }

    if (outputJson()) {
        print("%s", json(sortedGroups));
    } else {
        sortedGroups.forEach((device, groups) -> printGroups(device.id(), groups));
    }
}
 
Example 6   Project: onos   File: VirtualFlowsListCommand.java
@Override
protected void doExecute() {
    CoreService coreService = get(CoreService.class);

    VirtualNetworkService vnetservice = get(VirtualNetworkService.class);
    DeviceService deviceService = vnetservice.get(NetworkId.networkId(networkId),
                                                  DeviceService.class);
    FlowRuleService service = vnetservice.get(NetworkId.networkId(networkId),
                                              FlowRuleService.class);
    contentFilter = new StringFilter(filter, StringFilter.Strategy.AND);

    compilePredicate();

    SortedMap<Device, List<FlowEntry>> flows = getSortedFlows(deviceService, service);

    if (outputJson()) {
        print("%s", json(flows.keySet(), flows));
    } else {
        flows.forEach((device, flow) -> printFlows(device, flow, coreService));
    }
}
 
Example 7   Project: gpmall   File: WeChatBuildRequest.java
/**
 * Computes the WeChat Pay signature ("sign").
 *
 * @param parameters the request parameters to be signed
 * @param key        the merchant's appsecret
 * @return the uppercase MD5 signature
 */
@SuppressWarnings("unchecked")
public static String createSign(SortedMap<Object, Object> parameters, String key) {
    StringBuffer sb = new StringBuffer();
    // All parameters included in the signature are iterated in ascending ASCII order (guaranteed by the SortedMap)
    parameters.forEach((k,v) ->{
        if (!"sign".equals(k) && !"key".equals(k) && null != v && !"".equals(v)) {
            sb.append(k + "=" + v + "&");
        }
    });
    sb.append("key=" + key);
    String sign = MD5Utils.GetMD5Code(sb.toString()).toUpperCase();
    return sign;
}
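
MD5Utils.GetMD5Code is project-specific, so here is a standalone sketch of the same signing idea using the JDK's MessageDigest; parameter names, values, and the secret are made up, and matching the project's exact encoding behaviour is not guaranteed:

import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.util.SortedMap;
import java.util.TreeMap;

public class SignSketch {
    public static void main(String[] args) throws Exception {
        // Hypothetical parameters; a TreeMap with String keys iterates in ascending ASCII order
        SortedMap<Object, Object> params = new TreeMap<>();
        params.put("appid", "wx0123456789");
        params.put("mch_id", "10000100");
        params.put("nonce_str", "ibuaiVcKdpRxkhJA");

        StringBuilder sb = new StringBuilder();
        params.forEach((k, v) -> sb.append(k).append('=').append(v).append('&'));
        sb.append("key=").append("my-merchant-secret"); // hypothetical appsecret

        // Standalone stand-in for the project's MD5Utils.GetMD5Code(...)
        MessageDigest md5 = MessageDigest.getInstance("MD5");
        byte[] digest = md5.digest(sb.toString().getBytes(StandardCharsets.UTF_8));
        StringBuilder hex = new StringBuilder();
        for (byte b : digest) {
            hex.append(String.format("%02x", b));
        }
        System.out.println(hex.toString().toUpperCase()); // the "sign" value
    }
}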
 
/**
 * Prints the transition table for the supplied word
 */
private static void printTransitions(String key, SortedMap<Double, String> probabilities) {
    System.out.println("Transitions for: " + key);
    System.out.println("/-------------+-------------\\");
    System.out.println("| Probability | Word        |");
    System.out.println("|-------------+-------------|");
    probabilities.forEach((p, w) -> System.out.format("|  %.4f     | %-12s|%n", p, w));
    System.out.println("\\-------------+-------------/");
}
 
Example 9   Project: onos   File: TableStatisticsCommand.java
@Override
protected void doExecute() {
    FlowRuleService flowService = get(FlowRuleService.class);
    DeviceService deviceService = get(DeviceService.class);
    SortedMap<Device, List<TableStatisticsEntry>> deviceTableStats =
            getSortedTableStats(deviceService, flowService);

    if (outputJson()) {
        print("%s", json(deviceTableStats.keySet(), deviceTableStats));
    } else {
        deviceTableStats.forEach((device, tableStats) -> printTableStats(device, tableStats));
    }
}
 
Example 10   Project: styx   File: GraphiteReporter.java
@Override
public void report(SortedMap<String, Gauge> gauges,
                   SortedMap<String, Counter> counters,
                   SortedMap<String, Histogram> histograms,
                   SortedMap<String, Meter> meters,
                   SortedMap<String, Timer> timers) {
    long timestamp = clock.getTime() / 1000;

    try {

        initConnection();
        gauges.forEach((name, gauge) ->
                doReport(name, gauge, timestamp, this::reportGauge));

        counters.forEach((name, counter) ->
                doReport(name, counter, timestamp, this::reportCounter));

        histograms.forEach((name, histogram) ->
                doReport(name, histogram, timestamp, this::reportHistogram));

        meters.forEach((name, meter) ->
                doReport(name, meter, timestamp, this::reportMetered));

        timers.forEach((name, timer) ->
                doReport(name, timer, timestamp, this::reportTimer));

        graphite.flush();
    } catch (Exception e) {
        LOGGER.error("Error reporting metrics" + e.getMessage(), e);
    } finally {
        try {
            graphite.close();
        } catch (IOException e1) {
            LOGGER.warn("Error closing Graphite", graphite, e1);
        }
    }
}
 
Example 11   Project: nifi   File: OperatingSystemDiagnosticTask.java
@Override
public DiagnosticsDumpElement captureDump(final boolean verbose) {
    final OperatingSystemMXBean os = ManagementFactory.getOperatingSystemMXBean();
    final List<String> details = new ArrayList<>();

    final NumberFormat numberFormat = NumberFormat.getInstance();

    try {
        final SortedMap<String, String> attributes = new TreeMap<>();

        final ObjectName osObjectName = os.getObjectName();
        final MBeanInfo mbeanInfo = ManagementFactory.getPlatformMBeanServer().getMBeanInfo(osObjectName);
        for (final MBeanAttributeInfo attributeInfo : mbeanInfo.getAttributes()) {
            final String attributeName = attributeInfo.getName();
            if (IGNORABLE_ATTRIBUTE_NAMES.contains(attributeName)) {
                continue;
            }

            final Object attributeValue = ManagementFactory.getPlatformMBeanServer().getAttribute(osObjectName, attributeName);

            if (attributeValue instanceof Number) {
                attributes.put(attributeName, numberFormat.format(attributeValue));
            } else {
                attributes.put(attributeName, String.valueOf(attributeValue));
            }
        }

        attributes.forEach((key, value) -> details.add(key + " : " + value));
    } catch (final Exception e) {
        logger.error("Failed to obtain Operating System details", e);
        return new StandardDiagnosticsDumpElement("Operating System / Hardware", Collections.singletonList("Failed to obtain Operating System details"));
    }

    return new StandardDiagnosticsDumpElement("Operating System / Hardware", details);
}
 
Example 12   Project: Mutters   File: OpenNLPIntentMatcher.java
@Override
protected SortedMap<Double, SortedSet<String>> generateSortedScoreMap(String[] utteranceTokens)
{
  DocumentCategorizerME intentCategorizer = new DocumentCategorizerME(model);
  SortedMap<Double, Set<String>> scores = intentCategorizer.sortedScoreMap(utteranceTokens);

  // convert to sorted set of intents
  SortedMap<Double, SortedSet<String>> sortedScores = new TreeMap<>();
  scores.forEach((score, intents) -> sortedScores.put(score, new TreeSet<>(intents)));

  return sortedScores;
}
 
Example 13   Project: batfish   File: BatfishCompressionTest.java
/**
 * Test the following invariant: if a FIB appears on concrete router “r”, then a corresponding
 * abstract FIB appears on one of these representatives. For example, if there is a concrete FIB
 * from C to D, then there should be an abstract FIB from A to B, where A is in representatives(C)
 * and B is in representatives(D).
 */
@Test
public void testCompressionFibs_compressibleNetwork() throws IOException {
  DataPlane origDataPlane = getDataPlane(compressibleNetwork());
  SortedMap<String, Configuration> compressedConfigs =
      compressNetwork(compressibleNetwork(), new HeaderSpace());
  DataPlane compressedDataPlane = getDataPlane(compressedConfigs);
  SortedMap<String, SortedMap<String, GenericRib<AnnotatedRoute<AbstractRoute>>>> origRibs =
      origDataPlane.getRibs();
  SortedMap<String, SortedMap<String, GenericRib<AnnotatedRoute<AbstractRoute>>>> compressedRibs =
      compressedDataPlane.getRibs();

  /* Compression removed a node */
  assertThat(compressedConfigs.entrySet(), hasSize(2));
  compressedConfigs.values().forEach(BatfishCompressionTest::assertIsCompressedConfig);
  compressedRibs.forEach(
      (hostname, compressedRibsByVrf) ->
          compressedRibsByVrf.forEach(
              (vrf, compressedRib) -> {
                GenericRib<AnnotatedRoute<AbstractRoute>> origRib =
                    origRibs.get(hostname).get(vrf);
                Set<AbstractRoute> origRoutes = origRib.getRoutes();
                Set<AbstractRoute> compressedRoutes = compressedRib.getRoutes();
                for (AbstractRoute route : compressedRoutes) {
                  /* Every compressed route should appear in original RIB */
                  assertThat(origRoutes, hasItem(route));
                }
              }));
}
 
Example 14   Project: batfish   File: RoutesAnswererUtil.java
/**
 * Returns a {@link Multiset} of {@link Row}s for all routes present in all RIBs
 *
 * @param ribs {@link Map} representing all RIBs of all nodes
 * @param matchingNodes {@link Set} of hostnames of nodes whose routes are to be returned
 * @param network {@link Prefix} of the network used to filter the routes
 * @param protocolSpec {@link RoutingProtocolSpecifier} used to filter the routes
 * @param vrfRegex Regex used to filter the VRF of routes
 * @param ipOwners {@link Map} of {@link Ip} to {@link Set} of owner nodes
 * @return {@link Multiset} of {@link Row}s representing the routes
 */
static <T extends AbstractRouteDecorator> Multiset<Row> getMainRibRoutes(
    SortedMap<String, SortedMap<String, GenericRib<T>>> ribs,
    Set<String> matchingNodes,
    @Nullable Prefix network,
    RoutingProtocolSpecifier protocolSpec,
    String vrfRegex,
    @Nullable Map<Ip, Set<String>> ipOwners) {
  Multiset<Row> rows = HashMultiset.create();
  Pattern compiledVrfRegex = Pattern.compile(vrfRegex);
  Map<String, ColumnMetadata> columnMetadataMap =
      getTableMetadata(RibProtocol.MAIN).toColumnMap();
  ribs.forEach(
      (node, vrfMap) -> {
        if (matchingNodes.contains(node)) {
          vrfMap.forEach(
              (vrfName, rib) -> {
                if (compiledVrfRegex.matcher(vrfName).matches()) {
                  rib.getRoutes().stream()
                      .filter(
                          route ->
                              (network == null || network.equals(route.getNetwork()))
                                  && protocolSpec.getProtocols().contains(route.getProtocol()))
                      .forEach(
                          route ->
                              rows.add(
                                  abstractRouteToRow(
                                      node, vrfName, route, columnMetadataMap, ipOwners)));
                }
              });
        }
      });
  return rows;
}
 
Example 15   Project: batfish   File: BidirectionalTracerouteAnswerer.java
@VisibleForTesting
static List<BidirectionalTrace> computeBidirectionalTraces(
    Set<Flow> flows, TracerouteEngine tracerouteEngine, boolean ignoreFilters) {
  SortedMap<Flow, List<TraceAndReverseFlow>> forwardTraces =
      tracerouteEngine.computeTracesAndReverseFlows(flows, ignoreFilters);

  Set<FlowAndSessions> reverseFlowsAndSessions =
      forwardTraces.values().stream()
          .flatMap(List::stream)
          .filter(tarf -> tarf.getReverseFlow() != null)
          .map(tarf -> new FlowAndSessions(tarf.getReverseFlow(), tarf.getNewFirewallSessions()))
          .collect(ImmutableSet.toImmutableSet());

  Map<FlowAndSessions, List<Trace>> reverseTraces =
      computeReverseTraces(tracerouteEngine, reverseFlowsAndSessions, ignoreFilters);

  List<BidirectionalTrace> result = new ArrayList<>();
  forwardTraces.forEach(
      (forwardFlow, forwardTraceAndReverseFlows) ->
          forwardTraceAndReverseFlows.forEach(
              forwardTraceAndReverseFlow -> {
                Trace forwardTrace = forwardTraceAndReverseFlow.getTrace();
                Flow reverseFlow = forwardTraceAndReverseFlow.getReverseFlow();
                Set<FirewallSessionTraceInfo> newSessions =
                    forwardTraceAndReverseFlow.getNewFirewallSessions();
                if (reverseFlow == null) {
                  result.add(
                      new BidirectionalTrace(forwardFlow, forwardTrace, newSessions, null, null));
                } else {
                  FlowAndSessions fas = new FlowAndSessions(reverseFlow, newSessions);
                  reverseTraces.get(fas).stream()
                      .map(
                          reverseTrace ->
                              new BidirectionalTrace(
                                  forwardFlow,
                                  forwardTrace,
                                  newSessions,
                                  reverseFlow,
                                  reverseTrace))
                      .forEach(result::add);
                }
              }));
  return result;
}
 
@Override
protected void doSetup(Context context) throws IOException, InterruptedException {
    Configuration conf = context.getConfiguration();
    mos = new MultipleOutputs(context);

    KylinConfig config;
    try {
        config = AbstractHadoopJob.loadKylinPropsAndMetadata();
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
    cols = config.getMrHiveDictColumnsExcludeRefColumns();


    String statPath = conf.get("partition.statistics.path");

    // Get the input file name; it follows the format colIndex-part-partitionNum, e.g. 1-part-000019
    FileSplit fileSplit = (FileSplit) context.getInputSplit();
    String[] arr = fileSplit.getPath().getName().split("-");
    int partitionNum = Integer.parseInt(arr[2]);
    colIndex = Integer.parseInt(arr[0]);
    colName = cols[colIndex];
    logger.info("Input fileName:{}, colIndex:{}, colName:{}, partitionNum:{}", fileSplit.getPath().getName(), colIndex, colName, partitionNum);

    // Last max dictionary value per column
    String lastMaxValuePath = conf.get("last.max.dic.value.path");
    logger.info("last.max.dic.value.path:" + lastMaxValuePath);
    long lastMaxDictValue = this.getLastMaxDicValue(conf, lastMaxValuePath);
    logger.info("last.max.dic.value.path:" + lastMaxValuePath + ",value=" + lastMaxDictValue);

    // Calculate this file's starting position: sum of the counts of all preceding partitions plus the column's last max dictionary value
    Map<Integer, TreeMap<Integer, Long>> allStats = getPartitionsCount(conf, statPath); //<colIndex,<reduceNum,count>>
    TreeMap<Integer, Long> partitionStats = allStats.get(colIndex);
    if (partitionNum != 0) {
        SortedMap<Integer, Long> subStat = partitionStats.subMap(0, true, partitionNum, false);
        subStat.forEach((k, v) -> {
            logger.info("Split num:{} and it's count:{}", k, v);
            start += v;
        });
    }
    start += lastMaxDictValue;
    logger.info("global dic.{}.split.num.{} build dict start offset is {}", colName, partitionNum, start);
}
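
The start-offset calculation above boils down to a subMap plus forEach over a TreeMap. A minimal, self-contained sketch with made-up counts (an AtomicLong stands in for the mapper's start field, since a lambda cannot mutate a local long):

import java.util.SortedMap;
import java.util.TreeMap;
import java.util.concurrent.atomic.AtomicLong;

public class StartOffsetSketch {
    public static void main(String[] args) {
        // Hypothetical per-partition record counts: <partitionNum, count>
        TreeMap<Integer, Long> partitionStats = new TreeMap<>();
        partitionStats.put(0, 100L);
        partitionStats.put(1, 250L);
        partitionStats.put(2, 80L);
        partitionStats.put(3, 300L);

        int partitionNum = 2;          // this task's partition
        long lastMaxDictValue = 1000L; // hypothetical previous max dictionary id

        // subMap(0, true, partitionNum, false) selects every partition before this one;
        // forEach then accumulates their counts in key order
        AtomicLong start = new AtomicLong(lastMaxDictValue);
        SortedMap<Integer, Long> subStat = partitionStats.subMap(0, true, partitionNum, false);
        subStat.forEach((k, v) -> start.addAndGet(v));

        System.out.println("start offset = " + start.get()); // 1000 + 100 + 250 = 1350
    }
}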
 
Example 17   Project: data-prep   File: XlsSchemaParser.java
/**
 * Return the columns metadata for the given sheet.
 *
 * @param sheet the sheet to look at.
 * @param datasetId the dataset id.
 * @return the columns metadata for the given sheet.
 */
private List<ColumnMetadata> parsePerSheet(Sheet sheet, String datasetId, FormulaEvaluator formulaEvaluator) {

    LOGGER.debug(Markers.dataset(datasetId), "parsing sheet '{}'", sheet.getSheetName());

    // Map<ColId, Map<RowId, type>>
    SortedMap<Integer, SortedMap<Integer, String>> cellsTypeMatrix =
            collectSheetTypeMatrix(sheet, formulaEvaluator);
    int averageHeaderSize = guessHeaderSize(cellsTypeMatrix);

    // Here we have type information for every row/col (yup, a matrix! :-) ),
    // so we can analyse it and guess the metadata (column type, header value)
    final List<ColumnMetadata> columnsMetadata = new ArrayList<>(cellsTypeMatrix.size());

    cellsTypeMatrix.forEach((colId, typePerRowMap) -> {

        Type type = guessColumnType(colId, typePerRowMap, averageHeaderSize);

        String headerText = null;
        if (averageHeaderSize == 1 && sheet.getRow(0) != null) {
            // so the header value is the first row of the column
            Cell headerCell = sheet.getRow(0).getCell(colId);
            headerText = XlsUtils.getCellValueAsString(headerCell, formulaEvaluator);
        }

        // header text cannot be null so use a default one
        if (StringUtils.isEmpty(headerText)) {
            // +1 because it starts from 0
            headerText = message("import.local.generated_column_name", colId + 1);
        }

        // FIXME what do we do if the header size is > 1? Concatenate all lines?
        columnsMetadata.add(ColumnMetadata.Builder //
                .column() //
                .headerSize(averageHeaderSize) //
                .name(headerText) //
                .type(type) //
                .build());

    });

    return columnsMetadata;
}
 
Example 18   Project: batfish   File: BatfishCompressionTest.java
/**
 * Test the following invariant: if a FIB appears on concrete router “r”, then a corresponding
 * abstract FIB appears on one of these representatives. For example, if there is a concrete FIB
 * from C to D, then there should be an abstract FIB from A to B, where A is in representatives(C)
 * and B is in representatives(D).
 */
@Test
public void testCompressionFibs_diamondNetwork() throws IOException {
  HeaderSpace line =
      HeaderSpace.builder().setDstIps(ImmutableList.of(IpWildcard.parse("4.4.4.4/32"))).build();
  SortedMap<String, Configuration> origConfigs = diamondNetwork();
  Batfish batfish = getBatfish(origConfigs);
  DataPlane origDataPlane = batfish.loadDataPlane(batfish.getSnapshot());
  Topology origTopology =
      new Topology(
          batfish.getTopologyProvider().getLayer3Topology(batfish.getSnapshot()).sortedEdges());

  /* Node A should have a route with C as a next hop. */
  Map<String, Map<String, Map<Edge, IpSpace>>> origArpTrueEdge =
      origDataPlane.getForwardingAnalysis().getArpTrueEdge();
  assertThat(
      origArpTrueEdge,
      hasEntry(
          equalTo("a"),
          hasEntry(
              equalTo(Configuration.DEFAULT_VRF_NAME),
              hasKey(hasTail(isNeighborOfNode(origTopology, "c"))))));

  // compress a new copy since it will get mutated.
  SortedMap<String, Configuration> compressedConfigs =
      new TreeMap<>(compressNetwork(diamondNetwork(), line));
  DataPlane compressedDataPlane = getDataPlane(compressedConfigs);

  compressedConfigs.values().forEach(BatfishCompressionTest::assertIsCompressedConfig);

  assertThat(compressedConfigs.values(), hasSize(3));

  SortedMap<String, SortedMap<String, GenericRib<AnnotatedRoute<AbstractRoute>>>> origRibs =
      origDataPlane.getRibs();
  SortedMap<String, SortedMap<String, GenericRib<AnnotatedRoute<AbstractRoute>>>> compressedRibs =
      compressedDataPlane.getRibs();
  compressedRibs.forEach(
      (hostname, compressedRibsByVrf) ->
          compressedRibsByVrf.forEach(
              (vrf, compressedRib) -> {
                GenericRib<AnnotatedRoute<AbstractRoute>> origRib =
                    origRibs.get(hostname).get(vrf);
                Set<AbstractRoute> origRoutes = origRib.getRoutes();
                Set<AbstractRoute> compressedRoutes = compressedRib.getRoutes();
                for (AbstractRoute route : compressedRoutes) {
                  /* Every compressed route should appear in original RIB */
                  assertThat(origRoutes, hasItem(route));
                }
              }));

  /* Compression removed B or C entirely (but not both) */
  assertThat(compressedRibs, either(not(hasKey("b"))).or(not(hasKey("c"))));
  assertThat(compressedRibs, either(hasKey("b")).or(hasKey("c")));

  String remains = compressedConfigs.containsKey("b") ? "b" : "c";

  /* The remaining node is unchanged. */
  assertThat(
      origRibs.get(remains).get(Configuration.DEFAULT_VRF_NAME).getRoutes(),
      equalTo(compressedRibs.get(remains).get(Configuration.DEFAULT_VRF_NAME).getRoutes()));
}
 
Example 19   Project: batfish   File: FlowTracer.java
/**
 * Perform a FIB lookup of {@code dstIp} on {@code fib} of {@code currentNodeName} and take
 * corresponding actions given {@code intraHopBreadcrumbs} already produced at this node. Use
 * {@code forwardOutInterfaceHandler} to handle forwarding action.
 */
@VisibleForTesting
void fibLookup(
    Ip dstIp,
    String currentNodeName,
    Fib fib,
    BiConsumer<FlowTracer, FibForward> forwardOutInterfaceHandler,
    Stack<Breadcrumb> intraHopBreadcrumbs) {
  // Loop detection
  Breadcrumb breadcrumb =
      new Breadcrumb(currentNodeName, _vrfName, _ingressInterface, _currentFlow);
  if (_breadcrumbs.contains(breadcrumb)) {
    buildLoopTrace();
    return;
  }
  if (intraHopBreadcrumbs.isEmpty()) {
    _breadcrumbs.push(breadcrumb);
  }
  try {
    Set<FibEntry> fibEntries = fib.get(dstIp);

    if (fibEntries.isEmpty()) {
      buildNoRouteTrace();
      return;
    }

    // Group traces by action (we do not want extra branching if there is branching
    // in FIB resolution)
    SortedMap<FibAction, Set<FibEntry>> groupedByFibAction =
        // Sort so that resulting traces will be in sensible deterministic order
        ImmutableSortedMap.copyOf(
            fibEntries.stream()
                .collect(Collectors.groupingBy(FibEntry::getAction, Collectors.toSet())),
            FibActionComparator.INSTANCE);

    // For every action corresponding to ECMP LPM FibEntry
    groupedByFibAction.forEach(
        ((fibAction, fibEntriesForFibAction) -> {
          forkTracerSameNode()
              .forward(
                  fibAction,
                  fibEntriesForFibAction,
                  dstIp,
                  currentNodeName,
                  forwardOutInterfaceHandler,
                  intraHopBreadcrumbs,
                  breadcrumb);
        }));
  } finally {
    if (intraHopBreadcrumbs.isEmpty()) {
      _breadcrumbs.pop();
    }
  }
}
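
The grouping-then-sorting idiom above (Collectors.groupingBy followed by ImmutableSortedMap.copyOf so that forEach visits groups deterministically) can be shown standalone. A sketch assuming Guava is on the classpath, with strings standing in for FibEntry and string length standing in for FibEntry::getAction:

import com.google.common.collect.ImmutableSortedMap;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;

public class GroupThenSortDemo {
    public static void main(String[] args) {
        List<String> entries = List.of("drop", "forward", "fwd", "noop", "out");

        // Group entries by a key, as the code above groups FibEntry by FibEntry::getAction
        Map<Integer, Set<String>> grouped =
                entries.stream().collect(Collectors.groupingBy(String::length, Collectors.toSet()));

        // Copy into a sorted map so forEach traverses the groups in a deterministic order
        ImmutableSortedMap.copyOf(grouped, Comparator.<Integer>naturalOrder())
                .forEach((len, group) -> System.out.println(len + " -> " + group));
    }
}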
 
Example 20   Project: batfish   File: RoutesAnswererUtil.java
/**
 * Given a {@link Map} of all RIBs groups the routes in them by the fields of {@link RouteRowKey}
 * and further sub-groups them by {@link RouteRowSecondaryKey} and for routes in the same
 * sub-group, sorts them according to {@link RouteRowAttribute}s
 *
 * @param ribs {@link Map} of the RIBs
 * @param matchingNodes {@link Set} of nodes to be matched
 * @param network {@link Prefix}
 * @param vrfRegex Regex to filter the VRF
 * @param protocolSpec {@link RoutingProtocolSpecifier} to filter the protocols of the routes
 * @param ipOwners {@link Map} of {@link Ip} to {@link Set} of owner nodes
 * @return {@link Map} of {@link RouteRowKey}s to corresponding sub{@link Map}s of {@link
 *     RouteRowSecondaryKey} to {@link SortedSet} of {@link RouteRowAttribute}s
 */
public static <T extends AbstractRouteDecorator>
    Map<RouteRowKey, Map<RouteRowSecondaryKey, SortedSet<RouteRowAttribute>>> groupRoutes(
        SortedMap<String, SortedMap<String, GenericRib<T>>> ribs,
        Set<String> matchingNodes,
        @Nullable Prefix network,
        String vrfRegex,
        RoutingProtocolSpecifier protocolSpec,
        @Nullable Map<Ip, Set<String>> ipOwners) {
  Map<RouteRowKey, Map<RouteRowSecondaryKey, SortedSet<RouteRowAttribute>>> routesGroups =
      new HashMap<>();
  Pattern compiledVrfRegex = Pattern.compile(vrfRegex);
  ribs.forEach(
      (node, vrfMap) -> {
        if (matchingNodes.contains(node)) {
          vrfMap.forEach(
              (vrfName, rib) -> {
                if (compiledVrfRegex.matcher(vrfName).matches()) {
                  rib.getRoutes().stream()
                      .filter(
                          route ->
                              (network == null || network.equals(route.getNetwork()))
                                  && protocolSpec.getProtocols().contains(route.getProtocol()))
                      .forEach(
                          route ->
                              routesGroups
                                  .computeIfAbsent(
                                      new RouteRowKey(node, vrfName, route.getNetwork()),
                                      k -> new HashMap<>())
                                  .computeIfAbsent(
                                      new RouteRowSecondaryKey(
                                          route.getNextHopIp(),
                                          route.getProtocol().protocolName()),
                                      k -> new TreeSet<>())
                                  .add(
                                      RouteRowAttribute.builder()
                                          .setNextHop(
                                              computeNextHopNode(route.getNextHopIp(), ipOwners))
                                          .setNextHopInterface(route.getNextHopInterface())
                                          .setAdminDistance(route.getAdministrativeCost())
                                          .setMetric(route.getMetric())
                                          .setTag(route.getTag())
                                          .build()));
                }
              });
        }
      });
  return routesGroups;
}
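
The nested computeIfAbsent calls above implement a two-level grouping (primary key to secondary key to a sorted set of values). A minimal, self-contained sketch of the same pattern with plain strings; all node, VRF, and route names are illustrative only:

import java.util.HashMap;
import java.util.Map;
import java.util.SortedSet;
import java.util.TreeSet;

public class NestedGroupingDemo {
    public static void main(String[] args) {
        // groups: node -> vrf -> sorted route descriptions (hypothetical data)
        Map<String, Map<String, SortedSet<String>>> groups = new HashMap<>();

        String[][] routes = {
            {"r1", "default", "10.0.0.0/8 via 10.0.0.1"},
            {"r1", "default", "0.0.0.0/0 via 10.0.0.254"},
            {"r1", "mgmt",    "192.168.0.0/24 via 192.168.0.1"},
            {"r2", "default", "10.0.0.0/8 via 10.0.1.1"},
        };

        for (String[] r : routes) {
            groups.computeIfAbsent(r[0], k -> new HashMap<>())
                  .computeIfAbsent(r[1], k -> new TreeSet<>())
                  .add(r[2]);
        }

        groups.forEach((node, byVrf) ->
                byVrf.forEach((vrf, rs) -> System.out.println(node + "/" + vrf + " -> " + rs)));
    }
}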