com.google.common.collect.SortedSetMultimap#put() code examples

Listed below are example usages of com.google.common.collect.SortedSetMultimap#put() taken from open-source projects on GitHub; the project and file for each snippet are noted so you can look up the full source.
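Before the project snippets, here is a minimal, self-contained sketch (class name and data are invented for illustration) of how put() behaves on a SortedSetMultimap backed by Guava's TreeMultimap, which keeps both the keys and each key's value set sorted by natural ordering:

import com.google.common.collect.SortedSetMultimap;
import com.google.common.collect.TreeMultimap;

public class SortedSetMultimapPutDemo {
    public static void main(String[] args) {
        // TreeMultimap sorts keys and each key's value set by natural ordering.
        SortedSetMultimap<String, Integer> scores = TreeMultimap.create();

        scores.put("bob", 7);
        scores.put("alice", 3);
        scores.put("alice", 1);

        // put() returns false if the key/value pair is already present,
        // because each key maps to a SortedSet (no duplicate values per key).
        boolean added = scores.put("alice", 1);

        System.out.println(added);  // false
        System.out.println(scores); // {alice=[1, 3], bob=[7]}
    }
}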

Example 1  Project: sailfish-core   File: HtmlReport.java
private SortedSetMultimap<Category, String> toMultimap(Set<BugDescription> bugSet) {
    SortedSetMultimap<Category, String> bugMap = MultimapBuilder.hashKeys().treeSetValues().build();
    for (BugDescription bugDescription : bugSet) {
        bugMap.put(bugDescription.getCategories(), bugDescription.getSubject().toUpperCase());
    }
    return bugMap;
}
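
A note on the builder above (assuming it is Guava's MultimapBuilder): hashKeys().treeSetValues() leaves the key iteration order unspecified and only sorts each key's value set, unlike the TreeMultimap used in most of the examples below. A minimal sketch with made-up data:

import com.google.common.collect.MultimapBuilder;
import com.google.common.collect.SortedSetMultimap;

public class HashKeysTreeValuesDemo {
    public static void main(String[] args) {
        // Keys live in a hash map (no ordering guarantee);
        // each key's values form a sorted set.
        SortedSetMultimap<String, String> bugs =
                MultimapBuilder.hashKeys().treeSetValues().build();
        bugs.put("UI", "B-2");
        bugs.put("UI", "B-1");
        bugs.put("Core", "A-9");
        System.out.println(bugs.get("UI")); // [B-1, B-2]
    }
}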
 
Example 2  Project: n4js   File: ReportUtils.java
/** Creates a histogram of the given collection, grouping elements under the key computed by {@code pivot}. */
public static <E extends Comparable<E>, T extends Comparable<T>> Multimap<T, E> getHistogram(Collection<E> elems,
		Function<E, T> pivot) {

	final SortedSetMultimap<T, E> histogram = TreeMultimap.create();
	for (E elem : elems) {
		T t = pivot.apply(elem);
		histogram.put(t, elem);
	}
	return histogram;
}
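
A hypothetical call of the helper above (the word list and the String::length pivot are illustrative, not taken from the n4js sources); because the returned multimap is backed by a TreeMultimap, both the keys and each key's element set come out sorted:

// Hypothetical usage; assumes getHistogram is reachable via the ReportUtils class above
// and that java.util.Arrays/List and com.google.common.collect.Multimap are imported.
List<String> words = Arrays.asList("fir", "oak", "birch", "elm");
Multimap<Integer, String> byLength = ReportUtils.getHistogram(words, String::length);
System.out.println(byLength); // {3=[elm, fir, oak], 5=[birch]}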
 
Example 3  Project: ArchUnit   File: SliceCycleArchCondition.java
private SortedSetMultimap<Slice, Dependency> targetsOf(Slice slice,
        ClassesToSlicesMapping classesToSlicesMapping, DescribedPredicate<Dependency> predicate) {
    SortedSetMultimap<Slice, Dependency> result = hashKeys().treeSetValues().build();
    for (Dependency dependency : Guava.Iterables.filter(slice.getDependenciesFromSelf(), predicate)) {
        if (classesToSlicesMapping.containsKey(dependency.getTargetClass())) {
            result.put(classesToSlicesMapping.get(dependency.getTargetClass()), dependency);
        }
    }
    return result;
}
 
Example 4  Project: hmftools   File: HmfGenePanelSupplier.java
@NotNull
private static SortedSetMultimap<String, HmfTranscriptRegion> toSortedMap(@NotNull List<HmfTranscriptRegion> regions) {
    SortedSetMultimap<String, HmfTranscriptRegion> regionMap = TreeMultimap.create();
    for (HmfTranscriptRegion region : regions) {
        regionMap.put(region.chromosome(), region);
    }

    return regionMap;
}
 
Example 5  Project: hmftools   File: BEDFileLoader.java
@NotNull
public static SortedSetMultimap<String, GenomeRegion> fromBedFile(@NotNull String bedFile) throws IOException {
    final SortedSetMultimap<String, GenomeRegion> regionMap = TreeMultimap.create();

    String prevChromosome = null;
    GenomeRegion prevRegion = null;
    try (final AbstractFeatureReader<BEDFeature, LineIterator> reader = getFeatureReader(bedFile, new BEDCodec(), false)) {
        for (final BEDFeature bedFeature : reader.iterator()) {
            final String chromosome = bedFeature.getContig();
            final long start = bedFeature.getStart();
            final long end = bedFeature.getEnd();

            if (end < start) {
                LOGGER.warn("Invalid genome region found in chromosome {}: start={}, end={}", chromosome, start, end);
            } else {
                final GenomeRegion region = GenomeRegions.create(chromosome, start, end);
                if (prevRegion != null && chromosome.equals(prevChromosome) && prevRegion.end() >= start) {
                    LOGGER.warn("BED file is not sorted, please fix! Current={}, Previous={}", region, prevRegion);
                } else {
                    regionMap.put(chromosome, region);
                    prevChromosome = chromosome;
                    prevRegion = region;
                }
            }
        }
    }

    return regionMap;
}
 
Example 6  Project: hmftools   File: BidirectionalSlicerTest.java
@Before
public void setup() {
    final SortedSetMultimap<String, GenomeRegion> regionMap = TreeMultimap.create();
    regionMap.put("X", GenomeRegions.create("X", 100, 200));
    regionMap.put("X", GenomeRegions.create("X", 300, 400));
    regionMap.put("Y", GenomeRegions.create("Y", 500, 600));

    slicer = new BidirectionalSlicer(regionMap);
}
 
Example 7  Project: datacollector   File: TableRuntimeContext.java
public static SortedSetMultimap<TableContext, TableRuntimeContext> initializeAndUpgradeFromV1Offsets(
    Map<String, TableContext> tableContextMap,
    Map<String, String> offsets,
    Set<String> offsetKeysToRemove
) throws StageException {
  SortedSetMultimap<TableContext, TableRuntimeContext> returnMap = buildSortedPartitionMap();

  for (Map.Entry<String, TableContext> tableEntry : tableContextMap.entrySet()) {
    final String tableName = tableEntry.getKey();
    final TableContext tableContext = tableEntry.getValue();

    Map<String, String> startingOffsets;
    String offsetValue = null;
    Map<String, String> storedOffsets = null;
    if (offsets.containsKey(tableName)) {
      offsetValue = offsets.remove(tableName);
      storedOffsets = OffsetQueryUtil.validateStoredAndSpecifiedOffset(tableContext, offsetValue);

      offsetKeysToRemove.add(tableName);

      startingOffsets = OffsetQueryUtil.getOffsetsFromSourceKeyRepresentation(offsetValue);
      tableContext.getOffsetColumnToStartOffset().putAll(startingOffsets);
    }

    final TableRuntimeContext partition = createInitialPartition(tableContext, storedOffsets);
    returnMap.put(tableContext, partition);

    if (offsetValue != null) {
      offsets.put(partition.getOffsetKey(), offsetValue);
    }
  }

  return returnMap;
}
 
/**
 * Checks whether partitioning has been turned off or on for any tables, and updates the partition map accordingly
 *
 * @param reconstructedPartitions the reconstructed partitions (may be modified)
 */
private void handlePartitioningTurnedOffOrOn(
    SortedSetMultimap<TableContext, TableRuntimeContext> reconstructedPartitions
) {

  for (TableContext tableContext : reconstructedPartitions.keySet()) {
    final SortedSet<TableRuntimeContext> partitions = reconstructedPartitions.get(tableContext);
    final TableRuntimeContext lastPartition = partitions.last();
    final TableContext sourceTableContext = lastPartition.getSourceTableContext();
    Utils.checkState(
        sourceTableContext.equals(tableContext),
        String.format(
            "Source table context for %s should match TableContext map key of %s",
            lastPartition.getDescription(),
            tableContext.getQualifiedName()
        )
    );

    final boolean partitioningTurnedOff = lastPartition.isPartitioned()
        && sourceTableContext.getPartitioningMode() == PartitioningMode.DISABLED;
    final boolean partitioningTurnedOn = !lastPartition.isPartitioned()
        && sourceTableContext.isPartitionable()
        && sourceTableContext.getPartitioningMode() != PartitioningMode.DISABLED;

    if (!partitioningTurnedOff && !partitioningTurnedOn) {
      continue;
    }

    final Map<String, String> nextStartingOffsets = new HashMap<>();
    final Map<String, String> nextMaxOffsets = new HashMap<>();

    final int newPartitionSequence = lastPartition.getPartitionSequence() > 0 ? lastPartition.getPartitionSequence() + 1 : 1;
    if (partitioningTurnedOff) {
      LOG.info(
          "Table {} has switched from partitioned to non-partitioned; partition sequence {} will be the last (with" +
              " no max offsets)",
          sourceTableContext.getQualifiedName(),
          newPartitionSequence
      );

      lastPartition.getPartitionOffsetStart().forEach(
          (col, off) -> {
            String basedOnStartOffset = lastPartition.generateNextPartitionOffset(col, off);
            nextStartingOffsets.put(col, basedOnStartOffset);
          }
      );

    } else if (partitioningTurnedOn) {

      lastPartition.getPartitionOffsetStart().forEach(
          (col, off) -> {
            String basedOnStoredOffset = lastPartition.getInitialStoredOffsets().get(col);
            nextStartingOffsets.put(col, basedOnStoredOffset);
          }
      );

      nextStartingOffsets.forEach(
          (col, off) -> nextMaxOffsets.put(col, lastPartition.generateNextPartitionOffset(col, off))
      );

      if (!reconstructedPartitions.remove(sourceTableContext, lastPartition)) {
        throw new IllegalStateException(String.format(
            "Failed to remove partition %s for table %s in switching partitioning from off to on",
            lastPartition.getDescription(),
            sourceTableContext.getQualifiedName()
        ));
      }

      LOG.info(
          "Table {} has switched from non-partitioned to partitioned; using last stored offsets as the starting" +
              " offsets for the new partition {}",
          sourceTableContext.getQualifiedName(),
          newPartitionSequence
      );
    }

    final TableRuntimeContext nextPartition = new TableRuntimeContext(
        sourceTableContext,
        lastPartition.isUsingNonIncrementalLoad(),
        (lastPartition.isPartitioned() && !partitioningTurnedOff) || partitioningTurnedOn,
        newPartitionSequence,
        nextStartingOffsets,
        nextMaxOffsets
    );

    reconstructedPartitions.put(sourceTableContext, nextPartition);
  }
}
 
Example 9  Project: s3proxy   File: AwsSignature.java
/**
 * Create Amazon V2 signature.  Reference:
 * http://docs.aws.amazon.com/general/latest/gr/signature-version-2.html
 */
static String createAuthorizationSignature(
        HttpServletRequest request, String uri, String credential,
        boolean queryAuth, boolean bothDateHeader) {
    // sort Amazon headers
    SortedSetMultimap<String, String> canonicalizedHeaders =
            TreeMultimap.create();
    for (String headerName : Collections.list(request.getHeaderNames())) {
        Collection<String> headerValues = Collections.list(
                request.getHeaders(headerName));
        headerName = headerName.toLowerCase();
        if (!headerName.startsWith("x-amz-") || (bothDateHeader &&
              headerName.equalsIgnoreCase(AwsHttpHeaders.DATE))) {
            continue;
        }
        if (headerValues.isEmpty()) {
            canonicalizedHeaders.put(headerName, "");
        }
        for (String headerValue : headerValues) {
            canonicalizedHeaders.put(headerName,
                    Strings.nullToEmpty(headerValue));
        }
    }

    // Build string to sign
    StringBuilder builder = new StringBuilder()
            .append(request.getMethod())
            .append('\n')
            .append(Strings.nullToEmpty(request.getHeader(
                    HttpHeaders.CONTENT_MD5)))
            .append('\n')
            .append(Strings.nullToEmpty(request.getHeader(
                    HttpHeaders.CONTENT_TYPE)))
            .append('\n');
    String expires = request.getParameter("Expires");
    if (queryAuth) {
        // If Expires is present, this is a query-string (presigned) request.
        // Even when Expires is absent the request may still be presigned;
        // deciding that would require checking the other parameters (e.g.
        // the access key id), not just Expires.
        builder.append(Strings.nullToEmpty(expires));
    } else {
        if (!bothDateHeader) {
            if (canonicalizedHeaders.containsKey(AwsHttpHeaders.DATE)) {
                builder.append("");
            } else {
                builder.append(request.getHeader(HttpHeaders.DATE));
            }
        } else {
            if (!canonicalizedHeaders.containsKey(AwsHttpHeaders.DATE)) {
                builder.append(request.getHeader(AwsHttpHeaders.DATE));
            } else {
                // x-amz-date is already among the canonicalized headers,
                // so nothing extra is appended for the Date field.
            }
        }
    }

    builder.append('\n');
    for (Map.Entry<String, String> entry : canonicalizedHeaders.entries()) {
        builder.append(entry.getKey()).append(':')
                .append(entry.getValue()).append('\n');
    }
    builder.append(uri);

    char separator = '?';
    List<String> subresources = Collections.list(
            request.getParameterNames());
    Collections.sort(subresources);
    for (String subresource : subresources) {
        if (SIGNED_SUBRESOURCES.contains(subresource)) {
            builder.append(separator).append(subresource);

            String value = request.getParameter(subresource);
            if (!"".equals(value)) {
                builder.append('=').append(value);
            }
            separator = '&';
        }
    }

    String stringToSign = builder.toString();
    logger.trace("stringToSign: {}", stringToSign);

    // Sign string
    Mac mac;
    try {
        mac = Mac.getInstance("HmacSHA1");
        mac.init(new SecretKeySpec(credential.getBytes(
                StandardCharsets.UTF_8), "HmacSHA1"));
    } catch (InvalidKeyException | NoSuchAlgorithmException e) {
        throw new RuntimeException(e);
    }
    return BaseEncoding.base64().encode(mac.doFinal(
            stringToSign.getBytes(StandardCharsets.UTF_8)));
}
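
The TreeMultimap above is what gives the canonicalized x-amz-* headers a deterministic order in the string to sign: keys iterate alphabetically and each key's values are sorted (and de-duplicated, since they are stored in a SortedSet). A standalone sketch of that ordering, with made-up header names and values:

// Illustration only; the header names and values are invented.
SortedSetMultimap<String, String> canonicalized = TreeMultimap.create();
canonicalized.put("x-amz-meta-color", "red");
canonicalized.put("x-amz-acl", "public-read");
canonicalized.put("x-amz-meta-color", "blue");

StringBuilder sb = new StringBuilder();
for (Map.Entry<String, String> entry : canonicalized.entries()) {
    sb.append(entry.getKey()).append(':').append(entry.getValue()).append('\n');
}
// sb now holds:
// x-amz-acl:public-read
// x-amz-meta-color:blue
// x-amz-meta-color:red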
 
Example 10  Project: googleads-java-lib   File: GetAccountHierarchy.java
/**
 * Runs the example.
 *
 * @param adWordsServices the services factory.
 * @param session the session.
 * @throws ApiException if the API request failed with one or more service errors.
 * @throws RemoteException if the API request failed due to other errors.
 */
public static void runExample(AdWordsServicesInterface adWordsServices, AdWordsSession session)
    throws RemoteException {
  // Get the ServicedAccountService.
  ManagedCustomerServiceInterface managedCustomerService =
      adWordsServices.get(session, ManagedCustomerServiceInterface.class);

  // Create selector builder.
  int offset = 0;
  SelectorBuilder selectorBuilder =
      new SelectorBuilder()
          .fields(ManagedCustomerField.CustomerId, ManagedCustomerField.Name)
          .offset(offset)
          .limit(PAGE_SIZE);

  // Get results.
  ManagedCustomerPage page;

  // Map from customerId to customer node.
  Map<Long, ManagedCustomerTreeNode> customerIdToCustomerNode = Maps.newHashMap();

  // Map from each parent customer ID to its set of linked child customer IDs.
  SortedSetMultimap<Long, Long> parentIdToChildIds = TreeMultimap.create();
  do {
    page = managedCustomerService.get(selectorBuilder.build());

    if (page.getEntries() != null) {
      // Create account tree nodes for each customer.
      for (ManagedCustomer customer : page.getEntries()) {
        ManagedCustomerTreeNode node = new ManagedCustomerTreeNode();
        node.account = customer;
        customerIdToCustomerNode.put(customer.getCustomerId(), node);
      }

      // Update the map of parent customer ID to child customer IDs.
      if (page.getLinks() != null) {
        for (ManagedCustomerLink link : page.getLinks()) {
          parentIdToChildIds.put(link.getManagerCustomerId(), link.getClientCustomerId());
        }
      }
    }
    offset += PAGE_SIZE;
    selectorBuilder.increaseOffsetBy(PAGE_SIZE);
  } while (offset < page.getTotalNumEntries());

  // Update the parentNode of each child node, and add each child to the childAccounts
  // of its parentNode.
  for (Entry<Long, Long> parentIdEntry : parentIdToChildIds.entries()) {
    ManagedCustomerTreeNode parentNode = customerIdToCustomerNode.get(parentIdEntry.getKey());
    ManagedCustomerTreeNode childNode = customerIdToCustomerNode.get(parentIdEntry.getValue());
    childNode.parentNode = parentNode;
    parentNode.childAccounts.add(childNode);
  }

  // Find the root account node in the tree.
  ManagedCustomerTreeNode rootNode =
      customerIdToCustomerNode.values().stream()
          .filter(node -> node.parentNode == null)
          .findFirst()
          .orElse(null);

  // Display serviced account graph.
  if (rootNode != null) {
    // Display account tree.
    System.out.println("CustomerId, Name");
    System.out.println(rootNode.toTreeString(0, new StringBuffer()));
  } else {
    System.out.println("No serviced accounts were found.");
  }
}