The following are example usages of com.google.common.collect.Multimap#values(), collected from open-source projects on GitHub.
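Before the snippets, a quick reminder of what Multimap#values() returns: a single flat collection containing every value from every key. Per the Guava contract it is a view backed by the multimap, so its size always equals Multimap#size() and later modifications to the multimap are reflected in it. A minimal, self-contained sketch (class and variable names are illustrative only):

import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.Multimap;
import java.util.Collection;

public class MultimapValuesSketch {
    public static void main(String[] args) {
        Multimap<String, Integer> scores = ArrayListMultimap.create();
        scores.put("alice", 90);
        scores.put("alice", 75);
        scores.put("bob", 60);

        // values() flattens all values across keys: [90, 75, 60]
        Collection<Integer> all = scores.values();
        System.out.println(all.size()); // 3, always equal to scores.size()

        // It is a live view, so structural changes show up immediately
        scores.removeAll("alice");
        System.out.println(all); // [60]
    }
}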
public static void main(String[] args) {
    Multimap<String, String> myMultimap = ArrayListMultimap.create();
    myMultimap.put("Fruits", "Banana");
    myMultimap.put("Fruits", "Apple");
    myMultimap.put("Fruits", "Pear");
    myMultimap.put("Vegetables", "Carrot");

    // size() counts every key-value entry, so this prints 4
    int size = myMultimap.size();
    System.out.println(size);

    // get(key) returns the collection of values mapped to that key
    Collection<String> fruits = myMultimap.get("Fruits");
    System.out.println(fruits);
    Collection<String> vegetables = myMultimap.get("Vegetables");
    System.out.println(vegetables);

    // values() iterates over every value regardless of key
    for (String value : myMultimap.values()) {
        System.out.println(value);
    }

    // removeAll(key) drops all entries for the key; get() then returns an empty collection
    myMultimap.removeAll("Fruits");
    System.out.println(myMultimap.get("Fruits"));
}
public Collection<IdeExtendedRepoFileDependency> extractRepoFileDependencies(DependencyHandler dependencyHandler, Collection<Configuration> plusConfigurations, Collection<Configuration> minusConfigurations, boolean downloadSources, boolean downloadJavadoc) {
    // can have multiple IDE dependencies with same component identifier (see GRADLE-1622)
    Multimap<ComponentIdentifier, IdeExtendedRepoFileDependency> resolvedDependenciesComponentMap = LinkedHashMultimap.create();
    for (IdeExtendedRepoFileDependency dep : resolvedExternalDependencies(plusConfigurations, minusConfigurations)) {
        resolvedDependenciesComponentMap.put(toComponentIdentifier(dep.getId()), dep);
    }
    List<Class<? extends Artifact>> artifactTypes = new ArrayList<Class<? extends Artifact>>(2);
    if (downloadSources) {
        artifactTypes.add(SourcesArtifact.class);
    }
    if (downloadJavadoc) {
        artifactTypes.add(JavadocArtifact.class);
    }
    downloadAuxiliaryArtifacts(dependencyHandler, resolvedDependenciesComponentMap, artifactTypes);
    Collection<UnresolvedIdeRepoFileDependency> unresolvedDependencies = unresolvedExternalDependencies(plusConfigurations, minusConfigurations);
    Collection<IdeExtendedRepoFileDependency> resolvedDependencies = resolvedDependenciesComponentMap.values();
    Collection<IdeExtendedRepoFileDependency> resolvedAndUnresolved = new ArrayList<IdeExtendedRepoFileDependency>(unresolvedDependencies.size() + resolvedDependencies.size());
    resolvedAndUnresolved.addAll(resolvedDependencies);
    resolvedAndUnresolved.addAll(unresolvedDependencies);
    return resolvedAndUnresolved;
}
@Test
public void testGetEventFieldsGROUPED() throws Exception {
    JsonIngestHelper ingestHelper = init(initConfig(FlattenMode.GROUPED));
    RawRecordContainer event = new RawRecordContainerImpl();
    event.setDate((new Date()).getTime());
    event.setRawData(testRecord);
    event.generateId(null);
    Assert.assertNotNull(ingestHelper.getEmbeddedHelper());
    Multimap<String, NormalizedContentInterface> fieldMap = ingestHelper.getEventFields(event);
    Assert.assertEquals(12, fieldMap.keySet().size());
    Assert.assertEquals(16, fieldMap.values().size());
    Assert.assertTrue(fieldMap.containsKey("NESTED"));
    Assert.assertFalse(fieldMap.containsKey("HEADER_DATE"));
    Assert.assertTrue(fieldMap.containsKey("HEADERDATE"));
    for (NormalizedContentInterface field : fieldMap.values()) {
        if (((NormalizedFieldAndValue) field).isGrouped()) {
            Assert.assertEquals("NESTED", field.getIndexedFieldName());
        } else {
            Assert.assertFalse(((NormalizedFieldAndValue) field).isGrouped());
        }
    }
}
protected Iterable<IEObjectDescription> getAliasedElements(Iterable<IEObjectDescription> candidates) {
    Multimap<QualifiedName, IEObjectDescription> keyToDescription = LinkedHashMultimap.create();
    Multimap<QualifiedName, ImportNormalizer> keyToNormalizer = HashMultimap.create();
    for (IEObjectDescription imported : candidates) {
        QualifiedName fullyQualifiedName = imported.getName();
        for (ImportNormalizer normalizer : normalizers) {
            QualifiedName alias = normalizer.deresolve(fullyQualifiedName);
            if (alias != null) {
                QualifiedName key = alias;
                if (isIgnoreCase()) {
                    key = key.toLowerCase();
                }
                keyToDescription.put(key, new AliasedEObjectDescription(alias, imported));
                keyToNormalizer.put(key, normalizer);
            }
        }
    }
    for (QualifiedName name : keyToNormalizer.keySet()) {
        if (keyToNormalizer.get(name).size() > 1)
            keyToDescription.removeAll(name);
    }
    return keyToDescription.values();
}
@Override
public Map<String, Type[]> argumentTypes() {
    try {
        Multimap<String, IExecutableCommandService> registeredCommands = new ReflectExtensions().get(this,
                "registeredCommands");
        Map<String, Type[]> result = new HashMap<>();
        for (IExecutableCommandService service : new HashSet<>(registeredCommands.values())) {
            if (service instanceof ExecuteCommandParamsDescriber) {
                result.putAll(((ExecuteCommandParamsDescriber) service).argumentTypes());
            }
        }
        return result;
    } catch (SecurityException | NoSuchFieldException | IllegalArgumentException | IllegalAccessException e) {
        return Collections.emptyMap();
    }
}
@Override
public SchemaListenerRegistration registerSchemaSourceListener(final SchemaSourceListener listener) {
    final SchemaListenerRegistration ret = new AbstractSchemaListenerRegistration(listener) {
        @Override
        protected void removeRegistration() {
            listeners.remove(this);
        }
    };
    synchronized (this) {
        final Collection<PotentialSchemaSource<?>> col = new ArrayList<>();
        for (Multimap<Class<? extends SchemaSourceRepresentation>, AbstractSchemaSourceRegistration<?>> m
                : sources.values()) {
            for (AbstractSchemaSourceRegistration<?> r : m.values()) {
                col.add(r.getInstance());
            }
        }
        // Notify first, so that translator-type listeners, which react by registering a source,
        // do not cause an infinite loop.
        listener.schemaSourceRegistered(col);
        listeners.add(ret);
    }
    return ret;
}
Map<String, Model> modelsFromApiListings(Multimap<String, ApiListing> apiListings) {
    Map<String, springfox.documentation.schema.Model> definitions = newTreeMap();
    for (ApiListing each : apiListings.values()) {
        definitions.putAll(each.getModels());
    }
    return modelMapper.mapModels(definitions);
}
private boolean isBrokerAvailableForRebalancing(String bundleName, long maxLoadLevel) {
    NamespaceName namespaceName = NamespaceName.get(LoadManagerShared.getNamespaceNameFromBundleName(bundleName));
    Map<Long, Set<ResourceUnit>> availableBrokers = sortedRankings.get();
    // this does not have "http://" in front, hacky but no time to pretty up
    Multimap<Long, ResourceUnit> brokers = getFinalCandidates(namespaceName, availableBrokers);
    for (Object broker : brokers.values()) {
        ResourceUnit underloadedRU = (ResourceUnit) broker;
        LoadReport currentLoadReport = currentLoadReports.get(underloadedRU);
        if (isBelowLoadLevel(currentLoadReport.getSystemResourceUsage(), maxLoadLevel)) {
            return true;
        }
    }
    return false;
}
private static Multimap<String, UnitProxy> filteredUnits(Multimap<String, UnitProxy> units, final List<String> fullUnitNames) {
    Multimap<String, UnitProxy> results = ArrayListMultimap.create();
    for (UnitProxy unitProxy : units.values()) {
        for (String unitFullName : fullUnitNames) {
            if (Objects.equals(Unit.fullName(unitProxy), unitFullName)) {
                results.put(unitProxy.getGroup().getName(), unitProxy);
            }
        }
    }
    return results;
}
private static void checkReferencedModulesExist(Multimap<String, String> moduleDependenciesMap) {
    for (String referencedModule : moduleDependenciesMap.values()) {
        if (!moduleDependenciesMap.containsKey(referencedModule)) {
            throw InvalidBundleException.builder()
                    .withUserMessage(
                            "Module '%s' is referenced by <uses-split> but does not exist.", referencedModule)
                    .build();
        }
    }
}
@Override
protected void invokeInternal(WorkflowContext ctx, ProgressMonitor monitor,
        Issues issues) {
    ResourceSet resourceSet = getResourceSet();
    // due to some Xcore peculiarity we have to access the IAllContainersState here
    // to trigger some lazy init logic
    IAllContainersState allContainerState = (IAllContainersState) EcoreUtil.getAdapter(resourceSet.eAdapters(),
            IAllContainersState.class);
    allContainerState.isEmpty("");
    Multimap<String, URI> uris = getPathTraverser().resolvePathes(pathes,
            new Predicate<URI>() {
                @Override
                public boolean apply(URI input) {
                    return input.fileExtension().equals(XCORE_FILE_EXT);
                }
            });
    List<Resource> resources = new ArrayList<>();
    for (URI uri : uris.values()) {
        LOGGER.info(uri);
        try {
            resources.add(parse(uri, resourceSet));
        } catch (Exception e) {
            LOGGER.error("Problem during loading of resource @ " + uri, e);
        }
    }
    installIndex(resourceSet);
    for (Resource r : resources) {
        EcoreUtil.resolveAll(r);
        for (Diagnostic x : r.getErrors()) {
            issues.addError(x.getMessage(), x);
        }
    }
    ctx.set(slot, resources);
}
public ResourceSet getInitializedResourceSet(List<String> pathes, UriFilter filter) {
    ResourceSet resourceSet = resourceSetProvider.get();
    Multimap<String, URI> pathToUriMap = getPathToUriMap(pathes, filter);
    IAllContainersState containersState = factory.getContainersState(pathes, pathToUriMap);
    resourceSet.eAdapters().add(new DelegatingIAllContainerAdapter(containersState));
    for (URI uri : pathToUriMap.values()) {
        resourceSet.createResource(uri);
    }
    return resourceSet;
}
Map<String, Model> modelsFromApiListings(ModelMapper modelMapper, Multimap<String, ApiListing> apiListings)
{
    Map<String, springfox.documentation.schema.Model> definitions = newTreeMap();
    for (ApiListing each : apiListings.values())
    {
        // System.out.println(each.getModels());
        // System.out.println(each.getModels().get("BusinessUser").getBaseModel());
        // System.out.println(each.getModels().get("BusinessUser").getExample());
        // System.out.println(each.getModels().get("BusinessUser").getName());
        // System.out.println(each.getModels().get("BusinessUser").getProperties());
        // System.out.println(each.getModels().get("BusinessUser").getProperties().get("realName").getDefaultValue());
        definitions.putAll(each.getModels());
    }
    return modelMapper.mapModels(definitions);
}
private static Multimap<EntsoeGeographicalCode, String> checkedFormats(Multimap<EntsoeGeographicalCode, String> forbiddenFormatsByGeographicalCode,
                                                                       Collection<String> supportedFormats) {
    // check that the formats are valid
    for (String format : forbiddenFormatsByGeographicalCode.values()) {
        if (!supportedFormats.contains(format)) {
            throw new IllegalArgumentException("Unsupported import format " + format);
        }
    }
    return forbiddenFormatsByGeographicalCode;
}
private void assertWatchKeys(Multimap<String, String> watchKeysMap, DeferredResult deferredResult) {
    for (String watchKey : watchKeysMap.values()) {
        Collection<DeferredResultWrapper> deferredResultWrappers = deferredResults.get(watchKey);
        boolean found = false;
        for (DeferredResultWrapper wrapper : deferredResultWrappers) {
            if (Objects.equals(wrapper.getResult(), deferredResult)) {
                found = true;
            }
        }
        assertTrue(found);
    }
}
/**
 * Remove a node that has died, attempting to restore the replica count.
 * If the node is alive, decommission should be attempted. If decommission
 * fails, then removeToken should be called. If we fail while trying to
 * restore the replica count, finally forceRemoveCompletion should be
 * called to forcibly remove the node without regard to replica count.
 *
 * @param hostIdString Host ID of the node to remove
 */
public void removeNode(String hostIdString)
{
    InetAddress myAddress = FBUtilities.getBroadcastAddress();
    UUID localHostId = tokenMetadata.getHostId(myAddress);
    UUID hostId = UUID.fromString(hostIdString);
    InetAddress endpoint = tokenMetadata.getEndpointForHostId(hostId);
    if (endpoint == null)
        throw new UnsupportedOperationException("Host ID not found.");
    Collection<Token> tokens = tokenMetadata.getTokens(endpoint);
    if (endpoint.equals(myAddress))
        throw new UnsupportedOperationException("Cannot remove self");
    if (Gossiper.instance.getLiveMembers().contains(endpoint))
        throw new UnsupportedOperationException("Node " + endpoint + " is alive and owns this ID. Use decommission command to remove it from the ring");
    // A leaving endpoint that is dead is already being removed.
    if (tokenMetadata.isLeaving(endpoint))
        logger.warn("Node {} is already being removed, continuing removal anyway", endpoint);
    if (!replicatingNodes.isEmpty())
        throw new UnsupportedOperationException("This node is already processing a removal. Wait for it to complete, or use 'removenode force' if this has failed.");
    // Find the endpoints that are going to become responsible for data
    for (String keyspaceName : Schema.instance.getNonSystemKeyspaces())
    {
        // if the replication factor is 1 the data is lost so we shouldn't wait for confirmation
        if (Keyspace.open(keyspaceName).getReplicationStrategy().getReplicationFactor() == 1)
            continue;
        // get all ranges that change ownership (that is, a node needs
        // to take responsibility for a new range)
        Multimap<Range<Token>, InetAddress> changedRanges = getChangedRangesForLeaving(keyspaceName, endpoint);
        IFailureDetector failureDetector = FailureDetector.instance;
        for (InetAddress ep : changedRanges.values())
        {
            if (failureDetector.isAlive(ep))
                replicatingNodes.add(ep);
            else
                logger.warn("Endpoint {} is down and will not receive data for re-replication of {}", ep, endpoint);
        }
    }
    removingNode = endpoint;
    tokenMetadata.addLeavingEndpoint(endpoint);
    PendingRangeCalculatorService.instance.update();
    // the gossiper will handle spoofing this node's state to REMOVING_TOKEN for us
    // we add our own token so that other nodes can let us know when they're done
    Gossiper.instance.advertiseRemoving(endpoint, hostId, localHostId);
    // kick off streaming commands
    restoreReplicaCount(endpoint, myAddress);
    // wait for ReplicationFinishedVerbHandler to signal we're done
    while (!replicatingNodes.isEmpty())
    {
        Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
    }
    excise(tokens, endpoint);
    // gossiper will indicate the token has left
    Gossiper.instance.advertiseTokenRemoved(endpoint, hostId);
    replicatingNodes.clear();
    removingNode = null;
}
private void killWallsAndChoppers(List<Blast> blasts) {
    // collect all the destructible walls that are already within the blast radius;
    // we need to work out who killed whom, and with what (blast waves can overlap)
    List<Wall> all = walls.listSubtypes(Wall.class);
    Multimap<Hero, Wall> deathMatch = HashMultimap.create();
    for (Blast blast : blasts) {
        Hero hunter = blast.owner();
        int index = all.indexOf(blast);
        if (index != -1) {
            Wall wall = all.get(index);
            deathMatch.put(hunter, wall);
        }
    }
    // we now have two sets: the destroyed walls
    // and the hunters responsible for destroying them
    Set<Wall> preys = new HashSet<>(deathMatch.values());
    Set<Hero> hunters = new HashSet<>(deathMatch.keys());
    // first, kill the walls
    preys.forEach(wall -> {
        if (wall instanceof MeatChopperHunter) {
            ((MeatChopperHunter) wall).die();
        } else {
            destroyedWalls.add(wall);
        }
    });
    // then every hunter responsible gets the corresponding achievement events
    hunters.forEach(hunter -> {
        if (!hunter.hasPlayer()) {
            return;
        }
        deathMatch.get(hunter).forEach(wall -> {
            if (wall instanceof MeatChopper) {
                hunter.event(Events.KILL_MEAT_CHOPPER);
            } else if (wall instanceof DestroyWall) {
                hunter.event(Events.KILL_DESTROY_WALL);
            }
        });
    });
}
public BaseDepthFilter(final double minDepthPercentage, final double maxDepthPercentage,
        @NotNull final Multimap<Chromosome, BaseDepth> evidence) {
    this(minDepthPercentage, maxDepthPercentage, evidence.values());
}
@Override
public ScanRangeSplits getScanRangeSplits(String placementName, int desiredRecordsPerSplit, Optional<ScanRange> subrange) {
    checkNotNull(placementName, "placement");
    checkArgument(desiredRecordsPerSplit >= 0, "Min records per split too low");
    DeltaPlacement placement = (DeltaPlacement) _placementCache.get(placementName);
    CassandraKeyspace keyspace = placement.getKeyspace();
    ColumnFamily<ByteBuffer, DeltaKey> cf = placement.getBlockedDeltaColumnFamily();
    // Get the topology so the splits can be grouped by rack
    Multimap<String, TokenRange> racks = describeCassandraTopology(keyspace.getAstyanaxKeyspace());
    Collection<TokenRange> allTokenRanges = racks.values();
    ScanRangeSplits.Builder builder = ScanRangeSplits.builder();
    for (Map.Entry<String, Collection<TokenRange>> entry : racks.asMap().entrySet()) {
        String rack = entry.getKey();
        Collection<TokenRange> tokenRanges = entry.getValue();
        for (TokenRange tokenRange : tokenRanges) {
            if (subrange.isPresent()) {
                // Find the intersecting token ranges (if any) and add the splits for the intersection
                ByteBuffer rangeStart = parseTokenString(tokenRange.getStartToken());
                ByteBuffer rangeEnd = parseTokenString(tokenRange.getEndToken());
                List<ScanRange> intersections = ScanRange.create(rangeStart, rangeEnd).intersection(subrange.get());
                for (ScanRange scanRange : intersections) {
                    TokenRange intersectingTokenRange = new TokenRangeImpl(
                            toTokenString(scanRange.getFrom()), toTokenString(scanRange.getTo()), tokenRange.getEndpoints());
                    addScanRangeSplitsForTokenRange(keyspace, cf, rack, intersectingTokenRange,
                            desiredRecordsPerSplit, allTokenRanges, builder);
                }
            } else {
                // Add splits for the entire token range
                addScanRangeSplitsForTokenRange(keyspace, cf, rack, tokenRange, desiredRecordsPerSplit,
                        allTokenRanges, builder);
            }
        }
    }
    return builder.build();
}