The following are examples of java.util.Map#putIfAbsent(), collected from open-source projects; follow the links to view the full sources on GitHub.
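As a quick reminder of the contract all of these examples rely on: putIfAbsent(key, value) inserts the mapping only if the key is absent (or mapped to null) and returns the previous value, or null if the insert happened. A minimal sketch:

// Minimal sketch of the putIfAbsent contract used throughout these examples.
Map<String, Integer> map = new HashMap<>();
System.out.println(map.putIfAbsent("a", 1)); // null: "a" was absent, mapping inserted
System.out.println(map.putIfAbsent("a", 2)); // 1: "a" already present, map unchanged
System.out.println(map.get("a"));            // 1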
@SuppressFBWarnings(value = "RCN_REDUNDANT_NULLCHECK_WOULD_HAVE_BEEN_A_NPE",
        justification = "https://github.com/spotbugs/spotbugs/issues/600")
private static Collection<YangTextSchemaSource> toUniqueSources(final Collection<ScannedDependency> dependencies)
        throws IOException {
    final Map<String, YangTextSchemaSource> byContent = new HashMap<>();
    for (ScannedDependency dependency : dependencies) {
        for (YangTextSchemaSource s : dependency.sources()) {
            try (Reader reader = s.asCharSource(StandardCharsets.UTF_8).openStream()) {
                final String contents = CharStreams.toString(reader);
                byContent.putIfAbsent(contents, s);
            }
        }
    }
    return byContent.values();
}
public static <T, U, V extends U> U getOrCreate(Map<T, U> map, Class<V> clazz, T key) {
    U existing = map.get(key);
    if (existing != null) {
        return existing;
    }
    try {
        U toPut = clazz.newInstance();
        existing = map.putIfAbsent(key, toPut);
        if (existing == null) {
            return toPut;
        }
        return existing;
    } catch (InstantiationException | IllegalAccessException e) {
        throw new RuntimeException(e);
    }
}
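For illustration, a hypothetical caller might look like the sketch below; the buffers map and the "log" key are invented, and StringBuilder stands in for any value type with a public no-arg constructor (which clazz.newInstance() requires). On a concurrent map, the putIfAbsent call ensures racing callers end up sharing the same instance:

// Hypothetical usage sketch: 'buffers' and the "log" key are invented for illustration.
Map<String, StringBuilder> buffers = new ConcurrentHashMap<>();
StringBuilder buf = getOrCreate(buffers, StringBuilder.class, "log");
buf.append("first line\n");
// A second call with the same key returns the same StringBuilder instance.
assert buf == getOrCreate(buffers, StringBuilder.class, "log");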
/**
 * Scans through topics to check whether any topic has partition(s) with a bad replication factor. For each topic, the
 * target replication factor to check against is the maximum of {@link #SELF_HEALING_TARGET_TOPIC_REPLICATION_FACTOR_CONFIG}
 * and the topic's minISR plus the value of {@link #TOPIC_REPLICATION_FACTOR_MARGIN_CONFIG}.
 *
 * @param topicsToCheck Set of topics to check.
 * @param cluster Cluster state to check the topics against.
 * @return Map of detected topic replication factor anomaly entries by target replication factor.
 */
private Map<Short, Set<TopicReplicationFactorAnomalyEntry>> populateBadTopicsByReplicationFactor(Set<String> topicsToCheck, Cluster cluster) {
    Map<Short, Set<TopicReplicationFactorAnomalyEntry>> topicsByReplicationFactor = new HashMap<>();
    for (String topic : topicsToCheck) {
        if (_cachedTopicMinISR.containsKey(topic)) {
            short topicMinISR = _cachedTopicMinISR.get(topic).minISR();
            short targetReplicationFactor = (short) Math.max(_targetReplicationFactor, topicMinISR + _topicReplicationFactorMargin);
            int violatedPartitionCount = 0;
            for (PartitionInfo partitionInfo : cluster.partitionsForTopic(topic)) {
                if (partitionInfo.replicas().length != targetReplicationFactor) {
                    violatedPartitionCount++;
                }
            }
            if (violatedPartitionCount > 0) {
                topicsByReplicationFactor.putIfAbsent(targetReplicationFactor, new HashSet<>());
                topicsByReplicationFactor.get(targetReplicationFactor).add(
                        new TopicReplicationFactorAnomalyEntry(topic, (double) violatedPartitionCount / cluster.partitionCountForTopic(topic)));
            }
        }
    }
    return topicsByReplicationFactor;
}
private Map<String, BaseSetting> initialize(BaseSetting... settings) {
    final Map<String, BaseSetting> map = new LinkedHashMap<>();
    for (final BaseSetting setting : settings) {
        for (final String name : setting.getNames()) {
            if (map.putIfAbsent(name, setting) != null) {
                DEFAULT_LOGGER.error("Setting name collision between {} and {}",
                        name, map.get(name).getClass().getSimpleName());
            }
        }
    }
    DEFAULT_LOGGER.info("{} settings initialized", settings.length);
    return Collections.unmodifiableMap(map);
}
private Map<Endpoint, List<Method>> addGloballyDisabledEndpoints(Map<Endpoint, List<Method>> endpoints) {
    if (globallyDisabledEndpoints != null && !globallyDisabledEndpoints.isEmpty()) {
        Set<Endpoint> globalEndpoints = globallyDisabledEndpoints.keySet();
        for (Endpoint endpoint : globalEndpoints) {
            endpoints.putIfAbsent(endpoint, new LinkedList<>());
            endpoints.get(endpoint).addAll(globallyDisabledEndpoints.get(endpoint));
        }
    }
    return endpoints;
}
private ModuleEffectiveStatementImpl(final @NonNull ModuleStmtContext ctx) {
    super(ctx, findPrefix(ctx.delegate(), "module", ctx.getStatementArgument()));
    submodules = ctx.getSubmodules();
    qnameModule = verifyNotNull(ctx.getFromNamespace(ModuleCtxToModuleQName.class, ctx.delegate()));

    final String localPrefix = findFirstEffectiveSubstatementArgument(PrefixEffectiveStatement.class).get();
    final Builder<String, ModuleEffectiveStatement> prefixToModuleBuilder = ImmutableMap.builder();
    prefixToModuleBuilder.put(localPrefix, this);
    appendPrefixes(ctx, prefixToModuleBuilder);
    prefixToModule = prefixToModuleBuilder.build();

    final Map<QNameModule, String> tmp = Maps.newLinkedHashMapWithExpectedSize(prefixToModule.size() + 1);
    tmp.put(qnameModule, localPrefix);
    for (Entry<String, ModuleEffectiveStatement> e : prefixToModule.entrySet()) {
        tmp.putIfAbsent(e.getValue().localQNameModule(), e.getKey());
    }
    namespaceToPrefix = ImmutableMap.copyOf(tmp);

    final Map<String, StmtContext<?, ?, ?>> includedSubmodules =
            ctx.getAllFromCurrentStmtCtxNamespace(IncludedSubmoduleNameToModuleCtx.class);
    nameToSubmodule = includedSubmodules == null ? ImmutableMap.of()
            : ImmutableMap.copyOf(Maps.transformValues(includedSubmodules,
                submodule -> (SubmoduleEffectiveStatement) submodule.buildEffective()));

    final Map<QName, StmtContext<?, ExtensionStatement, ExtensionEffectiveStatement>> extensions =
            ctx.getAllFromCurrentStmtCtxNamespace(ExtensionNamespace.class);
    qnameToExtension = extensions == null ? ImmutableMap.of()
            : ImmutableMap.copyOf(Maps.transformValues(extensions, StmtContext::buildEffective));

    final Map<QName, StmtContext<?, FeatureStatement, FeatureEffectiveStatement>> features =
            ctx.getAllFromCurrentStmtCtxNamespace(FeatureNamespace.class);
    qnameToFeature = features == null ? ImmutableMap.of()
            : ImmutableMap.copyOf(Maps.transformValues(features, StmtContext::buildEffective));

    final Map<QName, StmtContext<?, IdentityStatement, IdentityEffectiveStatement>> identities =
            ctx.getAllFromCurrentStmtCtxNamespace(IdentityNamespace.class);
    qnameToIdentity = identities == null ? ImmutableMap.of()
            : ImmutableMap.copyOf(Maps.transformValues(identities, StmtContext::buildEffective));
}
/**
 * Checks exactP for all actually attained D values against the implementation
 * in Commons Math 3.4.1. See {@link #exactP341(double, int, int, boolean)},
 * which duplicates that code.
 *
 * The brute force implementation enumerates all n-m partitions and counts the
 * number with p-values less than d, so it really is exact (but slow). This test
 * compares the current code with the 3.4.1 implementation. Set maxSize higher to
 * extend the test to more values. Since the 3.4.1 code is very slow, setting
 * maxSize higher than 8 will make this test case run a long time.
 */
@Test
public void testExactP341RealD() {
    final double tol = 1e-12;
    final int maxSize = 6;
    for (int m = 2; m < maxSize; m++) {
        for (int n = 2; n < maxSize; n++) {
            // Not actually used for the test, but dValues basically stores
            // the ks distribution - keys are d values and values are p-values
            final Map<Double, Double> dValues = new TreeMap<>();
            final Iterator<int[]> combinationsIterator = CombinatoricsUtils.combinationsIterator(n + m, n);
            final double[] nSet = new double[n];
            final double[] mSet = new double[m];
            while (combinationsIterator.hasNext()) {
                // Generate an n-set
                final int[] nSetI = combinationsIterator.next();
                // Copy the n-set to nSet and its complement to mSet
                int j = 0;
                int k = 0;
                for (int i = 0; i < n + m; i++) {
                    if (j < n && nSetI[j] == i) {
                        nSet[j++] = i;
                    } else {
                        mSet[k++] = i;
                    }
                }
                final KolmogorovSmirnovTest kStatTest = new KolmogorovSmirnovTest();
                final double curD = kStatTest.kolmogorovSmirnovStatistic(nSet, mSet);
                final double curP = kStatTest.exactP(curD, m, n, true);
                dValues.putIfAbsent(curD, curP);
                Assert.assertEquals(
                        exactP341(curD, m, n, true),
                        kStatTest.exactP(curD, m, n, true),
                        tol);
            }
        }
    }
    // Add code to display / persist dValues here if desired
}
protected List<LiveInstance> setupLiveInstances(String clusterName, int[] liveInstances) {
    HelixZkClient.ZkClientConfig clientConfig = new HelixZkClient.ZkClientConfig();
    clientConfig.setZkSerializer(new ZNRecordSerializer());

    List<LiveInstance> result = new ArrayList<>();
    for (int i = 0; i < liveInstances.length; i++) {
        String instance = "localhost_" + liveInstances[i];
        _liveInstanceOwners.putIfAbsent(clusterName, new HashMap<>());
        Map<String, HelixZkClient> clientMap = _liveInstanceOwners.get(clusterName);
        clientMap.putIfAbsent(instance, DedicatedZkClientFactory.getInstance()
                .buildZkClient(new HelixZkClient.ZkConnectionConfig(ZK_ADDR), clientConfig));
        HelixZkClient client = clientMap.get(instance);
        ZKHelixDataAccessor accessor =
                new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor<>(client));
        Builder keyBuilder = accessor.keyBuilder();

        LiveInstance liveInstance = new LiveInstance(instance);
        // Keep setting the session id in the deprecated field to ensure the same behavior as a real participant.
        // Note the participant is doing so for backward compatibility.
        liveInstance.setSessionId(Long.toHexString(client.getSessionId()));
        // Please refer to the version requirement here: helix-core/src/main/resources/cluster-manager-version.properties
        // Ensuring version compatibility can avoid the warning message during test.
        liveInstance.setHelixVersion("0.4");
        accessor.setProperty(keyBuilder.liveInstance(instance), liveInstance);
        result.add(accessor.getProperty(keyBuilder.liveInstance(instance)));
    }
    return result;
}
@Test
public void testKeyComputation() {
    NestedSet<String> values = NestedSetBuilder.<String>stableOrder().add("a").add("b").build();
    ImmutableList<CustomCommandLine> commandLines =
        ImmutableList.<CustomCommandLine>builder()
            .add(builder().add("arg").build())
            .add(builder().addFormatted("--foo=%s", "arg").build())
            .add(builder().addPrefixed("--foo=%s", "arg").build())
            .add(builder().addAll(values).build())
            .add(builder().addAll(VectorArg.addBefore("--foo=%s").each(values)).build())
            .add(builder().addAll(VectorArg.join("--foo=%s").each(values)).build())
            .add(builder().addAll(VectorArg.format("--foo=%s").each(values)).build())
            .add(
                builder()
                    .addAll(VectorArg.of(values).mapped((s, args) -> args.accept(s + "_mapped")))
                    .build())
            .build();

    // Ensure all these command lines have distinct keys
    ActionKeyContext actionKeyContext = new ActionKeyContext();
    Map<String, CustomCommandLine> digests = new HashMap<>();
    for (CustomCommandLine commandLine : commandLines) {
        Fingerprint fingerprint = new Fingerprint();
        commandLine.addToFingerprint(actionKeyContext, fingerprint);
        String digest = fingerprint.hexDigestAndReset();
        CustomCommandLine previous = digests.putIfAbsent(digest, commandLine);
        if (previous != null) {
            fail(
                String.format(
                    "Found two command lines with identical digest %s: '%s' and '%s'",
                    digest,
                    Joiner.on(' ').join(previous.arguments()),
                    Joiner.on(' ').join(commandLine.arguments())));
        }
    }
}
public Response<Map<String, List<EnumResponse>>> getCommonEnumInfo() {
    Map<String, List<EnumResponse>> result = Maps.newHashMap();
    result.putIfAbsent(Constants.Type.OFFER_TYPE, getOfferType());
    result.putIfAbsent(Constants.Type.GOODS_TYPE, getGoodsType());
    result.putIfAbsent(Constants.Type.ORDER_TYPE, getOrderType());
    result.putIfAbsent(Constants.Type.ORDER_STATUS, getOrderStatus());
    result.putIfAbsent(Constants.Type.MODULE_TYPE, getModuleType());
    result.putIfAbsent(Constants.Type.BANNER_TYPE, getBannerType());
    result.putIfAbsent(Constants.Type.SHARE_CHANNEL, getBannerShareChannel());
    return new Response<>(result);
}
/**
 * Returns a predicate for distinction by the given fields.
 *
 * @param keyExtractors key extractors
 * @param <T> type of the predicate
 * @return predicate for distinction by the given fields
 */
@SafeVarargs
private final <T> Predicate<T> distinctByKeys(Function<? super T, ?>... keyExtractors) {
    final Map<List<?>, Boolean> seen = new ConcurrentHashMap<>();
    return t -> {
        final List<?> keys = Arrays.stream(keyExtractors)
                .map(ke -> ke.apply(t))
                .collect(Collectors.toList());
        return seen.putIfAbsent(keys, Boolean.TRUE) == null;
    };
}
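A typical use is as a stateful stream filter; in the hypothetical sketch below, Person, getFirstName, and getLastName are invented names for illustration. Because the predicate keeps its state in a ConcurrentHashMap and relies on putIfAbsent returning null exactly once per key, it also stays correct on parallel streams:

// Hypothetical usage sketch: Person and its getters are invented for illustration.
List<Person> unique = people.stream()
        .filter(distinctByKeys(Person::getFirstName, Person::getLastName))
        .collect(Collectors.toList());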
private List<List<FileMetadata>> generatePartitionGroups(List<FileMetadata> filteredFiles) {
    Map<String, List<FileMetadata>> map = new HashMap<>();
    for (FileMetadata fileMetadata : filteredFiles) {
        String groupId = extractGroupIdentifier(fileMetadata.getPath());
        map.putIfAbsent(groupId, new ArrayList<>());
        map.get(groupId).add(fileMetadata);
    }

    List<List<FileMetadata>> ret = new ArrayList<>();
    // sort the map to guarantee consistent ordering
    List<String> sortedKeys = new ArrayList<>(map.keySet());
    sortedKeys.sort(Comparator.naturalOrder());
    sortedKeys.stream().forEach(key -> ret.add(map.get(key)));
    return ret;
}
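The putIfAbsent-then-get pairing above (also used in several other examples on this page) can be collapsed into a single call with Map#computeIfAbsent, which only allocates the list when the key is genuinely missing. A minimal equivalent sketch of the grouping loop:

// Equivalent grouping loop using computeIfAbsent: the new ArrayList is only
// created when groupId is not yet present in the map.
for (FileMetadata fileMetadata : filteredFiles) {
    String groupId = extractGroupIdentifier(fileMetadata.getPath());
    map.computeIfAbsent(groupId, k -> new ArrayList<>()).add(fileMetadata);
}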
/**
 * Indexes proof entries by their height, also verifying their local correctness:
 * no out-of-range nodes; no duplicates.
 *
 * @param treeHeight the height of the proof list tree
 * @return a list of proof entries at each height from 0 to treeHeight;
 *     entries at each level are indexed by their index
 */
private List<Map<Long, ListProofHashedEntry>> indexHashedEntriesByHeight(int treeHeight) {
    List<Map<Long, ListProofHashedEntry>> proofByHeight = new ArrayList<>(treeHeight);
    for (int i = 0; i < treeHeight; i++) {
        // For single-element proofs, only a single proof node is expected on each level.
        // For contiguous range proofs, up to two proof nodes are expected on any level.
        // Multiple-range proofs might have up to 'elements.size' proof nodes on the lowest level,
        // but Exonum does not currently produce such.
        int initialCapacity = (elements.size() <= 1) ? 1 : 2;
        Map<Long, ListProofHashedEntry> proofAtHeight = newHashMapWithExpectedSize(initialCapacity);
        proofByHeight.add(proofAtHeight);
    }

    for (ListProofHashedEntry hashedEntry : proof) {
        // Check height
        int height = hashedEntry.getHeight();
        if (height < 0 || treeHeight <= height) {
            throw new InvalidProofException(
                String.format("Proof entry at invalid height (%d), must be in range [0; %d): %s",
                    height, treeHeight, hashedEntry));
        }
        // Check index
        long levelSize = levelSizeAt(height);
        long index = hashedEntry.getIndex();
        if (index < 0L || levelSize <= index) {
            throw new InvalidProofException(String.format(
                "Proof entry at invalid index (%d); it must be in range [0; %d) at height %d: %s",
                index, levelSize, height, hashedEntry));
        }
        // Add the entry at the height, checking for duplicates
        Map<Long, ListProofHashedEntry> proofsAtHeight = proofByHeight.get(height);
        ListProofHashedEntry present = proofsAtHeight.putIfAbsent(index, hashedEntry);
        if (present != null) {
            throw new InvalidProofException(
                String.format("Multiple proof entries at the same position: %s and %s",
                    present, hashedEntry));
        }
    }
    return proofByHeight;
}
@Override
public BeanDefinition parse(final Element element, final ParserContext context) {
    RootBeanDefinition definition = new RootBeanDefinition();
    definition.setBeanClass(beanClass);
    definition.setLazyInit(false);
    String id = element.getAttribute("id");
    if (requireId) {
        if (isEmpty(id)) {
            throw new IllegalConfigureException("spring.bean", id, "do not set", ExceptionCode.COMMON_VALUE_ILLEGAL);
        } else {
            if (context.getRegistry().containsBeanDefinition(id)) {
                throw new IllegalConfigureException("spring.bean", id, "duplicate spring bean id", ExceptionCode.COMMON_VALUE_ILLEGAL);
            }
            context.getRegistry().registerBeanDefinition(id, definition);
        }
    }
    // Set each property value
    String methodName;
    String property;
    CustomParser parser;
    List<Method> methods = getPublicMethod(beanClass);
    Map<String, CustomParser> parserMap = new HashMap<>(methods.size());
    for (Method setter : methods) {
        // Skip methods that are not property setters
        if (!isSetter(setter)) {
            continue;
        }
        methodName = setter.getName();
        property = methodName.substring(3, 4).toLowerCase() + methodName.substring(4);
        parser = parsers.get(property);
        if (parser != null) {
            parserMap.put(property, parser);
        } else {
            Alias alias = setter.getAnnotation(Alias.class);
            // The XML attribute name that corresponds to this bean property
            if (alias != null && !StringUtils.isEmpty(alias.value())) {
                parserMap.put(alias.value(), new AliasParser(property));
            } else {
                // Check whether an alias has already been registered for this property
                parserMap.putIfAbsent(property, new AliasParser(property));
            }
        }
    }
    parserMap.forEach((a, p) -> p.parse(definition, id, element, a, context));
    return definition;
}
static void preprocessConditionalOp(ConditionalOperation task, Callback<ConditionalOperation> callback) throws Exception {
    try {
        task.getEvent().setEnableUUID(task.space.isEnableUUID());
        // Ensure that the ID is a string or null and check for duplicate IDs
        Map<String, Boolean> ids = new HashMap<>();
        for (Entry<Feature> entry : task.modifyOp.entries) {
            final Object objId = entry.input.get(ID);
            String id = (objId instanceof String || objId instanceof Number) ? String.valueOf(objId) : null;
            if (task.prefixId != null) { // Generate IDs here, if a prefixId is required. Add the prefix otherwise.
                id = task.prefixId + ((id == null) ? RandomStringUtils.randomAlphanumeric(16) : id);
            }
            entry.input.put(ID, id);
            if (id != null) {
                // Minimum length of id should be 1
                if (id.length() < 1) {
                    logger.info(task.getMarker(), "Minimum length of object id should be 1.");
                    callback.exception(new HttpException(BAD_REQUEST, "Minimum length of object id should be 1."));
                    return;
                }
                // Test for duplicate IDs
                if (ids.containsKey(id)) {
                    logger.info(task.getMarker(), "Objects with the same ID {} are included in the request.", id);
                    callback.exception(new HttpException(BAD_REQUEST, "Objects with the same ID " + id + " are included in the request."));
                    return;
                }
                ids.put(id, true);
            }
            entry.input.putIfAbsent(TYPE, "Feature");
            // bbox is a dynamically calculated property
            entry.input.remove(BBOX);
            // Add the XYZ namespace if it is not set yet.
            entry.input.putIfAbsent(PROPERTIES, new HashMap<String, Object>());
            @SuppressWarnings("unchecked") final Map<String, Object> properties = (Map<String, Object>) entry.input.get("properties");
            properties.putIfAbsent(XyzNamespace.XYZ_NAMESPACE, new HashMap<String, Object>());
        }
    } catch (Exception e) {
        logger.error(task.getMarker(), e.getMessage(), e);
        callback.exception(new HttpException(BAD_REQUEST, "Unable to process the request input."));
        return;
    }
    callback.call(task);
}
private ResultAndTerm waitForResults(final long electionTerm, final int submitted,
        RaftConfiguration conf, Executor voteExecutor) throws InterruptedException {
    final Timestamp timeout = Timestamp.currentTime().addTimeMs(server.getRandomTimeoutMs());
    final Map<RaftPeerId, RequestVoteReplyProto> responses = new HashMap<>();
    final List<Exception> exceptions = new ArrayList<>();
    int waitForNum = submitted;
    Collection<RaftPeerId> votedPeers = new ArrayList<>();
    while (waitForNum > 0 && shouldRun(electionTerm)) {
        final TimeDuration waitTime = timeout.elapsedTime().apply(n -> -n);
        if (waitTime.isNonPositive()) {
            return logAndReturn(Result.TIMEOUT, responses, exceptions, -1);
        }

        try {
            final Future<RequestVoteReplyProto> future = voteExecutor.poll(waitTime);
            if (future == null) {
                continue; // poll timeout, continue to return Result.TIMEOUT
            }

            final RequestVoteReplyProto r = future.get();
            final RaftPeerId replierId = RaftPeerId.valueOf(r.getServerReply().getReplyId());
            final RequestVoteReplyProto previous = responses.putIfAbsent(replierId, r);
            if (previous != null) {
                if (LOG.isWarnEnabled()) {
                    LOG.warn("{} received duplicated replies from {}, the 2nd reply is ignored: 1st={}, 2nd={}",
                            this, replierId, ServerProtoUtils.toString(previous), ServerProtoUtils.toString(r));
                }
                continue;
            }
            if (r.getShouldShutdown()) {
                return logAndReturn(Result.SHUTDOWN, responses, exceptions, -1);
            }
            if (r.getTerm() > electionTerm) {
                return logAndReturn(Result.DISCOVERED_A_NEW_TERM, responses,
                        exceptions, r.getTerm());
            }
            if (r.getServerReply().getSuccess()) {
                votedPeers.add(replierId);
                if (conf.hasMajority(votedPeers, server.getId())) {
                    return logAndReturn(Result.PASSED, responses, exceptions, -1);
                }
            }
        } catch (ExecutionException e) {
            LogUtils.infoOrTrace(LOG, () -> this + " got exception when requesting votes", e);
            exceptions.add(e);
        }
        waitForNum--;
    }
    // received all the responses
    return logAndReturn(Result.REJECTED, responses, exceptions, -1);
}
protected static void addRepository(Map<String, LocalRepository> map, File gitDir, GitRepository gitRepository) {
    LocalRepository localRepository = new LocalRepository(gitRepository, new File(gitDir, gitRepository.getName()));
    map.putIfAbsent(localRepository.getCloneUrl(), localRepository);
}
/**
 * Registers a newly created {@link ConnectionAdapter} for the given process and hash if not already present.
 *
 * @param process
 *            the process to associate the connection with
 * @param hash
 *            the hash of the connection
 * @param adapter
 *            the connection to register
 * @since 9.6
 */
private static synchronized void registerAdapter(Process process, ConnectionCacheHash hash, ConnectionAdapter adapter) {
    ConnectionCacheByClass typeMap = CONNECTION_CACHE.computeIfAbsent(ProcessTools.getParentProcess(process), p -> {
        p.addProcessStateListener(CACHE_CLEAN_ON_STOP);
        return new ConnectionCacheByClass();
    });
    Map<ConnectionCacheHash, ConnectionAdapter> cachedConnections =
            typeMap.computeIfAbsent(adapter.getClass(), c -> new HashMap<>());
    cachedConnections.putIfAbsent(hash, adapter);
}
/**
 * Disable Java Buildpack Spring Auto-reconfiguration.
 *
 * @param environment a map containing environment variables
 * @return a copy of the map with the environment variable needed to disable auto-reconfiguration set
 */
static Map<String, String> disableJavaBuildPackAutoReconfiguration(Map<String, String> environment) {
    Map<String, String> updatedEnvironment = new HashMap<>(environment);
    updatedEnvironment.putIfAbsent(JBP_CONFIG_SPRING_AUTO_RECONFIGURATION, ENABLED_FALSE);
    return updatedEnvironment;
}
/**
 * Deduplicates a set of nodes by ID.
 *
 * @param idExtractor function extracting the ID to deduplicate by
 * @return a predicate that is true only for the first element seen with a given ID
 */
private static <T> Predicate<T> distinctById(Function<? super T, ?> idExtractor) {
    Map<Object, Boolean> seen = new ConcurrentHashMap<>();
    return t -> seen.putIfAbsent(idExtractor.apply(t), Boolean.TRUE) == null;
}