java.util.Map#values() Source Code Examples

The following examples show how java.util.Map#values() is used in real projects; each example links to the full source on GitHub.
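Before the project examples, here is a minimal, self-contained sketch (plain JDK; the class and variable names are made up for illustration) of the basic contract of Map#values(): it returns a collection view backed by the map, so iterating it visits every value, and removing elements through the view removes the corresponding entries from the map.

import java.util.HashMap;
import java.util.Map;

public class MapValuesDemo {
    public static void main(String[] args) {
        Map<String, Integer> stock = new HashMap<>();
        stock.put("apple", 3);
        stock.put("banana", 0);
        stock.put("cherry", 7);

        // Iterate over the values only; the keys are not needed here.
        int total = 0;
        for (int count : stock.values()) {
            total += count;
        }
        System.out.println("total = " + total); // total = 10

        // values() is a live view: removing from it removes the entry from the map.
        stock.values().removeIf(count -> count == 0);
        System.out.println(stock.containsKey("banana")); // false
    }
}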

@Test
void loadsUnloadsAll(@TempDir Path tmp) throws Exception {
  Map<String, Path> pluginLocationsById = ImmutableMap.of(PLUGIN_ID, tmp.resolve("p1.jar"),
      PLUGIN_ID_2, tmp.resolve("p2.jar"));

  // Prepare the artifact files
  for (Entry<String, Path> e : pluginLocationsById.entrySet()) {
    anArtifact()
        .setPluginId(e.getKey())
        .writeTo(e.getValue());
  }

  // Load the plugins
  for (Path location : pluginLocationsById.values()) {
    serviceLoader.loadService(location);
  }

  // Unload all
  serviceLoader.unloadAll();

  // Verify each is unloaded
  Set<String> pluginIds = pluginLocationsById.keySet();
  pluginIds.forEach(this::verifyUnloaded);
}
 
/**
 * Triggers a rollback on the transaction resources so that every registered resource instance rolls back.
 * @param status the current transaction status
 */
protected void triggerRollback(TransactionStatus status){
	Map<Object, TransactionResource> trs = TransactionResourceManager.getResourceMap();
	List<Throwable> rollbackException = new ArrayList<>();
	List<TransactionResource> res = new ArrayList<>();
	for (TransactionResource tr : trs.values()) {
		try {
			tr.rollback();
		} catch (Throwable e) {
			rollbackException.add(e);
			res.add(tr);
		}
	}
	
	if(rollbackException.size() > 0){
		logger.info("Transaction resource rollback failed for " + res.size() + " resource(s).");
	}
}
 
Example 3  Project: o2oa  File: IdentityFactory.java
public List<Identity> listMajorOfPerson(Business business, List<String> people) throws Exception {
	List<Identity> list = new ArrayList<>();
	List<Identity> identities = business.entityManagerContainer().listIn(Identity.class, Identity.person_FIELDNAME,
			people);
	Map<String, List<Identity>> map = identities.stream().collect(Collectors.groupingBy(Identity::getPerson));
	for (List<Identity> os : map.values()) {
		Optional<Identity> optional = os.stream().filter(o -> BooleanUtils.isTrue(o.getMajor())).findFirst();
		if (optional.isPresent()) {
			list.add(optional.get());
		} else {
			list.add(os.stream()
					.sorted(Comparator.comparing(Identity::getUnitLevel, Comparator.nullsLast(Integer::compareTo))
							.thenComparing(Identity::getUpdateTime, Comparator.nullsLast(Date::compareTo)))
					.findFirst().get());
		}
	}
	return list;
}
 
Example 4  Project: DDMQ  File: ConsumeSubscriptionServiceImpl.java
private void removeCProxy(String host, List<ConsumeSubscription> subList) throws Exception {
    String ipPort = HostUtils.getIpPortFromHost(host, ZKV4ConfigServiceImpl.DEFAULT_CPROXY_PORT);
    Set<Long> clusterIds = Sets.newHashSet();
    for (ConsumeSubscription sub : subList) {
        if (sub.getConsumeSubscriptionConfig() != null && MapUtils.isNotEmpty(sub.getConsumeSubscriptionConfig().getProxies())) {
            Map<String, Set<String>> proxyMap = sub.getConsumeSubscriptionConfig().getProxies();
            for (Set<String> ipLists : proxyMap.values()) {
                if (ipLists.contains(ipPort)) {
                    ipLists.remove(ipPort);
                    clusterIds.add(sub.getClusterId());
                    updateByPrimaryKey(sub);

                    pushV4ZkInfo(sub.getGroupId(), null);
                    LOGGER.info("remove cproxy {} success, subId={}, group={}, topic={}, cluster={}", ipPort, sub.getId(), sub.getGroupName(), sub.getTopicName(), sub.getClusterName());
                }
            }
        }
    }
    if (CollectionUtils.isNotEmpty(clusterIds)) {
        zkv4ConfigService.updateCProxyConfigByClusterId("removeCProxy", clusterIds);
    }
}
 
Example 5  Project: activiti6-boot2  File: DbSqlSession.java
public void determineUpdatedObjects() {
  updatedObjects = new ArrayList<Entity>();
  Map<Class<?>, Map<String, CachedEntity>> cachedObjects = entityCache.getAllCachedEntities();
  for (Class<?> clazz : cachedObjects.keySet()) {

    Map<String, CachedEntity> classCache = cachedObjects.get(clazz);
    for (CachedEntity cachedObject : classCache.values()) {

      Entity cachedEntity = cachedObject.getEntity();
      
      // Executions are stored as a hierarchical tree, and updates are important to execute
      // even when the execution is deleted, as they can change the parent-child relationships.
      // For the other entities, this is not applicable and an update can be discarded when an update follows.
      
      if (!isEntityInserted(cachedEntity) && 
          (ExecutionEntity.class.isAssignableFrom(cachedEntity.getClass()) || !isEntityToBeDeleted(cachedEntity)) &&
          cachedObject.hasChanged() 
          ) {
        updatedObjects.add(cachedEntity);
      }
    }
  }
}
 
Example 6  Project: onetwo  File: MvcInterceptorManager.java
@Override
public void onHandlerMethodsInitialized(Map<RequestMappingInfo, HandlerMethod> handlerMethods) {
	for(HandlerMethod hm : handlerMethods.values()){
		List<? extends MvcInterceptor> interceptors = null;
		try {
			interceptors = findMvcInterceptors(hm);
		} catch (Exception e) {
			throw new BaseException("find MvcInterceptor error for HandlerMethod: " + hm.getMethod(), e);
		}
		if(!interceptors.isEmpty()){
			AnnotationAwareOrderComparator.sort(interceptors);
			HandlerMethodInterceptorMeta meta = new HandlerMethodInterceptorMeta(hm, interceptors);
			interceptorMetaCaces.put(hm.getMethod(), meta);
			if(log.isDebugEnabled()){
				log.debug("MvcInterceptor: {} -> {}", hm.getMethod(), interceptors);
			}
		}
	}
}
 
Example 7  Project: codebuff  File: MapModelAdaptor.java
@Override
public Object getProperty(Interpreter interp, ST self, Object o, Object property, String propertyName) throws STNoSuchPropertyException {
    Object value;
    Map<?, ?> map = (Map<?, ?>)o;
    if ( property==null ) value = map.get(STGroup.DEFAULT_KEY);
    else if ( property.equals("keys") ) value = map.keySet();
    else if ( property.equals("values") ) value = map.values();
    else if ( map.containsKey(property) ) value = map.get(property);
    else if ( map.containsKey(propertyName) ) { // if can't find the key, try toString version
        value = map.get(propertyName);
         }
    else value = map.get(STGroup.DEFAULT_KEY); // not found, use default
    if ( value==STGroup.DICT_KEY ) {
        value = property;
    }
    return value;
}
 
Example 8  Project: incubator-pinot  File: HolidayEventsLoader.java
void mergeWithExistingHolidays(Map<String, List<EventDTO>> holidayNameToHolidayEvent, List<EventDTO> existingEvents) {
  for (EventDTO existingEvent : existingEvents) {
    String holidayName = existingEvent.getName();
    if (!holidayNameToHolidayEvent.containsKey(holidayName)) {
      // If an event disappears, delete the event
      eventDAO.delete(existingEvent);
    } else {
      // If an existing event shows up again, overwrite with new time and country code.
      List<EventDTO> eventList = holidayNameToHolidayEvent.get(holidayName);
      EventDTO newEvent = eventList.remove(eventList.size() - 1);

      existingEvent.setStartTime(newEvent.getStartTime());
      existingEvent.setEndTime(newEvent.getEndTime());
      existingEvent.setTargetDimensionMap(newEvent.getTargetDimensionMap());
      eventDAO.update(existingEvent);

      if (eventList.isEmpty()) {
        holidayNameToHolidayEvent.remove(holidayName);
      }
    }
  }

  // Add all remaining new events into the database
  for (List<EventDTO> eventDTOList : holidayNameToHolidayEvent.values()) {
    for (EventDTO eventDTO : eventDTOList) {
      eventDAO.save(eventDTO);
    }
  }
}
 
Example 9  Project: juddi  File: UDDIClient.java
/**
 * Removes the bindings of the services in the annotated classes.
 * Multiple nodes may register the same service using different
 * BindingTempates. If the last BindingTemplate is removed the service
 * can be removed as well.
 *
 * @param removeServiceWithNoBindingTemplates - if set to true it will
 * remove the service if there are no other BindingTemplates.
 */
public void unRegisterBindingsOfAnnotatedServices(boolean removeServiceWithNoBindingTemplates) {

        Map<String, UDDIClerk> clerks = clientConfig.getUDDIClerks();
        if (clerks.size() > 0) {
                AnnotationProcessor ap = new AnnotationProcessor();
                for (UDDIClerk clerk : clerks.values()) {
                        Collection<BusinessService> services = ap.readServiceAnnotations(
                                clerk.getClassWithAnnotations(), clerk.getUDDINode().getProperties());
                        for (BusinessService businessService : services) {
                                if (businessService.getBindingTemplates() != null) {
                                        List<BindingTemplate> bindingTemplates = businessService.getBindingTemplates().getBindingTemplate();
                                        for (BindingTemplate bindingTemplate : bindingTemplates) {
                                                clerk.unRegisterBinding(bindingTemplate.getBindingKey(), clerk.getUDDINode().getApiNode());
                                        }
                                }
                                if (removeServiceWithNoBindingTemplates) {
                                        try {
                                                BusinessService existingService = clerk.getServiceDetail(businessService.getServiceKey(), clerk.getUDDINode().getApiNode());
                                                if (existingService.getBindingTemplates() == null || existingService.getBindingTemplates().getBindingTemplate().size() == 0) {
                                                        clerk.unRegisterService(businessService.getServiceKey(), clerk.getUDDINode().getApiNode());
                                                }
                                        } catch (Exception e) {
                                                log.error(e.getMessage(), e);
                                        }
                                }
                        }
                }
        }

}
 
Example 10  Project: oasp4j-ide  File: Configurator.java
/**
 * @return a {@link Collection} with all available {@link EclipseWorkspaceFile}.
 */
private Collection<EclipseWorkspaceFile> collectWorkspaceFiles() {

  Map<String, EclipseWorkspaceFile> fileMap = new HashMap<String, EclipseWorkspaceFile>();
  collectWorkspaceFiles(new File(this.pluginsUpdateDirectoryPath), fileMap);
  collectWorkspaceFiles(new File(this.pluginsSetupDirectoryPath), fileMap);
  Collection<EclipseWorkspaceFile> values = fileMap.values();
  Log.LOGGER.info("Collected " + values.size() + " configuration files.");
  return values;
}
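One detail worth noting about this example: the collection returned by fileMap.values() is a view backed by the local HashMap, not an independent copy. That is harmless here because nothing modifies fileMap after the method returns, but callers that keep both the map and the returned collection around should take a defensive copy, e.g. new ArrayList<>(fileMap.values()), as Example 16 (aedict) below does with new ArrayList<>(result.values()).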
 
Example 11  Project: common-kafka  File: KafkaAdminClient.java
private Collection<TopicDescription> getTopicDescriptions(Collection<String> topics) {
    try {
        Map<String, TopicDescription> topicDescriptions = getNewAdminClient().describeTopics(topics).all()
            .get(operationTimeout, TimeUnit.MILLISECONDS);
        if (topics.isEmpty()) {
            LOG.warn("Unable to describe Kafka topics");
        }
        return topicDescriptions.values();
    } catch (InterruptedException | ExecutionException | TimeoutException e) {
        throw new AdminOperationException("Unable to describe Kafka topics", e);
    }
}
 
Example 12  Project: gemfirexd-oss  File: DestroyRegionExecutor.java
public Object executeAndVerify(TestCommandInstance instance) {    
  TestableGfsh gfsh = CLITest.getTestableShell();
  if(!gfsh.isConnectedAndReady()){
    CLITest.connectGfshToManagerNode();
  }    
  Object object[] = TestableGfsh.execAndLogCommand(gfsh, instance.toString(), CLITest.getGfshOutputFile(), false);
  Map map = (Map) object[0];
  CommandResult result =null;
  Collection values = map.values();
  for(Object r : values){
    if(r instanceof CommandResult){
      result = (CommandResult) r;      
      if(!result.getStatus().equals(Result.Status.OK)){
        //throw new TestException("Command return status is *NOT* OK. Command execution has failed");
        CLITest.currentCommand.append(" Command return status is *NOT* OK. Command execution has failed");
        CLITest.hasCommandFailed = true;
      }
      else
        HydraUtil.logInfo("Completed execution of <" + instance + "> successfully");
    }
  }    
  verifyGemfire(gfsh, object);
  verifyJMX(gfsh, object);
  verifyCommand(gfsh, object);
  
  RegionEvents e = new RegionEvents();
  e.regionDestroyed(regionPath, null);
  e.exportToBlackBoard(OperationsBlackboard.getBB());
  this.regionPath = null;    

  return object;
}
 
Example 13  Project: redisson  File: RedissonMapReduceTest.java
@Override
public Integer collate(Map<String, Integer> resultMap) {
    redisson.getAtomicLong("test").incrementAndGet();
    
    int result = 0;
    for (Integer count : resultMap.values()) {
        result += count;
    }
    return result;
}
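For reference, on Java 8+ the same summation over resultMap.values() can be written with a stream; this is purely an equivalent illustration, not part of the Redisson test:

int result = resultMap.values().stream().mapToInt(Integer::intValue).sum();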
 
Example 14  Project: hadoop  File: TestDataNodeHotSwapVolumes.java
@Test(timeout=60000)
public void testAddVolumesDuringWrite()
    throws IOException, InterruptedException, TimeoutException,
    ReconfigurationException {
  startDFSCluster(1, 1);
  String bpid = cluster.getNamesystem().getBlockPoolId();
  Path testFile = new Path("/test");
  createFile(testFile, 4);  // Each volume has 2 blocks.

  addVolumes(2);

  // Continue to write the same file, thus the new volumes will have blocks.
  DFSTestUtil.appendFile(cluster.getFileSystem(), testFile, BLOCK_SIZE * 8);
  verifyFileLength(cluster.getFileSystem(), testFile, 8 + 4);
  // After appending data, there should be [2, 2, 4, 4] blocks in each volume
  // respectively.
  List<Integer> expectedNumBlocks = Arrays.asList(2, 2, 4, 4);

  List<Map<DatanodeStorage, BlockListAsLongs>> blockReports =
      cluster.getAllBlockReports(bpid);
  assertEquals(1, blockReports.size());  // 1 DataNode
  assertEquals(4, blockReports.get(0).size());  // 4 volumes
  Map<DatanodeStorage, BlockListAsLongs> dnReport =
      blockReports.get(0);
  List<Integer> actualNumBlocks = new ArrayList<Integer>();
  for (BlockListAsLongs blockList : dnReport.values()) {
    actualNumBlocks.add(blockList.getNumberOfBlocks());
  }
  Collections.sort(actualNumBlocks);
  assertEquals(expectedNumBlocks, actualNumBlocks);
}
 
Example 15  Project: singleton  File: CLDRUtils.java
/**
 * Queries CLDR's availableLocales.json to determine whether a pattern JSON exists for the locale;
 * if none is found there, queries defaultContent.json.
 *
 * @param region
 * @param language
 * @param availableLocales
 * @param defaultContentLocales
 * @param localeAliases
 * @return true if a matching pattern locale exists in availableLocales or defaultContentLocales; false otherwise
 */
private static boolean isPatternExist(String region, String language,
                                      Map<String, String> availableLocales,
                                      Map<String, String> defaultContentLocales,
                                      Map<String, Object> localeAliases) {
    if (language.contains("_")) {
        language = language.replace("_", "-");
    }
    String newLocale = language + "-" + region;
    String matchLocale = CommonUtil.getMatchingLocale(localeAliases, newLocale);
    if (!CommonUtil.isEmpty(matchLocale)) {
        newLocale = matchLocale;
    }

    for (String locale : availableLocales.values()) {
        if (locale.equals(newLocale)) {
            return true;
        }
    }

    for (String defaultRegion : defaultContentLocales.values()) {
        if (defaultRegion.equals(newLocale)) {
            return true;
        }
    }
    return false;
}
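The two loops above are linear scans of the value collections, which is exactly what Map#containsValue performs; purely as an illustrative alternative (assuming the rest of the method stays unchanged), the method could end with:

return availableLocales.containsValue(newLocale) || defaultContentLocales.containsValue(newLocale);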
 
Example 16  Project: aedict  File: DownloadDictionaryActivity.java
@Override
public List<DownloadableDictionaryInfo> impl(Void... params) throws Exception {
	publish(new Progress(AedictApp.getStr(R.string.downloadingDictionaryList), 0, 1));
	final Map<String, DownloadableDictionaryInfo> result = new HashMap<String, DownloadableDictionaryInfo>();
	final BufferedReader reader = new BufferedReader(new InputStreamReader(new URL(DICT_LIST_URL).openStream(), "UTF-8"));
	try {
		while (true) {
			final String line = reader.readLine();
			if (line == null) {
				break;
			}
			if (MiscUtils.isBlank(line)) {
				continue;
			}
			final DownloadableDictionaryInfo info = DownloadableDictionaryInfo.parse(line);
			result.put(info.zipName, info);
		}
	} finally {
		MiscUtils.closeQuietly(reader);
	}
	// remove all dictionaries which are already downloaded
	for (final Dictionary d: Dictionary.listEdictInstalled()) {
		if(d.custom!=null){
			result.remove("edict-lucene-"+d.custom+".zip");
		}
	}
	final List<DownloadableDictionaryInfo> items = new ArrayList<DownloadableDictionaryInfo>(result.values());
	Collections.sort(items);
	return items;
}
 
Example 17  Project: lucene-solr  File: SolrTestCaseHS.java
@SuppressWarnings({"unchecked"})
public static Object createDocObjects(@SuppressWarnings({"rawtypes"})Map<Comparable, Doc> fullModel,
                                      @SuppressWarnings({"rawtypes"})Comparator sort, int rows,
                                      Collection<String> fieldNames) {
  List<Doc> docList = new ArrayList<>(fullModel.values());
  Collections.sort(docList, sort);
  @SuppressWarnings({"rawtypes"})
  List sortedDocs = new ArrayList(rows);
  for (Doc doc : docList) {
    if (sortedDocs.size() >= rows) break;
    Map<String,Object> odoc = toObject(doc, h.getCore().getLatestSchema(), fieldNames);
    sortedDocs.add(odoc);
  }
  return sortedDocs;
}
 
Example 18  Project: gemfirexd-oss  File: GemFireTimeSync.java
private long getRTTStdDev(Map<Address, GFTimeSyncHeader> values, long average) {
  long sqDiffs = 0;
  for (GFTimeSyncHeader response: values.values()) {
    long diff = average - (response.timeReceived - response.time);
    sqDiffs += diff * diff;
  }
  return Math.round(Math.sqrt((double)sqDiffs));
}
 
Example 19  Project: Flink-CEPplus  File: JsonJobGraphGenerationTest.java
@Override
public void validateJson(String json) throws Exception {
	final Map<String, JsonNode> idToNode = new HashMap<>();

	// validate the produced JSON
	ObjectMapper m = new ObjectMapper();
	JsonNode rootNode = m.readTree(json);

	JsonNode idField = rootNode.get("jid");
	JsonNode nameField = rootNode.get("name");
	JsonNode arrayField = rootNode.get("nodes");

	assertNotNull(idField);
	assertNotNull(nameField);
	assertNotNull(arrayField);
	assertTrue(idField.isTextual());
	assertTrue(nameField.isTextual());
	assertTrue(arrayField.isArray());

	ArrayNode array = (ArrayNode) arrayField;
	Iterator<JsonNode> iter = array.elements();
	while (iter.hasNext()) {
		JsonNode vertex = iter.next();

		JsonNode vertexIdField = vertex.get("id");
		JsonNode parallelismField = vertex.get("parallelism");
		JsonNode contentsFields = vertex.get("description");
		JsonNode operatorField = vertex.get("operator");

		assertNotNull(vertexIdField);
		assertTrue(vertexIdField.isTextual());
		assertNotNull(parallelismField);
		assertTrue(parallelismField.isNumber());
		assertNotNull(contentsFields);
		assertTrue(contentsFields.isTextual());
		assertNotNull(operatorField);
		assertTrue(operatorField.isTextual());

		if (contentsFields.asText().startsWith("Sync")) {
			assertEquals(1, parallelismField.asInt());
		}
		else {
			assertEquals(expectedParallelism, parallelismField.asInt());
		}

		idToNode.put(vertexIdField.asText(), vertex);
	}

	assertEquals(numNodes, idToNode.size());

	// check that all inputs are contained
	for (JsonNode node : idToNode.values()) {
		JsonNode inputsField = node.get("inputs");
		if (inputsField != null) {
			Iterator<JsonNode> inputsIter = inputsField.elements();
			while (inputsIter.hasNext()) {
				JsonNode inputNode = inputsIter.next();
				JsonNode inputIdField = inputNode.get("id");

				assertNotNull(inputIdField);
				assertTrue(inputIdField.isTextual());

				String inputIdString = inputIdField.asText();
				assertTrue(idToNode.containsKey(inputIdString));
			}
		}
	}
}
 
Example 20  Project: incubator-pinot  File: BenchmarkQueryEngine.java
@Setup
public void startPinot()
    throws Exception {
  System.out.println("Using table name " + TABLE_NAME);
  System.out.println("Using data directory " + DATA_DIRECTORY);
  System.out.println("Starting pinot");

  PerfBenchmarkDriverConf conf = new PerfBenchmarkDriverConf();
  conf.setStartBroker(true);
  conf.setStartController(true);
  conf.setStartServer(true);
  conf.setStartZookeeper(true);
  conf.setRunQueries(false);
  conf.setServerInstanceSegmentTarDir(null);
  conf.setServerInstanceDataDir(DATA_DIRECTORY);
  conf.setConfigureResources(false);
  _perfBenchmarkDriver = new PerfBenchmarkDriver(conf);
  _perfBenchmarkDriver.run();

  File[] segments = new File(DATA_DIRECTORY, TABLE_NAME).listFiles();
  for (File segmentDir : segments) {
    SegmentMetadataImpl segmentMetadata = new SegmentMetadataImpl(segmentDir);
    _perfBenchmarkDriver.configureTable(TABLE_NAME);
    System.out.println("Adding segment " + segmentDir.getAbsolutePath());
    _perfBenchmarkDriver.addSegment(TABLE_NAME, segmentMetadata);
  }

  ZkClient client = new ZkClient("localhost:2191", 10000, 10000, new ZNRecordSerializer());

  ZNRecord record = client.readData("/PinotPerfTestCluster/EXTERNALVIEW/" + TABLE_NAME);
  while (true) {
    System.out.println("record = " + record);
    Uninterruptibles.sleepUninterruptibly(10, TimeUnit.SECONDS);

    int onlineSegmentCount = 0;
    for (Map<String, String> instancesAndStates : record.getMapFields().values()) {
      for (String state : instancesAndStates.values()) {
        if (state.equals("ONLINE")) {
          onlineSegmentCount++;
          break;
        }
      }
    }

    System.out.println(onlineSegmentCount + " segments online out of " + segments.length);

    if (onlineSegmentCount == segments.length) {
      break;
    }

    record = client.readData("/PinotPerfTestCluster/EXTERNALVIEW/" + TABLE_NAME);
  }

  ranOnce = false;

  System.out.println(_perfBenchmarkDriver.postQuery(QUERY_PATTERNS[queryPattern], optimizationFlags).toString());
}