java.util.Map#entrySet() Source Code Examples

Listed below are example usages of java.util.Map#entrySet() collected from open-source projects; the full source of each example can be found in the corresponding project on GitHub.
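
Before the project examples, here is a minimal, self-contained sketch of the basic entrySet() iteration pattern that the snippets below build on; the class name and map contents are invented purely for illustration:

import java.util.HashMap;
import java.util.Map;

public class EntrySetDemo {
    public static void main(String[] args) {
        Map<String, Integer> ages = new HashMap<>();
        ages.put("alice", 30);
        ages.put("bob", 25);

        // entrySet() yields each key/value pair as a Map.Entry,
        // so the value is available without a second lookup per key
        // (unlike iterating keySet() and calling get() for each key).
        for (Map.Entry<String, Integer> entry : ages.entrySet()) {
            System.out.println(entry.getKey() + " -> " + entry.getValue());
        }
    }
}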

Example 1  Project: freeacs   File: UnittypeParameters.java
public UnittypeParameters(
    Map<Integer, UnittypeParameter> idMap,
    Map<String, UnittypeParameter> nameMap,
    Unittype unittype) {
  this.idMap = idMap;
  this.nameMap = nameMap;
  this.unittype = unittype;
  this.alwaysMap = new HashMap<>();
  this.displayableMap = new HashMap<>();
  this.searchableMap = new HashMap<>();
  for (Entry<Integer, UnittypeParameter> entry : idMap.entrySet()) {
    if (entry.getValue().getFlag().isAlwaysRead()) {
      alwaysMap.put(entry.getKey(), entry.getValue());
    }
    if (entry.getValue().getFlag().isDisplayable()) {
      displayableMap.put(entry.getKey(), entry.getValue());
    }
    if (entry.getValue().getFlag().isSearchable()) {
      searchableMap.put(entry.getKey(), entry.getValue());
    }
  }
}
 
Example 2  Project: TorrentEngine   File: ConcurrentHashMapWrapper.java
public void
putAll(
	Map<S,T>	from_map )
{
	for ( Map.Entry<S,T> entry: from_map.entrySet()){
		
		S key 	= entry.getKey();
		T value	= entry.getValue();
		
		if ( key == null ){
			
			key = S_NULL;
		}
		
		if ( value == null ){
			
			value = T_NULL;
		}
		
		map.put( key, value );
	}
}
 
Example 3  Project: http-api-invoker   File: MockRequestor.java
private boolean isMapMatch(Map<String, ?> mapFromMockRule, Map<String, ?> mapFromRequest) {
    // nothing in the rule to match against
    if (mapFromMockRule == null || mapFromMockRule.isEmpty()) {
        return true;
    }
    // the request contains no cookies
    if (mapFromRequest == null || mapFromRequest.isEmpty()) {
        return false;
    }
    for (Map.Entry<String, ?> entry : mapFromMockRule.entrySet()) {
        Object value = mapFromRequest.get(entry.getKey());
        // if any single cookie does not match the request, the rule does not match
        if (!ObjectUtils.equals(entry.getValue(), value)) {
            return false;
        }
    }
    return true;
}
 
Example 4  Project: mapleLemon   File: LoginWorker.java
public static void updateChannel(final MapleClient c) {
    if (System.currentTimeMillis() - lastUpdate > 10 * 60 * 1000) {
        lastUpdate = System.currentTimeMillis();
        Map<Integer, Integer> load = ChannelServer.getChannelLoad();
        int usersOn = 0;
        if (load.size() <= 0) {
            lastUpdate = 0;
            c.getSession().write(LoginPacket.getLoginFailed(7));
            return;
        }
        double loadFactor = LoginServer.getUserLimit() / load.size(); // user limit per channel
        for (Entry<Integer, Integer> entry : load.entrySet()) {
            load.put(entry.getKey(), Math.min(255, (int) (entry.getValue() / loadFactor * 255)));
        }
        LoginServer.setLoad(load, usersOn);
        lastUpdate = System.currentTimeMillis();
    }
}
 
Example 5  Project: triplea   File: BattleRecords.java
public BattleRecords(final BattleRecords records) {
  this.records = new HashMap<>();
  for (final Entry<GamePlayer, Map<UUID, BattleRecord>> entry : records.records.entrySet()) {
    final GamePlayer p = entry.getKey();
    final Map<UUID, BattleRecord> record = entry.getValue();
    final Map<UUID, BattleRecord> map = new HashMap<>();
    for (final Entry<UUID, BattleRecord> entry2 : record.entrySet()) {
      map.put(entry2.getKey(), new BattleRecord(entry2.getValue()));
    }
    this.records.put(p, map);
  }
}
 
Example 6  Project: hbase   File: TestClassLoading.java
void assertAllRegionServers(String tableName) throws InterruptedException {
  Map<ServerName, ServerMetrics> servers;
  boolean success = false;
  String[] expectedCoprocessors = regionServerSystemCoprocessors;
  if (tableName == null) {
    // if no tableName specified, use all servers.
    servers = TEST_UTIL.getMiniHBaseCluster().getMaster().getServerManager().getOnlineServers();
  } else {
    servers = serversForTable(tableName);
  }
  for (int i = 0; i < 5; i++) {
    boolean any_failed = false;
    for(Map.Entry<ServerName, ServerMetrics> server: servers.entrySet()) {
      String[] actualCoprocessors =
        server.getValue().getCoprocessorNames().stream().toArray(size -> new String[size]);
      if (!Arrays.equals(actualCoprocessors, expectedCoprocessors)) {
        LOG.debug("failed comparison: actual: " +
            Arrays.toString(actualCoprocessors) +
            " ; expected: " + Arrays.toString(expectedCoprocessors));
        any_failed = true;
        expectedCoprocessors = switchExpectedCoprocessors(expectedCoprocessors);
        break;
      }
      expectedCoprocessors = switchExpectedCoprocessors(expectedCoprocessors);
    }
    if (any_failed == false) {
      success = true;
      break;
    }
    LOG.debug("retrying after failed comparison: " + i);
    Thread.sleep(1000);
  }
  assertTrue(success);
}
 
Example 7
/**
 * Extract the schema.
 *
 * @param components the components
 * @param returnType the return type
 * @param jsonView the json view
 * @param annotations the annotations
 * @return the schema
 */
public static Schema extractSchema(Components components, Type returnType, JsonView jsonView, Annotation[] annotations) {
	Schema schemaN = null;
	ResolvedSchema resolvedSchema = null;
	try {
		resolvedSchema = ModelConverters.getInstance()
				.resolveAsResolvedSchema(
						new AnnotatedType(returnType).resolveAsRef(true).jsonViewAnnotation(jsonView).ctxAnnotations(annotations));
	}
	catch (Exception e) {
		LOGGER.warn(Constants.GRACEFUL_EXCEPTION_OCCURRED, e);
		return null;
	}
	if (resolvedSchema.schema != null) {
		schemaN = resolvedSchema.schema;
		Map<String, Schema> schemaMap = resolvedSchema.referencedSchemas;
		if (schemaMap != null) {
			for (Map.Entry<String, Schema> entry : schemaMap.entrySet()) {
				Map<String, Schema> componentSchemas = components.getSchemas();
				if (componentSchemas == null) {
					componentSchemas = new LinkedHashMap<>();
					componentSchemas.put(entry.getKey(), entry.getValue());
				}
				else if (!componentSchemas.containsKey(entry.getKey())) {
					componentSchemas.put(entry.getKey(), entry.getValue());
				}
				components.setSchemas(componentSchemas);
			}
		}
	}
	return schemaN;
}
 
Example 8  Project: CQL   File: NraViewer.java
private static <X, Y> Set<Pair<X, Y>> unnest1(Map<X, Set<Y>> s) {
	Set<Pair<X, Y>> ret = new HashSet<>();

	for (Entry<X, Set<Y>> x : s.entrySet()) {
		for (Y y : x.getValue()) {
			ret.add(new Pair<>(x.getKey(), y));
		}
	}

	return ret;
}
 
Example 9  Project: coming   File: JGenProg2017_0013_s.java
private String[] toArray(Map<String, Integer> era) {
    String[] eras = new String[era.size()]; // assume no gaps in entry values
    for(Map.Entry<String, Integer> me : era.entrySet()) {
        int idx = me.getValue().intValue();
        final String key = me.getKey();
        if (key == null) {
            throw new IllegalArgumentException();
        }
        eras[idx] = key;
    }
    return eras;
}
 
Example 10  Project: jHardware   File: WindowsDisplayInfo.java
private void completeWithDXData(Map<String, String> displayDataMap) {
    Map<String, String> dxDisplayDataMap = getInfoFromDXDiag();

    for (Map.Entry<String, String> entry : displayDataMap.entrySet()) {
        if (entry.getValue() == null || entry.getValue().isEmpty()) {
            entry.setValue(dxDisplayDataMap.get(entry.getKey()));
        }
    }
}
 
Example 11  Project: java-sdk   File: Assistant.java
/**
 * List user input examples.
 *
 * <p>List the user input examples for an intent, optionally including contextual entity mentions.
 *
 * <p>This operation is limited to 2500 requests per 30 minutes. For more information, see **Rate
 * limiting**.
 *
 * @param listExamplesOptions the {@link ListExamplesOptions} containing the options for the call
 * @return a {@link ServiceCall} with a response type of {@link ExampleCollection}
 */
public ServiceCall<ExampleCollection> listExamples(ListExamplesOptions listExamplesOptions) {
  com.ibm.cloud.sdk.core.util.Validator.notNull(
      listExamplesOptions, "listExamplesOptions cannot be null");
  String[] pathSegments = {"v1/workspaces", "intents", "examples"};
  String[] pathParameters = {listExamplesOptions.workspaceId(), listExamplesOptions.intent()};
  RequestBuilder builder =
      RequestBuilder.get(
          RequestBuilder.constructHttpUrl(getServiceUrl(), pathSegments, pathParameters));
  builder.query("version", versionDate);
  Map<String, String> sdkHeaders = SdkCommon.getSdkHeaders("conversation", "v1", "listExamples");
  for (Entry<String, String> header : sdkHeaders.entrySet()) {
    builder.header(header.getKey(), header.getValue());
  }
  builder.header("Accept", "application/json");
  if (listExamplesOptions.pageLimit() != null) {
    builder.query("page_limit", String.valueOf(listExamplesOptions.pageLimit()));
  }
  if (listExamplesOptions.sort() != null) {
    builder.query("sort", listExamplesOptions.sort());
  }
  if (listExamplesOptions.cursor() != null) {
    builder.query("cursor", listExamplesOptions.cursor());
  }
  if (listExamplesOptions.includeAudit() != null) {
    builder.query("include_audit", String.valueOf(listExamplesOptions.includeAudit()));
  }
  ResponseConverter<ExampleCollection> responseConverter =
      ResponseConverterUtils.getValue(
          new com.google.gson.reflect.TypeToken<ExampleCollection>() {}.getType());
  return createServiceCall(builder.build(), responseConverter);
}
 
Example 12  Project: ignite   File: GridTaskSessionImpl.java
/**
 * @param attrs Attributes to set.
 */
public void setInternal(Map<?, ?> attrs) {
    A.notNull(attrs, "attrs");

    checkFullSupport();

    if (attrs.isEmpty())
        return;

    List<ComputeTaskSessionAttributeListener> lsnrs;

    synchronized (mux) {
        if (this.attrs == null)
            this.attrs = new HashMap<>(attrs.size(), 1.0f);

        this.attrs.putAll(attrs);

        lsnrs = this.lsnrs;

        mux.notifyAll();
    }

    if (lsnrs != null)
        for (Map.Entry<?, ?> entry : attrs.entrySet())
            for (ComputeTaskSessionAttributeListener lsnr : lsnrs)
                lsnr.onAttributeSet(entry.getKey(), entry.getValue());
}
 
Example 13
private String highestScorePrediction(Map<String, Double> predictions) {
	Double predictionValue = null;
	String prediction = null;
	for (Map.Entry<String, Double> e : predictions.entrySet()) {
		if(predictionValue == null) {
			prediction = e.getKey();
			predictionValue = e.getValue();
		}
		if(Double.compare(e.getValue(), predictionValue) > 0) {
			predictionValue = e.getValue();
			prediction = e.getKey();
		}
	}
	return prediction;
}
 
Example 14  Project: linstor-server   File: PropsContainer.java
@Override
public void loadAll()
    throws DatabaseException, AccessDeniedException
{
    try
    {
        Map<String, String> loadedProps = dbDriver.loadAll(instanceName);
        for (Map.Entry<String, String> entry : loadedProps.entrySet())
        {
            String key = entry.getKey();
            String value = entry.getValue();

            PropsContainer targetContainer = this;
            int idx = key.lastIndexOf(Props.PATH_SEPARATOR);
            if (idx != -1)
            {
                targetContainer = ensureNamespaceExists(key.substring(0, idx));
            }
            String actualKey = key.substring(idx + 1);
            String oldValue = targetContainer.getRawPropMap().put(actualKey, value);
            if (oldValue == null)
            {
                targetContainer.modifySize(1);
            }
        }
    }
    catch (InvalidKeyException invalidKeyExc)
    {
        throw new LinStorDBRuntimeException(
                "PropsContainer could not be loaded because a key in the database has an invalid value.",
                invalidKeyExc
        );
    }
}
 
Example 15  Project: kogito-runtimes   File: BuildUtils.java
/**
 * Calculates the temporal distance between all event patterns in the given 
 * subrule.
 * 
 * @param groupElement the root element of a subrule being added to the rulebase
 */
public TemporalDependencyMatrix calculateTemporalDistance(GroupElement groupElement) {
    // find the events
    List<Pattern> events = new ArrayList<Pattern>();
    selectAllEventPatterns( events,
                            groupElement );

    final int size = events.size();
    if ( size >= 1 ) {
        // create the matrix
        Interval[][] source = new Interval[size][];
        for ( int row = 0; row < size; row++ ) {
            source[row] = new Interval[size];
            for ( int col = 0; col < size; col++ ) {
                if ( row == col ) {
                    source[row][col] = new Interval( 0,
                                                     0 );
                } else {
                    source[row][col] = new Interval( Interval.MIN,
                                                     Interval.MAX );
                }
            }
        }

        Interval[][] result;
        if ( size > 1 ) {
            List<Declaration> declarations = new ArrayList<Declaration>();
            int eventIndex = 0;
            // populate the matrix
            for ( Pattern event : events ) {
                // references to other events are always backward references, so we can build the list as we go
                declarations.add( event.getDeclaration() );
                Map<Declaration, Interval> temporal = new HashMap<Declaration, Interval>();
                gatherTemporalRelationships( event.getConstraints(),
                                             temporal );
                // intersects default values with the actual constrained intervals
                for ( Map.Entry<Declaration, Interval> entry : temporal.entrySet() ) {
                    int targetIndex = declarations.indexOf( entry.getKey() );
                    Interval interval = entry.getValue();
                    source[targetIndex][eventIndex].intersect( interval );
                    Interval reverse = new Interval( interval.getUpperBound() == Long.MAX_VALUE ? Long.MIN_VALUE : -interval.getUpperBound(), 
                                                     interval.getLowerBound() == Long.MIN_VALUE ? Long.MAX_VALUE : -interval.getLowerBound() );
                    source[eventIndex][targetIndex].intersect( reverse );
                }
                eventIndex++;
            }
            result = TimeUtils.calculateTemporalDistance( source );
        } else {
            result = source;
        }
        return new TemporalDependencyMatrix( result, events );
    }
    return null;
}
 
Example 16  Project: hadoop   File: ContainerManagerImpl.java
public void cleanupContainersOnNMResync() {
  Map<ContainerId, Container> containers = context.getContainers();
  if (containers.isEmpty()) {
    return;
  }
  LOG.info("Containers still running on "
      + CMgrCompletedContainersEvent.Reason.ON_NODEMANAGER_RESYNC + " : "
      + containers.keySet());

  List<ContainerId> containerIds =
    new ArrayList<ContainerId>(containers.keySet());

  LOG.info("Waiting for containers to be killed");

  this.handle(new CMgrCompletedContainersEvent(containerIds,
    CMgrCompletedContainersEvent.Reason.ON_NODEMANAGER_RESYNC));

  /*
   * We will wait till all the containers change their state to COMPLETE. We
   * will not remove the container statuses from nm context because these
   * are used while re-registering node manager with resource manager.
   */
  boolean allContainersCompleted = false;
  while (!containers.isEmpty() && !allContainersCompleted) {
    allContainersCompleted = true;
    for (Entry<ContainerId, Container> container : containers.entrySet()) {
      if (((ContainerImpl) container.getValue()).getCurrentState()
          != ContainerState.COMPLETE) {
        allContainersCompleted = false;
        try {
          Thread.sleep(1000);
        } catch (InterruptedException ex) {
          LOG.warn("Interrupted while sleeping on container kill on resync",
            ex);
        }
        break;
      }
    }
  }
  // All containers killed
  if (allContainersCompleted) {
    LOG.info("All containers in DONE state");
  } else {
    LOG.info("Done waiting for containers to be killed. Still alive: " +
      containers.keySet());
  }
}
 
Example 17  Project: neoscada   File: SummarizeChainItem.java
@Override
public Variant process ( final Variant value, final Map<String, Variant> attributes )
{
    attributes.put ( this.sumStateName, null );
    attributes.put ( this.sumCountName, null );
    attributes.put ( this.sumListName, null );

    long count = 0;
    final List<String> items = new LinkedList<String> ();
    final Set<String> ignoreItems = getIgnoreItems ();

    for ( final Map.Entry<String, Variant> entry : attributes.entrySet () )
    {
        final String attributeName = entry.getKey ();

        // ignore our own entries
        if ( !attributeName.equals ( this.sumStateName ) && !attributeName.equals ( this.sumCountName ) && !attributeName.equals ( this.sumListName ) && !ignoreItems.contains ( attributeName ) )
        {
            try
            {
                if ( matches ( value, attributeName, entry.getValue () ) )
                {
                    if ( entry.getValue () != null && entry.getValue ().asBoolean () )
                    {
                        count++;
                        items.add ( entry.getKey () );
                    }
                }
            }
            catch ( final Exception e )
            {
                logger.warn ( String.format ( "Failed to summarize item '%s'", attributeName ), e );
            }
        }
    }

    attributes.put ( this.sumStateName, Variant.valueOf ( count > 0 ) );
    attributes.put ( this.sumCountName, Variant.valueOf ( count ) );
    attributes.put ( this.sumListName, Variant.valueOf ( StringHelper.join ( items, ", " ) ) );

    // no change
    return null;
}
 
Example 18  Project: btree4j   File: BTreeTest.java
@Test
public void test10m() throws BTreeException {
    File tmpDir = FileUtils.getTempDir();
    Assert.assertTrue(tmpDir.exists());
    File indexFile = new File(tmpDir, "test10m.idx");
    indexFile.deleteOnExit();
    if (indexFile.exists()) {
        Assert.assertTrue(indexFile.delete());
    }

    BTree btree = new BTree(indexFile, false);
    btree.init(false);

    final Map<Value, Long> kv = new HashMap<>();
    final Random rand = new Random();
    for (int i = 0; i < 10000000; i++) {
        long nt = System.nanoTime(), val = rand.nextInt(Integer.MAX_VALUE); // FIXME val = rand.nextLong();
        Value key = new Value(String.valueOf(nt) + val);
        btree.addValue(key, val);
        if (i % 10000 == 0) {
            kv.put(key, val);
            //println("put k: " + key + ", v: " + val);
        }
        Assert.assertEquals(val, btree.findValue(key));

        //if (i % 1000000 == 0) {
        //    btree.flush();
        //}
    }
    btree.flush(true, true);
    btree.close();

    Assert.assertTrue(indexFile.exists());
    println("File size of '" + FileUtils.getFileName(indexFile) + "': "
            + PrintUtils.prettyFileSize(indexFile));

    btree = new BTree(indexFile, false);
    btree.init(false);
    for (Entry<Value, Long> e : kv.entrySet()) {
        Value k = e.getKey();
        Long v = e.getValue();
        long result = btree.findValue(k);
        Assert.assertNotEquals("key is not registered: " + k, BTree.KEY_NOT_FOUND, result);
        Assert.assertEquals("Exexpected value '" + result + "' found for key: " + k,
            v.longValue(), result);
    }
}
 
Example 19  Project: big-c   File: ParentQueue.java
@Override
public synchronized void reinitialize(CSQueue newlyParsedQueue,
    Resource clusterResource) throws IOException {
  // Sanity check
  if (!(newlyParsedQueue instanceof ParentQueue) ||
      !newlyParsedQueue.getQueuePath().equals(getQueuePath())) {
    throw new IOException("Trying to reinitialize " + getQueuePath() +
        " from " + newlyParsedQueue.getQueuePath());
  }

  ParentQueue newlyParsedParentQueue = (ParentQueue)newlyParsedQueue;

  // Set new configs
  setupQueueConfigs(clusterResource);

  // Re-configure existing child queues and add new ones
  // The CS has already checked to ensure all existing child queues are present!
  Map<String, CSQueue> currentChildQueues = getQueues(childQueues);
  Map<String, CSQueue> newChildQueues = 
      getQueues(newlyParsedParentQueue.childQueues);
  for (Map.Entry<String, CSQueue> e : newChildQueues.entrySet()) {
    String newChildQueueName = e.getKey();
    CSQueue newChildQueue = e.getValue();

    CSQueue childQueue = currentChildQueues.get(newChildQueueName);
    
    // Check if the child-queue already exists
    if (childQueue != null) {
      // Re-init existing child queues
      childQueue.reinitialize(newChildQueue, clusterResource);
      LOG.info(getQueueName() + ": re-configured queue: " + childQueue);
    } else {
      // New child queue, do not re-init
      
      // Set parent to 'this'
      newChildQueue.setParent(this);
      
      // Save in list of current child queues
      currentChildQueues.put(newChildQueueName, newChildQueue);
      
      LOG.info(getQueueName() + ": added new child queue: " + newChildQueue);
    }
  }

  // Re-sort all queues
  childQueues.clear();
  childQueues.addAll(currentChildQueues.values());
}
 
Example 20  Project: freeacs   File: ActiveDeviceDetectionTask.java
@Override
public void runImpl() throws Throwable {
  long anHourAgo = System.currentTimeMillis() - 60 * 60000;
  logger.info(
      "ActiveDeviceDetectionTask: Will check if some devices scheduled to return before "
          + new Date(anHourAgo)
          + " are too late");
  Map<String, Long> inactiveUnits = cleanOld(anHourAgo);
  logger.info(
      "ActiveDeviceDetectionTask: Have found " + inactiveUnits.size() + " inactive devices");
  for (Entry<String, Long> entry : inactiveUnits.entrySet()) {
    String unitId = entry.getKey();
    Syslog syslog = dbi.getSyslog();
    SyslogFilter sf = new SyslogFilter();
    sf.setCollectorTmsStart(new Date(anHourAgo)); // look for syslog newer than 1 hour
    sf.setUnitId(unitId);
    boolean active = false;
    List<SyslogEntry> entries = syslog.read(sf, dbi.getAcs());
    for (SyslogEntry sentry : entries) {
      if (sentry.getFacility() < SyslogConstants.FACILITY_SHELL
          && !sentry.getContent().contains(Heartbeat.MISSING_HEARTBEAT_ID)) {
        logger.info(
            "ActivceDeviceDetection: Found syslog activity for unit "
                + unitId
                + " at "
                + sentry.getCollectorTimestamp()
                + " : "
                + sentry.getContent());
        active = true;
        break;
      }
    }
    if (active) {
      SyslogClient.info(
          entry.getKey(),
          "ProvMsg: No provisioning at "
              + new Date(entry.getValue())
              + " (as expected) or since, but device has been active since "
              + new Date(anHourAgo)
              + ". TR-069 client may have stopped",
          dbi.getSyslog());
      logger.info(
          "ActivceDeviceDetection: Unit "
              + entry.getKey()
              + ": No provisioning at "
              + new Date(entry.getValue())
              + " (as expected) or since, but device has been active "
              + new Date(anHourAgo)
              + ". TR-069 client may have stopped");
    } else {
      logger.info(
          "ActivceDeviceDetection: Unit "
              + entry.getKey()
              + ": No provisioning at "
              + new Date(entry.getValue())
              + " (as expected) or since, but device may be inactive since "
              + new Date(anHourAgo));
    }
  }
}