java.util.LinkedHashMap#forEach() Source Code Examples

The following examples show how java.util.LinkedHashMap#forEach() is used in real open-source projects; the original sources can be found on GitHub.
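A quick refresher before the examples: LinkedHashMap#forEach(BiConsumer) visits entries in insertion order (or access order, if the map was constructed that way). A minimal self-contained sketch:

import java.util.LinkedHashMap;

public class ForEachDemo {
    public static void main(String[] args) {
        LinkedHashMap<String, Integer> map = new LinkedHashMap<>();
        map.put("first", 1);
        map.put("second", 2);
        map.put("third", 3);

        // Entries are visited in insertion order: first, second, third
        map.forEach((key, value) -> System.out.println(key + " -> " + value));
    }
}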

Example 1, Project: Flink-CEPplus, File: PojoSerializerSnapshot.java
/**
 * Transforms the subclass serializer registry structure, {@code LinkedHashMap<Class<?>, TypeSerializer<?>>},
 * into two separate structures: a map with the registered classes as keys and their corresponding ids
 * (their order in the original map) as values, plus a separate array of the corresponding subclass serializers.
 */
@SuppressWarnings("unchecked")
private static Tuple2<LinkedHashMap<Class<?>, Integer>, TypeSerializer<Object>[]> decomposeSubclassSerializerRegistry(
	LinkedHashMap<Class<?>, TypeSerializer<?>> subclassSerializerRegistry) {

	final LinkedHashMap<Class<?>, Integer> subclassIds = new LinkedHashMap<>(subclassSerializerRegistry.size());
	final TypeSerializer[] subclassSerializers = new TypeSerializer[subclassSerializerRegistry.size()];

	subclassSerializerRegistry.forEach((registeredSubclassClass, serializer) -> {
		int id = subclassIds.size();
		subclassIds.put(registeredSubclassClass, id);
		subclassSerializers[id] = serializer;
	});

	return Tuple2.of(subclassIds, subclassSerializers);
}
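The snippet relies on LinkedHashMap's deterministic iteration order: using subclassIds.size() as the next id inside forEach yields ids 0, 1, 2, ... in registration order. The same pattern in isolation (names here are illustrative, not from Flink):

import java.util.LinkedHashMap;

public class IdAssignment {
    // Assigns each key an id equal to its position in insertion order,
    // relying on LinkedHashMap#forEach visiting entries in that order.
    static <K> LinkedHashMap<K, Integer> assignIds(LinkedHashMap<K, ?> registry) {
        LinkedHashMap<K, Integer> ids = new LinkedHashMap<>(registry.size());
        registry.forEach((key, value) -> ids.put(key, ids.size()));
        return ids;
    }
}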
 
Example 2, Project: flink, File: PojoSerializer.java
/**
 * This legacy snapshot delegates compatibility checks to the {@link PojoSerializerSnapshot}.
 */
@Override
public TypeSerializerSchemaCompatibility<T> resolveSchemaCompatibility(TypeSerializer<T> newSerializer) {
	LinkedHashMap<String, TypeSerializerSnapshot<?>> legacyFieldSerializerSnapshots =
		preprocessLegacySerializerSnapshotTuples(fieldToSerializerConfigSnapshot);

	int numFields = legacyFieldSerializerSnapshots.size();
	ArrayList<Field> fields = new ArrayList<>(numFields);
	ArrayList<TypeSerializerSnapshot<?>> fieldSerializerSnapshots = new ArrayList<>(numFields);
	legacyFieldSerializerSnapshots.forEach((fieldName, fieldSerializerSnapshot) -> {
		fields.add(PojoFieldUtils.getField(fieldName, getTypeClass()));
		fieldSerializerSnapshots.add(fieldSerializerSnapshot);
	});

	PojoSerializerSnapshot<T> newSnapshot = new PojoSerializerSnapshot<>(
		getTypeClass(),
		fields.toArray(new Field[numFields]),
		fieldSerializerSnapshots.toArray(new TypeSerializerSnapshot[numFields]),
		preprocessLegacySerializerSnapshotTuples(registeredSubclassesToSerializerConfigSnapshots),
		preprocessLegacySerializerSnapshotTuples(nonRegisteredSubclassesToSerializerConfigSnapshots));

	return newSnapshot.resolveSchemaCompatibility(newSerializer);
}
 
Example 3, Project: flink, File: PojoSerializerSnapshot.java
/**
 * Transforms the subclass serializer registry structure, {@code LinkedHashMap<Class<?>, TypeSerializer<?>>},
 * into two separate structures: a map with the registered classes as keys and their corresponding ids
 * (their order in the original map) as values, plus a separate array of the corresponding subclass serializers.
 */
@SuppressWarnings("unchecked")
private static Tuple2<LinkedHashMap<Class<?>, Integer>, TypeSerializer<Object>[]> decomposeSubclassSerializerRegistry(
	LinkedHashMap<Class<?>, TypeSerializer<?>> subclassSerializerRegistry) {

	final LinkedHashMap<Class<?>, Integer> subclassIds = new LinkedHashMap<>(subclassSerializerRegistry.size());
	final TypeSerializer[] subclassSerializers = new TypeSerializer[subclassSerializerRegistry.size()];

	subclassSerializerRegistry.forEach((registeredSubclassClass, serializer) -> {
		int id = subclassIds.size();
		subclassIds.put(registeredSubclassClass, id);
		subclassSerializers[id] = serializer;
	});

	return Tuple2.of(subclassIds, subclassSerializers);
}
 
Example 4, Project: elasticsearch-carrot2, File: ClusteringPlugin.java
/**
 * This places Lingo3G in front of the algorithm list if it is available.
 */
private LinkedHashMap<String, ClusteringAlgorithmProvider> reorderAlgorithms(
    LinkedHashMap<String, ClusteringAlgorithmProvider> providers) {
   String[] desiredOrder = {
       "Lingo3G",
       "Lingo",
       "STC",
       "Bisecting K-Means"
   };
   LinkedHashMap<String, ClusteringAlgorithmProvider> copy = new LinkedHashMap<>();
   for (String name : desiredOrder) {
      if (providers.containsKey(name)) {
         copy.put(name, providers.get(name));
      }
   }
   providers.forEach((name, provider) -> {
      if (!copy.containsKey(name)) {
         copy.put(name, provider);
      }
   });
   return copy;
}
 
Example 5, Project: DBus, File: ConfigCenterService.java
public ResultEntity updateGlobalConf(LinkedHashMap<String, String> map) {
    ResultEntity resultEntity = new ResultEntity();
    try {
        resultEntity = initService.checkParams(resultEntity, map);
        if (resultEntity.getStatus() != 0) {
            return resultEntity;
        }
        // Preserve any extra special configuration entries that were added previously
        Properties properties = zkService.getProperties(Constants.GLOBAL_PROPERTIES_ROOT);
        properties.putAll(map);
        StringBuilder sb = new StringBuilder();
        map.forEach((k, v) -> sb.append(k).append("=").append(v).append("\n"));
        zkService.setData(Constants.GLOBAL_PROPERTIES_ROOT, sb.toString().getBytes("utf-8"));
    } catch (Exception e) {
        logger.error(e.getMessage(), e);
        resultEntity.setStatus(MessageCode.EXCEPTION);
        resultEntity.setMessage(e.getMessage());
    }
    return resultEntity;
}
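The forEach-plus-StringBuilder serialization above can equivalently be written with streams and Collectors.joining; a sketch (not from the DBus source):

import java.util.LinkedHashMap;
import java.util.stream.Collectors;

public class PropsFormat {
    // Produces one "key=value" line per entry, in insertion order, with a trailing
    // newline, matching the output of the StringBuilder loop in the example above.
    static String toPropertiesText(LinkedHashMap<String, String> map) {
        return map.entrySet().stream()
                .map(e -> e.getKey() + "=" + e.getValue())
                .collect(Collectors.joining("\n", "", "\n"));
    }
}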
 
Example 6, Project: dew, File: ConfigBuilder.java
private static LinkedHashMap mergeItems(LinkedHashMap source, LinkedHashMap target) {
    target.forEach((k, v) -> {
        if (source.containsKey(k) && v instanceof LinkedHashMap) {
            // If the key exists in both source and target and the value is a nested map, merge recursively
            target.put(k, mergeItems((LinkedHashMap) source.get(k), (LinkedHashMap) v));
        }
        // Otherwise do not merge, i.e. keep target's original value
    });
    source.forEach((k, v) -> {
        if (!target.containsKey(k)) {
            // Add entries that exist in source but not in target
            target.put(k, v);
        }
    });
    return target;
}
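The merge above works on raw LinkedHashMap types; with generics the same deep-merge idiom reads as follows (a hedged sketch, not the dew project's API):

import java.util.Map;

public class DeepMerge {
    // Recursively merges source into target. Where a key exists in both maps and both
    // values are maps, they are merged; otherwise target's value wins. Keys only present
    // in source are copied over. Replacing a value for an existing key is not a structural
    // modification, so the put inside forEach is safe.
    @SuppressWarnings("unchecked")
    static Map<String, Object> merge(Map<String, Object> source, Map<String, Object> target) {
        target.forEach((k, v) -> {
            if (source.get(k) instanceof Map && v instanceof Map) {
                target.put(k, merge((Map<String, Object>) source.get(k), (Map<String, Object>) v));
            }
        });
        source.forEach(target::putIfAbsent);
        return target;
    }
}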
 
Example 7, Project: flink, File: PojoSerializer.java
/**
 * This legacy snapshot delegates compatibility checks to the {@link PojoSerializerSnapshot}.
 */
@Override
public TypeSerializerSchemaCompatibility<T> resolveSchemaCompatibility(TypeSerializer<T> newSerializer) {
	LinkedHashMap<String, TypeSerializerSnapshot<?>> legacyFieldSerializerSnapshots =
		preprocessLegacySerializerSnapshotTuples(fieldToSerializerConfigSnapshot);

	int numFields = legacyFieldSerializerSnapshots.size();
	ArrayList<Field> fields = new ArrayList<>(numFields);
	ArrayList<TypeSerializerSnapshot<?>> fieldSerializerSnapshots = new ArrayList<>(numFields);
	legacyFieldSerializerSnapshots.forEach((fieldName, fieldSerializerSnapshot) -> {
		fields.add(PojoFieldUtils.getField(fieldName, getTypeClass()));
		fieldSerializerSnapshots.add(fieldSerializerSnapshot);
	});

	PojoSerializerSnapshot<T> newSnapshot = new PojoSerializerSnapshot<>(
		getTypeClass(),
		fields.toArray(new Field[numFields]),
		fieldSerializerSnapshots.toArray(new TypeSerializerSnapshot[numFields]),
		preprocessLegacySerializerSnapshotTuples(registeredSubclassesToSerializerConfigSnapshots),
		preprocessLegacySerializerSnapshotTuples(nonRegisteredSubclassesToSerializerConfigSnapshots));

	return newSnapshot.resolveSchemaCompatibility(newSerializer);
}
 
Example 8, Project: EconomyAPI, File: YamlProvider.java
@SuppressWarnings({ "unchecked", "serial" })
public void init(File path){
	file = new Config(new File(path, "Money.yml"), Config.YAML, new LinkedHashMap<String, Object>(){
		{
			put("version" , 2);
			put("money", new LinkedHashMap<String, Double>());
		}
	});
	
	LinkedHashMap<Object, Object> temp = (LinkedHashMap<Object, Object>) file.get("money");
	
	data = new LinkedHashMap<>();
	temp.forEach((key, money) -> {
		String username = key.toString();
		
		if(money instanceof Integer){
			data.put(username, ((Integer) money).doubleValue());
		}else if(money instanceof Double){
			data.put(username, (Double) money);
		}else if(money instanceof String){
			data.put(username, Double.parseDouble(money.toString()));
		}
	});
}
 
Example 9, Project: terracotta-platform, File: RemoteCommand.java
protected final Map<InetSocketAddress, LogicalServerState> findRuntimePeersStatus(InetSocketAddress expectedOnlineNode) {
  logger.trace("findRuntimePeersStatus({})", expectedOnlineNode);
  Cluster cluster = getRuntimeCluster(expectedOnlineNode);
  logger.info("Connecting to: {} (this can take time if some nodes are not reachable)", toString(cluster.getNodeAddresses()));
  Collection<InetSocketAddress> addresses = cluster.getNodeAddresses();
  try (DiagnosticServices diagnosticServices = multiDiagnosticServiceProvider.fetchDiagnosticServices(addresses)) {
    LinkedHashMap<InetSocketAddress, LogicalServerState> status = addresses.stream()
        .collect(toMap(
            identity(),
            addr -> diagnosticServices.getDiagnosticService(addr).map(DiagnosticService::getLogicalServerState).orElse(UNREACHABLE),
            (o1, o2) -> {
              throw new UnsupportedOperationException();
            },
            LinkedHashMap::new));
    status.forEach((address, state) -> {
      if (state.isUnreacheable()) {
        logger.info(" - {} is not reachable", address);
      }
    });
    return status;
  }
}
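The collector in this snippet is the standard way to stream into a LinkedHashMap: the four-argument toMap with LinkedHashMap::new as the map supplier. Stripped of the diagnostics logic (illustrative names):

import java.util.LinkedHashMap;
import java.util.List;
import static java.util.function.Function.identity;
import static java.util.stream.Collectors.toMap;

public class ToLinkedMap {
    static LinkedHashMap<String, Integer> lengths(List<String> names) {
        return names.stream()
                .collect(toMap(
                        identity(),        // key: the name itself
                        String::length,    // value: its length
                        (a, b) -> { throw new IllegalStateException("duplicate key"); },
                        LinkedHashMap::new)); // preserves encounter order
    }
}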
 
Example 10, Project: flow, File: ComponentEventBus.java
/**
 * Creates a list of data objects which can be passed to the constructor
 * returned by {@link #getEventConstructor(Class)} as parameters 3+.
 *
 * @param domEvent
 *            the DOM event containing the data
 * @param eventType
 *            the component event type
 * @return a list of event data objects in the same order as defined in the
 *         component event constructor
 */
private List<Object> createEventDataObjects(DomEvent domEvent,
        Class<? extends ComponentEvent<?>> eventType) {
    List<Object> eventDataObjects = new ArrayList<>();

    LinkedHashMap<String, Class<?>> expressions = ComponentEventBusUtil
            .getEventDataExpressions(eventType);
    expressions.forEach((expression, type) -> {
        JsonValue jsonValue = domEvent.getEventData().get(expression);
        if (jsonValue == null) {
            jsonValue = Json.createNull();
        }
        Object value = JsonCodec.decodeAs(jsonValue, type);
        eventDataObjects.add(value);
    });
    return eventDataObjects;
}
 
Example 11, Project: Flink-CEPplus, File: PojoSerializerSnapshotData.java
/**
 * Creates a {@link PojoSerializerSnapshotData} from configuration of a {@link PojoSerializer}.
 *
 * <p>This factory method is meant to be used in regular write paths, i.e. when taking a snapshot
 * of the {@link PojoSerializer}. All registered and non-registered subclass classes are
 * present. Some POJO fields may be absent if the originating {@link PojoSerializer} was
 * restored with fields already missing and was never replaced by a new
 * {@link PojoSerializer} (i.e. because the serialized old data was never accessed).
static <T> PojoSerializerSnapshotData<T> createFrom(
		Class<T> pojoClass,
		Field[] fields,
		TypeSerializer<?>[] fieldSerializers,
		LinkedHashMap<Class<?>, TypeSerializer<?>> registeredSubclassSerializers,
		Map<Class<?>, TypeSerializer<?>> nonRegisteredSubclassSerializers) {

	final LinkedOptionalMap<Field, TypeSerializerSnapshot<?>> fieldSerializerSnapshots = new LinkedOptionalMap<>(fields.length);

	for (int i = 0; i < fields.length; i++) {
		Field field = fields[i];
		String fieldName = (field == null) ? getDummyNameForMissingField(i) : field.getName();
		fieldSerializerSnapshots.put(fieldName, field, TypeSerializerUtils.snapshotBackwardsCompatible(fieldSerializers[i]));
	}

	LinkedHashMap<Class<?>, TypeSerializerSnapshot<?>> registeredSubclassSerializerSnapshots = new LinkedHashMap<>(registeredSubclassSerializers.size());
	registeredSubclassSerializers.forEach((k, v) -> registeredSubclassSerializerSnapshots.put(k, TypeSerializerUtils.snapshotBackwardsCompatible(v)));

	Map<Class<?>, TypeSerializerSnapshot<?>> nonRegisteredSubclassSerializerSnapshots = new HashMap<>(nonRegisteredSubclassSerializers.size());
	nonRegisteredSubclassSerializers.forEach((k, v) -> nonRegisteredSubclassSerializerSnapshots.put(k, TypeSerializerUtils.snapshotBackwardsCompatible(v)));

	return new PojoSerializerSnapshotData<>(
		pojoClass,
		fieldSerializerSnapshots,
		optionalMapOf(registeredSubclassSerializerSnapshots, Class::getName),
		optionalMapOf(nonRegisteredSubclassSerializerSnapshots, Class::getName));
}
 
Example 12, Project: vavr-jackson, File: Serializer.java
private static String expectedMultimapJson(Multimap<?, ?> multimap, int opts) {
    final LinkedHashMap<Object, List<Object>> map = new LinkedHashMap<>();
    multimap.forEach(e -> {
        List<Object> list = map.computeIfAbsent(e._1, k -> new ArrayList<>());
        list.add(e._2);
    });
    StringBuilder sb = new StringBuilder("{");
    map.forEach((k, l) -> sb.append(expectedJson(k.toString(), opts)).append(":").append(expectedJson(io.vavr.collection.Stream.ofAll(l))));
    sb.append("}");
    return sb.toString();
}
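The first forEach uses the computeIfAbsent grouping idiom to turn a multimap into a Map of lists. The same idiom on plain parallel lists, as a self-contained sketch:

import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;

public class Grouping {
    // Groups values by key, preserving first-seen key order.
    static <K, V> LinkedHashMap<K, List<V>> groupPairs(List<K> keys, List<V> values) {
        LinkedHashMap<K, List<V>> grouped = new LinkedHashMap<>();
        for (int i = 0; i < keys.size(); i++) {
            grouped.computeIfAbsent(keys.get(i), k -> new ArrayList<>()).add(values.get(i));
        }
        return grouped;
    }
}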
 
Example 13, Project: quantumdb, File: Main.java
private static void printHelp(CliWriter writer, LinkedHashMap<String, Command> commands) {
	writer.write("Available commands:")
			.indent(1);

	commands.forEach((command, delegate) -> {
		Identifier identifier = delegate.getIdentifier();
		writer.write(identifier.getCommand() + ": " + identifier.getDescription());
	});
	writer.indent(-1);
}
 
Example 14, Project: super-cloudops, File: RandomDistributionTests.java
public static void gaussianRandomStreamTest1(String[] args) throws Exception {
	System.out.println("=========gaussianRandomStreamTest1===========");
	DoubleStream gaussianStream = Stream.generate(current()::nextGaussian).mapToDouble(e -> e);
	LinkedHashMap<Range, Integer> gaussianRangeCountMap = gaussianStream.filter(e -> (e >= -1.0 && e < 1.0)).limit(1000000)
			.boxed().map(Ranges::of)
			.collect(Ranges::emptyRangeCountMap, (m, e) -> m.put(e, m.get(e) + 1), Ranges::mergeRangeCountMaps);

	gaussianRangeCountMap.forEach((k, v) -> System.out.println(k.from() + "\t" + v));
}
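The three-argument collect used above (supplier, accumulator, combiner) is easier to see without the custom Ranges helpers; a minimal count-by-bucket sketch following the same pattern:

import java.util.LinkedHashMap;
import java.util.stream.IntStream;

public class BucketCounts {
    public static void main(String[] args) {
        // Count values by bucket (value / 10) using the 3-arg collect form,
        // then print counts in the order buckets were first seen.
        LinkedHashMap<Integer, Integer> counts = IntStream.range(0, 100).boxed()
                .collect(LinkedHashMap::new,
                        (m, v) -> m.merge(v / 10, 1, Integer::sum),
                        (m1, m2) -> m2.forEach((k, c) -> m1.merge(k, c, Integer::sum)));
        counts.forEach((bucket, count) -> System.out.println(bucket + ": " + count));
    }
}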
 
Example 15:

private JsonObject getPipeLineResponse(LinkedHashMap<String, List<JsonObject>> map, JsonObject dataModel)
		throws InsightsCustomException {

	JsonArray pipeLineArray = new JsonArray();
	JsonObject pipeLineObject = new JsonObject();
	LinkedHashMap<String, String> sortedHandoverTimeMap = new LinkedHashMap<>();
	Set<Entry<String, List<JsonObject>>> keyset = map.entrySet();
	for (Map.Entry<String, List<JsonObject>> keyvaluePair : keyset) {
		List<JsonObject> limitedList = keyvaluePair.getValue().stream().limit(4).collect(Collectors.toList());
		limitedList.forEach(obj -> pipeLineArray.add(obj));
		// Handover time object extraction and sorting
		try {
			List<String> childNodes = getDownTool(keyvaluePair.getKey(), dataModel);
			for (String eachNode : childNodes) {
				String construct = keyvaluePair.getKey() + " To " + eachNode;
				sortedHandoverTimeMap.put(construct, handOverTimeMap.get(construct));
			}

		} catch (InsightsCustomException e) {
			LOG.debug(e.getMessage());
		}

	}
	/* Prepare Summary */
	JsonObject summaryObj = prepareSummary(map, dataModel);
	JsonArray summaryArray = new JsonArray();
	summaryArray.add(summaryObj);
	/* Timelag Response */
	JsonObject handOverTime = new JsonObject();
	sortedHandoverTimeMap.forEach((k, v) -> handOverTime.addProperty(k, v));
	JsonArray handOverArray = new JsonArray();
	handOverArray.add(handOverTime);
	/* Pipeline Response */
	pipeLineObject.add("pipeline", pipeLineArray);
	pipeLineObject.add("summary", summaryArray);
	pipeLineObject.add("timelag", handOverArray);
	return pipeLineObject;
}
 
Example 16, Project: lucene-solr, File: ProtectedTermFilterFactory.java
private void populateInnerFilters(LinkedHashMap<String, Map<String, String>> wrappedFilterArgs) {
  List<TokenFilterFactory> innerFilters = new ArrayList<>();
  wrappedFilterArgs.forEach((filterName, filterArgs) -> {
    int idSuffixPos = filterName.indexOf(FILTER_NAME_ID_SEPARATOR); // Format: SPIname[-id]
    if (idSuffixPos != -1) {                                        // Strip '-id' suffix, if any, prior to SPI lookup
      filterName = filterName.substring(0, idSuffixPos);
    }
    innerFilters.add(TokenFilterFactory.forName(filterName, filterArgs));
  });
  setInnerFilters(innerFilters);
}
 
Example 17, Project: java-swing-tips, File: MainPanel.java
private MainPanel() {
  super(new GridLayout(1, 3));

  DefaultListModel<String> model = new DefaultListModel<>();
  model.addElement("ABCDEFGHIJKLMNOPQRSTUVWXYZ");
  model.addElement("aaaa");
  model.addElement("aaaabbb");
  model.addElement("aaaabbbcc");
  model.addElement("1234567890abcdefghijklmnopqrstuvwxyz");
  model.addElement("bbb1");
  model.addElement("bbb12");
  model.addElement("1234567890-+*/=ABCDEFGHIJKLMNOPQRSTUVWXYZ");
  model.addElement("bbb123");

  JList<String> list1 = new TooltipList<String>(model) {
    @Override public void updateUI() {
      super.updateUI();
      setCellRenderer(new TooltipListCellRenderer<>());
    }
  };

  JList<String> list2 = new CellRendererTooltipList<String>(model) {
    @Override public void updateUI() {
      super.updateUI();
      setCellRenderer(new TooltipListCellRenderer<>());
    }
  };

  JList<String> list3 = new JList<String>(model) {
    @Override public void updateUI() {
      super.updateUI();
      setCellRenderer(new TooltipListCellRenderer<>());
    }
  };

  LinkedHashMap<String, Component> map = new LinkedHashMap<>();
  map.put("CellBounds", list1);
  map.put("ListCellRenderer", list2);
  map.put("Default location", list3);
  map.forEach((title, c) -> add(makeTitledPanel(title, c)));
  setPreferredSize(new Dimension(320, 240));
}
 
Example 18, Project: flink, File: PojoSerializerSnapshot.java
/**
 * Transforms a {@link LinkedHashMap} with {@link TypeSerializerSnapshot}s as values
 * into one with {@link TypeSerializer}s as values, by restoring each snapshot.
 */
private static <K> LinkedHashMap<K, TypeSerializer<?>> restoreSerializers(LinkedHashMap<K, TypeSerializerSnapshot<?>> snapshotsMap) {
	final LinkedHashMap<K, TypeSerializer<?>> restoredSerializersMap = new LinkedHashMap<>(snapshotsMap.size());
	snapshotsMap.forEach((key, snapshot) -> restoredSerializersMap.put(key, snapshot.restoreSerializer()));
	return restoredSerializersMap;
}
 
Example 19, Project: flink, File: PojoSerializerSnapshot.java
/**
 * Transforms a {@link LinkedHashMap} with {@link TypeSerializerSnapshot}s as values
 * into one with {@link TypeSerializer}s as values, by restoring each snapshot.
 */
private static <K> LinkedHashMap<K, TypeSerializer<?>> restoreSerializers(LinkedHashMap<K, TypeSerializerSnapshot<?>> snapshotsMap) {
	final LinkedHashMap<K, TypeSerializer<?>> restoredSerializersMap = new LinkedHashMap<>(snapshotsMap.size());
	snapshotsMap.forEach((key, snapshot) -> restoredSerializersMap.put(key, snapshot.restoreSerializer()));
	return restoredSerializersMap;
}
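restoreSerializers is an instance of a general "map the values, keep the keys and their order" transform; as a generic sketch (illustrative, not Flink API):

import java.util.LinkedHashMap;
import java.util.function.Function;

public class MapValues {
    // Builds a new LinkedHashMap with the same keys in the same order,
    // passing each value through the given transform.
    static <K, V, R> LinkedHashMap<K, R> mapValues(LinkedHashMap<K, V> in,
            Function<? super V, ? extends R> f) {
        LinkedHashMap<K, R> out = new LinkedHashMap<>(in.size());
        in.forEach((k, v) -> out.put(k, f.apply(v)));
        return out;
    }
}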
 
Example 20, Project: datacollector, File: TableContextUtil.java
/**
 * Determines if there are invalid values specified in the initial offset value
 * for columns.
 */
//@VisibleForTesting
void checkForInvalidInitialOffsetValues(
    PushSource.Context context,
    List<Stage.ConfigIssue> issues,
    String qualifiedTableName,
    LinkedHashMap<String, Integer> offsetColumnToType,
    Map<String, String> offsetColumnToStartOffset
) throws StageException {
  List<String> invalidInitialOffsetFieldAndValue =  new ArrayList<>();
  offsetColumnToType.forEach((offsetColumn, offsetSqlType) -> {
    String initialOffsetValue = offsetColumnToStartOffset.get(offsetColumn);
    try {
      if (jdbcUtil.isSqlTypeOneOf(offsetSqlType, Types.DATE, Types.TIME, Types.TIMESTAMP)) {
        if (jdbcUtil.isSqlTypeOneOf(offsetSqlType, Types.TIMESTAMP)) {
          if (!isTimestampWithNanosFormat(initialOffsetValue)) {
            Long.valueOf(initialOffsetValue);
          }
        } else {
          Long.valueOf(initialOffsetValue);
        }

      } else {
        // Use the native field conversion strategy to convert the string to the specified type and get the value
        Field.create(OffsetQueryUtil.SQL_TYPE_TO_FIELD_TYPE.get(offsetSqlType), initialOffsetValue).getValue();
      }
    } catch (IllegalArgumentException e) {
      LOG.error(
          Utils.format(
              "Invalid Initial Offset Value {} for column {} in table {}",
              initialOffsetValue,
              offsetColumn,
              qualifiedTableName
          ),
          e
      );
      invalidInitialOffsetFieldAndValue.add(offsetColumn + " - " + initialOffsetValue);
    }
  });
  if (!invalidInitialOffsetFieldAndValue.isEmpty()) {
    throw new StageException(
        JdbcErrors.JDBC_72,
        qualifiedTableName,
        COMMA_JOINER.join(invalidInitialOffsetFieldAndValue)
    );
  }
}