java.util.LinkedHashMap#clear() Source Code Examples

Listed below are example usages of java.util.LinkedHashMap#clear() taken from real projects; follow the links to view the full source code on GitHub.

public static void main(String[] args) {

    // create a LinkedHashMap object
    LinkedHashMap<String, String> lHashMap = new LinkedHashMap<>();

    // add key-value pairs to the LinkedHashMap
    lHashMap.put("1", "One");
    lHashMap.put("2", "Two");
    lHashMap.put("3", "Three");

    /*
      To remove all mappings from a LinkedHashMap, use its void clear()
      method. clear() removes every key-value pair contained in the map.
    */

    lHashMap.clear();

    System.out.println("Total key-value pairs in LinkedHashMap: " + lHashMap.size());
}
 
Example 2  Project: javaide  File: EmptyLinkedHashMapExample.java
public static void main(String[] args) {

        //create LinkedHashMap object
        LinkedHashMap lHashMap = new LinkedHashMap();

        //add key value pairs to LinkedHashMap
        lHashMap.put("1", "One");
        lHashMap.put("2", "Two");
        lHashMap.put("3", "Three");

    /*
      To remove all values or clear LinkedHashMap use
      void clear method() of LinkedHashMap class. Clear method removes all
      key value pairs contained in LinkedHashMap.
    */

        lHashMap.clear();

        System.out.println("Total key value pairs in LinkedHashMap are : " + lHashMap.size());
    }
 
/**
 * This method associates module handlers with the child modules of composite module types. It links the module
 * types of child modules to the rule which contains this composite module. It also resolves links between child
 * configuration properties and the configuration of the composite module; see
 * {@link ReferenceResolver#updateConfiguration(Configuration, Map, Logger)}.
 *
 * @param compositeConfig configuration values of the composite module.
 * @param childModules list of child modules
 * @param childModulePrefix defines the UID of the child module. The rule id alone is not a sufficient prefix when a
 *            composite type is used more than once within the same rule. For example the prefix can be:
 *            ruleId:compositeModuleId:compositeModuleId2.
 * @return map of modules to their handlers. Returns null when any of the child modules cannot find its
 *         handler.
 */
@SuppressWarnings("unchecked")
private <T extends Module, MT extends ModuleHandler> LinkedHashMap<T, MT> getChildHandlers(String compositeModuleId,
        Configuration compositeConfig, List<T> childModules, String childModulePrefix) {
    LinkedHashMap<T, MT> mapModuleToHandler = new LinkedHashMap<>();
    for (T child : childModules) {
        String ruleId = getRuleId(childModulePrefix);
        ruleEngine.updateMapModuleTypeToRule(ruleId, child.getTypeUID());
        ModuleHandlerFactory childMhf = ruleEngine.getModuleHandlerFactory(child.getTypeUID());
        if (childMhf == null) {
            mapModuleToHandler.clear();
            mapModuleToHandler = null;
            return null;
        }
        ReferenceResolver.updateConfiguration(child.getConfiguration(), compositeConfig.getProperties(), logger);
        MT childHandler = (MT) childMhf.getHandler(child, childModulePrefix + ":" + compositeModuleId);

        if (childHandler == null) {
            mapModuleToHandler.clear();
            mapModuleToHandler = null;
            return null;
        }
        mapModuleToHandler.put(child, childHandler);
    }
    return mapModuleToHandler;
}
 
Example 4  Project: ambry  File: DeleteManagerTest.java
/**
 * Tests to ensure that {@link RouterErrorCode}s are properly resolved based on precedence
 * @throws Exception
 */
@Test
public void routerErrorCodeResolutionTest() throws Exception {
  LinkedHashMap<ServerErrorCode, RouterErrorCode> codesToSetAndTest = new LinkedHashMap<>();
  // test 4 codes
  codesToSetAndTest.put(ServerErrorCode.Blob_Authorization_Failure, RouterErrorCode.BlobAuthorizationFailure);
  codesToSetAndTest.put(ServerErrorCode.Blob_Expired, RouterErrorCode.BlobExpired);
  codesToSetAndTest.put(ServerErrorCode.Disk_Unavailable, RouterErrorCode.AmbryUnavailable);
  codesToSetAndTest.put(ServerErrorCode.IO_Error, RouterErrorCode.UnexpectedInternalError);
  doRouterErrorCodeResolutionTest(codesToSetAndTest);

  // test another 4 codes
  codesToSetAndTest.clear();
  codesToSetAndTest.put(ServerErrorCode.Blob_Authorization_Failure, RouterErrorCode.BlobAuthorizationFailure);
  codesToSetAndTest.put(ServerErrorCode.Disk_Unavailable, RouterErrorCode.AmbryUnavailable);
  codesToSetAndTest.put(ServerErrorCode.Replica_Unavailable, RouterErrorCode.AmbryUnavailable);
  codesToSetAndTest.put(ServerErrorCode.Partition_Unknown, RouterErrorCode.UnexpectedInternalError);
  doRouterErrorCodeResolutionTest(codesToSetAndTest);
}
 
Example 5  Project: sqlg  File: TestBatchStreamEdge.java
@Test(expected = IllegalStateException.class)
public void testEdgePropertiesSameOrder() {
    SqlgVertex v1 = (SqlgVertex) this.sqlgGraph.addVertex(T.label, "A");
    SqlgVertex v2 = (SqlgVertex) this.sqlgGraph.addVertex(T.label, "A");
    this.sqlgGraph.tx().commit();
    this.sqlgGraph.tx().streamingBatchModeOn();
    LinkedHashMap<String, Object> keyValues = new LinkedHashMap<>();
    keyValues.put("name", "halo");
    keyValues.put("surname", "test");
    v1.streamEdge("a", v2, keyValues);
    keyValues.clear();
    keyValues.put("surname", "test");
    keyValues.put("name", "halo");
    v1.streamEdge("a", v2, keyValues);
    Assert.fail();
}
 
Example 6  Project: ambry  File: TtlUpdateManagerTest.java
/**
 * Tests to ensure that {@link RouterErrorCode}s are properly resolved based on precedence
 * @throws Exception
 */
@Test
public void routerErrorCodeResolutionTest() throws Exception {
  LinkedHashMap<ServerErrorCode, RouterErrorCode> codesToSetAndTest = new LinkedHashMap<>();

  // test 4 codes
  codesToSetAndTest.put(ServerErrorCode.Blob_Deleted, RouterErrorCode.BlobDeleted);
  codesToSetAndTest.put(ServerErrorCode.Blob_Expired, RouterErrorCode.BlobExpired);
  codesToSetAndTest.put(ServerErrorCode.Blob_Update_Not_Allowed, RouterErrorCode.BlobUpdateNotAllowed);
  codesToSetAndTest.put(ServerErrorCode.Disk_Unavailable, RouterErrorCode.AmbryUnavailable);
  doRouterErrorCodeResolutionTest(codesToSetAndTest);

  // test another 4 codes
  codesToSetAndTest.clear();
  codesToSetAndTest.put(ServerErrorCode.Blob_Authorization_Failure, RouterErrorCode.BlobAuthorizationFailure);
  codesToSetAndTest.put(ServerErrorCode.Blob_Update_Not_Allowed, RouterErrorCode.BlobUpdateNotAllowed);
  codesToSetAndTest.put(ServerErrorCode.Disk_Unavailable, RouterErrorCode.AmbryUnavailable);
  codesToSetAndTest.put(ServerErrorCode.IO_Error, RouterErrorCode.UnexpectedInternalError);
  doRouterErrorCodeResolutionTest(codesToSetAndTest);
}
 
Example 7  Project: smarthome  File: CompositeModuleHandlerFactory.java
/**
 * This method associates module handlers with the child modules of composite module types. It links the module
 * types of child modules to the rule which contains this composite module. It also resolves links between child
 * configuration properties and the configuration of the composite module; see
 * {@link ReferenceResolver#updateConfiguration(Configuration, Map, Logger)}.
 *
 * @param compositeConfig   configuration values of the composite module.
 * @param childModules      list of child modules
 * @param childModulePrefix defines the UID of the child module. The rule id alone is not a sufficient prefix when a
 *                          composite type is used more than once within the same rule. For example the prefix can
 *                          be: ruleId:compositeModuleId:compositeModuleId2.
 * @return map of modules to their handlers. Returns null when any of the child modules cannot find its
 *         handler.
 */
@SuppressWarnings("unchecked")
private <T extends Module, MT extends ModuleHandler> LinkedHashMap<T, MT> getChildHandlers(String compositeModuleId,
        Configuration compositeConfig, List<T> childModules, String childModulePrefix) {
    LinkedHashMap<T, MT> mapModuleToHandler = new LinkedHashMap<T, MT>();
    for (T child : childModules) {
        String ruleId = getRuleId(childModulePrefix);
        ruleEngine.updateMapModuleTypeToRule(ruleId, child.getTypeUID());
        ModuleHandlerFactory childMhf = ruleEngine.getModuleHandlerFactory(child.getTypeUID());
        if (childMhf == null) {
            mapModuleToHandler.clear();
            mapModuleToHandler = null;
            return null;
        }
        ReferenceResolver.updateConfiguration(child.getConfiguration(), compositeConfig.getProperties(), logger);
        MT childHandler = (MT) childMhf.getHandler(child, childModulePrefix + ":" + compositeModuleId);

        if (childHandler == null) {
            mapModuleToHandler.clear();
            mapModuleToHandler = null;
            return null;
        }
        mapModuleToHandler.put(child, childHandler);
    }
    return mapModuleToHandler;
}
 
Example 8  Project: openemm  File: MapUtils.java
/**
 * Reorders the entries of a LinkedHashMap (an ordered map) using given comparator.
 * 
 * @param map map to reorder
 * @param order comparator defining new order
 */
public static final <K,V> void reorderLinkedHashMap(final LinkedHashMap<K, V> map, final Comparator<Map.Entry<K,V>> order) {
	final List<Map.Entry<K,V>> orderedList = new ArrayList<>(map.entrySet());
	orderedList.sort(order);
	
	map.clear();
	for(Map.Entry<K, V> entry : orderedList) {
		map.put(entry.getKey(), entry.getValue());
	}
}
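
For illustration, a hypothetical caller of reorderLinkedHashMap (a hedged sketch, not taken from the openemm sources; it assumes the MapUtils class above is on the classpath) could sort a map by value like this:

import java.util.LinkedHashMap;
import java.util.Map;

public class ReorderLinkedHashMapDemo {
    public static void main(String[] args) {
        LinkedHashMap<String, Integer> scores = new LinkedHashMap<>();
        scores.put("carol", 3);
        scores.put("alice", 1);
        scores.put("bob", 2);

        // reorderLinkedHashMap copies the entries, clears the map in place and
        // re-inserts them in the order defined by the comparator.
        MapUtils.reorderLinkedHashMap(scores, Map.Entry.comparingByValue());

        System.out.println(scores); // prints {alice=1, bob=2, carol=3}
    }
}

Because the entries are re-inserted into the same instance, any existing reference to the map observes the new iteration order.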
 
private static ArrayList<Entry<String, Pattern>> createTitleRegexps(JobOutput jobOutput) throws Exception {
	ArrayList<Entry<String, Pattern>> titleRegexps = new ArrayList<Entry<String, Pattern>>();
	jobOutput.println(LEGACY_TITLE_REGEXP.size() + " legacy system message code patterns");
	titleRegexps.addAll(LEGACY_TITLE_REGEXP);
	LinkedHashMap<Locale, Locales> locales = new LinkedHashMap<Locale, Locales>();
	locales.put(L10nUtil.getLocale(Locales.JOURNAL), Locales.JOURNAL);
	locales.put(L10nUtil.getLocale(Locales.EN), Locales.EN);
	locales.put(L10nUtil.getLocale(Locales.DE), Locales.DE);
	Iterator<Locales> localesIt = locales.values().iterator();
	while (localesIt.hasNext()) {
		Locales locale = localesIt.next();
		LinkedHashMap<String, String> titleFormatMap = new LinkedHashMap<String, String>(CoreUtil.SYSTEM_MESSAGE_CODES.size());
		Iterator<String> codesIt = CoreUtil.SYSTEM_MESSAGE_CODES.iterator();
		while (codesIt.hasNext()) {
			String code = codesIt.next();
			String titleFormat = L10nUtil.getSystemMessageTitleFormat(locale, code);
			if (!CommonUtil.isEmptyString(titleFormat)) {
				if (!titleFormatMap.containsKey(code)) {
					titleFormatMap.put(code, titleFormat);
				} else {
					throw new Exception("duplicate " + locale.name() + " system message title format " + titleFormat);
				}
			} else {
				throw new Exception("empty " + locale.name() + " system message title format for " + code);
			}
		}
		ArrayList<Entry<String, String>> titleFormatList = new ArrayList<Entry<String, String>>(titleFormatMap.entrySet());
		titleFormatMap.clear();
		Collections.sort(titleFormatList, TITLE_FORMAT_COMPARATOR);
		Iterator<Entry<String, String>> titleFormatIt = titleFormatList.iterator();
		while (titleFormatIt.hasNext()) {
			Entry<String, String> codeTitleFormat = titleFormatIt.next();
			titleRegexps.add(new AbstractMap.SimpleEntry<String, Pattern>(codeTitleFormat.getKey(), CommonUtil.createMessageFormatRegexp(codeTitleFormat.getValue(), false)));
		}
		jobOutput.println(locale.name() + ": " + titleFormatList.size() + " system message code patterns");
	}
	jobOutput.println(titleRegexps.size() + " system message code patterns overall");
	return titleRegexps;
}
 
Example 10  Project: manifold  File: ManLog_8.java
void recordRecentSuspendedIssuesAndRemoveOthers( JCTree tree )
{
  LinkedHashMap<JCTree, Stack<Stack<JCDiagnostic>>> suspendedIssues =
    _suspendedIssues.get( getDiagnosticHandler() );

  Stack<Stack<JCDiagnostic>> issues = suspendedIssues.get( tree );
  Stack<JCDiagnostic> currentIssues = issues.pop();
  issues.clear();
  issues.push( currentIssues );
  if( isRootFrame( tree ) )
  {
    recordSuspendedIssues();
    suspendedIssues.clear();
  }
}
 
Example 11  Project: YalpStore  File: Util.java
static public Map<String, String> addToStart(LinkedHashMap<String, String> map, String key, String value) {
    LinkedHashMap<String, String> clonedMap = (LinkedHashMap<String, String>) map.clone();
    map.clear();
    map.put(key, value);
    map.putAll(clonedMap);
    return map;
}
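
A hypothetical call site (not part of YalpStore; it assumes the Util class above is accessible) showing the effect of addToStart: the existing entries are cloned, the map is cleared, and the new pair is inserted before the old ones so it leads the iteration order:

import java.util.LinkedHashMap;

public class AddToStartDemo {
    public static void main(String[] args) {
        LinkedHashMap<String, String> headers = new LinkedHashMap<>();
        headers.put("Accept", "application/json");
        headers.put("User-Agent", "demo-client");

        // Re-inserts the new pair first, then appends the cloned entries.
        Util.addToStart(headers, "Authorization", "token abc123");

        System.out.println(headers.keySet()); // prints [Authorization, Accept, User-Agent]
    }
}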
 
Example 12  Project: mtas  File: CodecInfo.java
/**
 * Gets the prefixes.
 *
 * @param field
 *          the field
 * @return the prefixes
 */
private LinkedHashMap<String, Long> getPrefixRefs(String field) {
  if (fieldReferences.containsKey(field)) {
    FieldReferences fr = fieldReferences.get(field);
    if (!prefixReferences.containsKey(field)) {
      LinkedHashMap<String, Long> refs = new LinkedHashMap<String, Long>();
      try {
        IndexInput inPrefix = indexInputList.get("prefix");
        inPrefix.seek(fr.refPrefix);
        for (int i = 0; i < fr.numberOfPrefixes; i++) {
          Long ref = inPrefix.getFilePointer();
          String prefix = inPrefix.readString();
          refs.put(prefix, ref);
        }
      } catch (Exception e) {
        log.error(e);
        refs.clear();
      }
      prefixReferences.put(field, refs);
      return refs;
    } else {
      return prefixReferences.get(field);
    }
  } else {
    return null;
  }
}
 
Example 13  Project: sqlg  File: TestBatchStreamEdge.java
@Test(expected = IllegalStateException.class)
public void testEdgePropertiesRemainsTheSame() {
    SqlgVertex v1 = (SqlgVertex) this.sqlgGraph.addVertex(T.label, "A");
    SqlgVertex v2 = (SqlgVertex) this.sqlgGraph.addVertex(T.label, "A");
    this.sqlgGraph.tx().commit();
    this.sqlgGraph.tx().streamingBatchModeOn();
    LinkedHashMap<String, Object> keyValues = new LinkedHashMap<>();
    keyValues.put("name", "halo");
    v1.streamEdge("a", v2, keyValues);
    keyValues.clear();
    keyValues.put("namea", "halo");
    v1.streamEdge("a", v2, keyValues);
    Assert.fail();
}
 
Example 14  Project: energy2d  File: MenuBar.java
private void createMenu(final System2D box, JMenu menu, LinkedHashMap<String, String> templates) {
    JMenuItem mi;
    for (Map.Entry<String, String> x : templates.entrySet()) {
        mi = new JMenuItem(x.getKey());
        final String val = x.getValue();
        mi.addActionListener(e -> box.loadModel(val));
        menu.add(mi);
    }
    templates.clear();
}
 
Example 15  Project: DBus  File: ConfigCenterService.java
public int ResetMgrDB(LinkedHashMap<String, String> map) throws Exception {
    Connection connection = null;
    try {
        String content = map.get("content");
        map.clear();
        String[] split = content.split("\n");
        for (String s : split) {
            String replace = s.replace("\r", "");
            String[] pro = replace.split("=", 2);
            if (pro != null && pro.length == 2) {
                map.put(pro[0], pro[1]);
            }
        }
        logger.info(map.toString());
        String driverClassName = map.get("driverClassName");
        String url = map.get("url");
        String username = map.get("username");
        String password = map.get("password");
        Class.forName(driverClassName);
        connection = DriverManager.getConnection(url, username, password);

        zkService.setData(KeeperConstants.MGR_DB_CONF, content.getBytes("utf-8"));

        // Reset the mgr database
        ResponseEntity<ResultEntity> res = sender.get(ServiceNames.KEEPER_SERVICE, "/toolSet/initMgrSql");
        if (res.getBody().getStatus() != 0) {
            return MessageCode.DBUS_MGR_INIT_ERROR;
        }
        logger.info("重置mgr数据库完成.");

        // Add the super administrator account
        User u = new User();
        u.setRoleType("admin");
        u.setStatus("active");
        u.setUserName("超级管理员");
        u.setPassword(DBusUtils.md5("12345678"));
        u.setEmail("admin");
        u.setPhoneNum("13000000000");
        u.setUpdateTime(new Date());
        res = sender.post(ServiceNames.KEEPER_SERVICE, "/users/create", u);
        if (res.getBody().getStatus() != 0) {
            return MessageCode.CREATE_SUPER_USER_ERROR;
        }
        logger.info("添加超级管理员完成.");
        return 0;
    } catch (SQLException e) {
        logger.error(e.getMessage(), e);
        return MessageCode.DBUS_MGR_DB_FAIL_WHEN_CONNECT;
    } finally {
        if (connection != null) {
            connection.close();
        }
    }
}
 
Example 16  Project: biojava  File: ResidualsCoxph.java
/**
 * From R in residuals.coxph.S rowsum(rr, collapse)
 *
 * @param rr
 * @param sets
 * @return
 */
private static double[][] rowsum(double[][] rr, ArrayList<String> sets) throws Exception {
	LinkedHashMap<String, Double> sumMap = new LinkedHashMap<String, Double>();
	if (rr.length != sets.size()) {
		throw new Exception("Cluster value for each sample are not of equal length n=" + rr.length + " cluster length=" + sets.size());
	}
	double[][] sum = null;
	for (int j = 0; j < rr[0].length; j++) {
		for (int i = 0; i < sets.size(); i++) {
			String s = sets.get(i);
			Double v = sumMap.get(s); //get in order
			if (v == null) {
				v = 0.0;
			}
			v = v + rr[i][j];
			sumMap.put(s, v);

		}
		if (sum == null) {
			sum = new double[sumMap.size()][rr[0].length];
		}

		ArrayList<String> index = new ArrayList<String>(sumMap.keySet());
		//sorting does seem to make a difference in test cases at the .0000000001
   //     ArrayList<Integer> in = new ArrayList<Integer>();
   //     for (String s : index) {
   //         in.add(Integer.parseInt(s));
   //     }
   //     Collections.sort(index);

		for (int m = 0; m < index.size(); m++) {
			String key = index.get(m);
			sum[m][j] = sumMap.get(key);
		}

		sumMap.clear();
	}

	return sum;

}
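
For intuition, a small illustrative trace (not one of the biojava test cases): with rr = {{1, 10}, {2, 20}, {3, 30}} and sets = ["a", "b", "a"], rows 0 and 2 share the cluster key "a", so the result is {{4, 40}, {2, 20}}. The LinkedHashMap keeps the clusters in first-appearance order, and clear() is called at the end of each column so the same map instance can be reused for the next column.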
 
Example 17  Project: incubator-hivemall  File: DecisionTreeTest.java
private static void runTracePredict(String datasetUrl, int responseIndex, int numLeafs)
        throws IOException, ParseException {
    URL url = new URL(datasetUrl);
    InputStream is = new BufferedInputStream(url.openStream());

    ArffParser arffParser = new ArffParser();
    arffParser.setResponseIndex(responseIndex);

    AttributeDataset ds = arffParser.parse(is);
    final Attribute[] attrs = ds.attributes();
    final Attribute targetAttr = ds.response();

    double[][] x = ds.toArray(new double[ds.size()][]);
    int[] y = ds.toArray(new int[ds.size()]);

    Random rnd = new Random(43L);
    int numTrain = (int) (x.length * 0.7);
    int[] index = ArrayUtils.shuffle(MathUtils.permutation(x.length), rnd);
    int[] cvTrain = Arrays.copyOf(index, numTrain);
    int[] cvTest = Arrays.copyOfRange(index, numTrain, index.length);

    double[][] trainx = Math.slice(x, cvTrain);
    int[] trainy = Math.slice(y, cvTrain);
    double[][] testx = Math.slice(x, cvTest);

    DecisionTree tree = new DecisionTree(SmileExtUtils.convertAttributeTypes(attrs),
        matrix(trainx, false), trainy, numLeafs, RandomNumberGeneratorFactory.createPRNG(43L));

    final LinkedHashMap<String, Double> map = new LinkedHashMap<>();
    final StringBuilder buf = new StringBuilder();
    for (int i = 0; i < testx.length; i++) {
        final DenseVector test = new DenseVector(testx[i]);
        tree.predict(test, new PredictionHandler() {

            @Override
            public void visitBranch(Operator op, int splitFeatureIndex, double splitFeature,
                    double splitValue) {
                buf.append(attrs[splitFeatureIndex].name);
                buf.append(" [" + splitFeature + "] ");
                buf.append(op);
                buf.append(' ');
                buf.append(splitValue);
                buf.append('\n');

                map.put(attrs[splitFeatureIndex].name + " [" + splitFeature + "] " + op,
                    splitValue);
            }

            @Override
            public void visitLeaf(int output, double[] posteriori) {
                buf.append(targetAttr.toString(output));
            }
        });

        Assert.assertTrue(buf.length() > 0);
        Assert.assertFalse(map.isEmpty());

        StringUtils.clear(buf);
        map.clear();
    }

}
 
Example 18  Project: che  File: TopologicalSort.java
/**
 * Given the function for determining the predecessors of the nodes, return the list of the nodes
 * in topological order. I.e. all predecessors will be placed sooner in the list than their
 * successors. Note that the input collection is assumed to contain no duplicate entries as
 * determined by the equality of the {@code ID} type. If such duplicates are present in the input
 * collection, the output list will only contain the first instance of the duplicates from the
 * input collection.
 *
 * <p>The implemented sort algorithm is stable. If there is no relationship between 2 nodes, they
 * retain the relative position to each other as they had in the provided collection (e.g. if "a"
 * preceded "b" in the original collection and there is no relationship between them (as
 * determined by the predecessor function), the "a" will still precede "b" in the resulting list.
 * Other nodes may be inserted in between them though in the result).
 *
 * <p>The cycles in the graph determined by the predecessor function are ignored and nodes in the
 * cycle are placed into the output list in the source order.
 *
 * @param nodes the collection of nodes
 * @return the list of nodes sorted in topological order
 */
public List<N> sort(Collection<N> nodes) {
  // the linked hashmap is important to retain the original order of elements unless required
  // by the dependencies between nodes
  LinkedHashMap<ID, NodeInfo<ID, N>> nodeInfos = newLinkedHashMapWithExpectedSize(nodes.size());
  List<NodeInfo<ID, N>> results = new ArrayList<>(nodes.size());

  int pos = 0;
  boolean needsSorting = false;
  for (N node : nodes) {
    ID nodeID = identityExtractor.apply(node);
    // we need the set to be modifiable, so let's make our own
    Set<ID> preds = new HashSet<>(directPredecessorsExtractor.apply(node));
    needsSorting = needsSorting || !preds.isEmpty();

    NodeInfo<ID, N> nodeInfo = nodeInfos.computeIfAbsent(nodeID, __ -> new NodeInfo<>());
    nodeInfo.id = nodeID;
    nodeInfo.predecessors = preds;
    nodeInfo.sourcePosition = pos++;
    nodeInfo.node = node;

    for (ID pred : preds) {
      // note that this means that we're inserting the nodeinfos into the map in an incorrect
      // order and will have to sort them in the source order before we do the actual topo sort.
      // We take that cost because we gamble on there being no dependencies in the nodes as a
      // common case.
      NodeInfo<ID, N> predNode = nodeInfos.computeIfAbsent(pred, __ -> new NodeInfo<>());
      if (predNode.successors == null) {
        predNode.successors = new HashSet<>();
      }
      predNode.successors.add(nodeID);
    }
  }

  if (needsSorting) {
    // because of the predecessors, we have put the nodeinfos in the map in an incorrect order.
    // we need to correct that before we try to sort...
    TreeSet<NodeInfo<ID, N>> tmp = new TreeSet<>(Comparator.comparingInt(a -> a.sourcePosition));
    tmp.addAll(nodeInfos.values());
    nodeInfos.clear();
    tmp.forEach(ni -> nodeInfos.put(ni.id, ni));

    // now we're ready to produce the results
    sort(nodeInfos, results);
  } else {
    // we don't need to sort, but we need to keep the expected behavior of removing the duplicates
    results = new ArrayList<>(nodeInfos.values());
  }

  return results.stream().map(ni -> ni.node).collect(Collectors.toList());
}
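
The che implementation above relies on the surrounding class (identityExtractor, directPredecessorsExtractor, NodeInfo), so it cannot be run in isolation. As a rough standalone sketch of the same idea (a hypothetical helper, not the che code): a Kahn-style sort that keeps unresolved nodes in a LinkedHashMap so that nodes with no ordering constraint retain their source order:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;

public class StableTopoSortSketch {

    // Returns the keys of 'predecessors' in topological order; nodes without an
    // ordering constraint keep the insertion order of the LinkedHashMap.
    static <N> List<N> sort(LinkedHashMap<N, Set<N>> predecessors) {
        Map<N, Integer> remaining = new LinkedHashMap<>(); // unresolved predecessor counts, in source order
        Map<N, List<N>> successors = new HashMap<>();
        for (Map.Entry<N, Set<N>> e : predecessors.entrySet()) {
            int count = 0;
            for (N pred : e.getValue()) {
                if (predecessors.containsKey(pred)) { // ignore predecessors outside the input
                    successors.computeIfAbsent(pred, k -> new ArrayList<>()).add(e.getKey());
                    count++;
                }
            }
            remaining.put(e.getKey(), count);
        }
        List<N> result = new ArrayList<>(predecessors.size());
        while (!remaining.isEmpty()) {
            N next = null;
            for (Map.Entry<N, Integer> e : remaining.entrySet()) { // first ready node in source order
                if (e.getValue() == 0) {
                    next = e.getKey();
                    break;
                }
            }
            if (next == null) {
                break; // cycle detected; this simplified sketch leaves the remaining nodes out
            }
            remaining.remove(next);
            result.add(next);
            for (N succ : successors.getOrDefault(next, List.of())) {
                remaining.merge(succ, -1, Integer::sum);
            }
        }
        return result;
    }

    public static void main(String[] args) {
        LinkedHashMap<String, Set<String>> preds = new LinkedHashMap<>();
        preds.put("a", Set.of());
        preds.put("b", Set.of("c")); // "b" depends on "c"
        preds.put("c", Set.of());
        System.out.println(sort(preds)); // prints [a, c, b]
    }
}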
 
Example 19  Project: pom-manipulation-ext  File: WildcardMap.java
/**
 * Associates the specified value with the specified key in this map.
 * @param key key to associate with
 * @param value value to associate with the key
 */
public void put(ProjectRef key, T value)
{
    String groupId = key.getGroupId();
    String artifactId = key.getArtifactId();

    LinkedHashMap<String,T> vMap = map.get(groupId);
    if ( vMap == null)
    {
        vMap = new LinkedHashMap<>();
    }
    boolean wildcard = false;

    if ( WILDCARD.equals(artifactId))
    {
        // Erase any previous mappings.
        if (!vMap.isEmpty())
        {
            logger.warn ("Emptying map with keys " + vMap.keySet() + " as replacing with wildcard mapping " + key);
        }
        vMap.clear();
    }
    else
    {
        for ( Object o : vMap.keySet() )
        {
            if ( o.equals( WILDCARD ) )
            {
                wildcard = true;
                break;
            }
        }
    }
    if ( wildcard )
    {
        logger.warn ("Unable to add " + key + " with value " + value +
                " as wildcard mapping for " + groupId + " already exists.");
    }
    else
    {
        vMap.put(artifactId, value);
        map.put(groupId, vMap);
    }
}
 
Example 20  Project: io  File: ODataBatchResource.java
/**
 * Bulk-registers user data via NavigationProperty (NP).
 * @param npBulkContexts list of NavigationProperty contexts
 */
private void execBulkRequestForNavigationProperty(List<NavigationPropertyBulkContext> npBulkContexts) {
    // Build BulkRequests from the contexts for bulk registration.
    // The existence check of the EntityType on the NP side and the ID conflict check within the bulk data are also done here.
    LinkedHashMap<String, BulkRequest> npBulkRequests = new LinkedHashMap<String, BulkRequest>();
    for (NavigationPropertyBulkContext npBulkContext : npBulkContexts) {
        BatchBodyPart bodyPart = npBulkContext.getBodyPart();
        BulkRequest bulkRequest = new BulkRequest(bodyPart);
        String key = DcUUID.randomUUID();

        if (npBulkContext.isError()) {
            bulkRequest.setError(npBulkContext.getException());
            npBulkRequests.put(key, bulkRequest);
            continue;
        }

        String targetEntitySetName = bodyPart.getTargetEntitySetName();
        bulkRequest = createBulkRequest(bodyPart, targetEntitySetName);
        // Check for ID conflicts within the data
        // TODO support composite primary keys, unique key checks, and NTKP
        if (bulkRequest.getError() == null) {
            EntitySetDocHandler docHandler = bulkRequest.getDocHandler();
            key = docHandler.getEntityTypeId() + ":" + (String) docHandler.getStaticFields().get("__id");
            if (npBulkRequests.containsKey(key)) {
                key = DcUUID.randomUUID();
                bulkRequest.setError(DcCoreException.OData.ENTITY_ALREADY_EXISTS);
            }
        }

        npBulkRequests.put(key, bulkRequest);
    }

    try {
        this.odataResource.getODataProducer().bulkCreateEntityViaNavigationProperty(npBulkContexts, npBulkRequests);
    } catch (DcCoreException e) {
        // Set the status on shutter so that processing can continue after a 503 occurs.
        shutter.updateStatus(e);
        if (!DcCoreException.Misc.TOO_MANY_CONCURRENT_REQUESTS.equals(e)) {
            throw e;
        } else {
            createTooManyConcurrentResponse(npBulkContexts);
        }
    }
    npBulkRequests.clear();
}