com.google.common.collect.HashMultiset#add() Code Examples

The examples below show how com.google.common.collect.HashMultiset#add() is used in open-source projects; each example is taken from the named project, where the full source file can be viewed on GitHub.
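
Before the project examples, here is a minimal, self-contained sketch of the two add overloads, add(E) and add(E, int occurrences); the element strings are purely illustrative:

import com.google.common.collect.HashMultiset;

public class HashMultisetAddDemo {
    public static void main(String[] args) {
        HashMultiset<String> tokens = HashMultiset.create();
        tokens.add("guava");         // add a single occurrence
        tokens.add("guava");         // "guava" is now counted twice
        tokens.add("hadoop", 3);     // add(E, int) adds several occurrences at once
        System.out.println(tokens.count("guava"));   // 2
        System.out.println(tokens.count("hadoop"));  // 3
    }
}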

Example 1  Project: gef   File: AbstractVisualPart.java
@Override
public void attachAnchored(IVisualPart<? extends Node> anchored) {
	// determine the viewer before adding the anchored
	IViewer oldViewer = getViewer();

	// register if we obtain a link to the viewer
	HashMultiset<IVisualPart<? extends Node>> newAnchoreds = HashMultiset
			.create(anchoreds);
	newAnchoreds.add(anchored);
	IViewer newViewer = determineViewer(getParent(), newAnchoreds);

	// unregister from old viewer in case we were registered (oldViewer !=
	// null) and the viewer changes (newViewer != oldViewer)
	if (oldViewer != null && newViewer != oldViewer) {
		oldViewer.unsetAdapter(this);
	}

	// attach anchored (and fire change notifications)
	anchoreds.add(anchored);

	// if we obtain a link to the viewer then register at new viewer
	if (newViewer != null && newViewer != oldViewer) {
		newViewer.setAdapter(this,
				String.valueOf(System.identityHashCode(this)));
	}
}
 
Example 2  Project: ner-sequencelearning   File: VectorizerMain.java
private String[] prepareNGramDictionary(QGram qgram) throws IOException {
    final HashMultiset<String> set = HashMultiset.create();
    try (BufferedReader reader = new BufferedReader(new FileReader(
            inputFilePath))) {

        String line;
        while ((line = reader.readLine()) != null) {
            if (line.isEmpty()) {
                continue;
            }

            String[] split = SPLIT_PATTERN.split(line);
            String tkn = cleanToken(split[0]);
            Map<String, Integer> profile = qgram.getProfile(tkn);
            for (Map.Entry<String, Integer> entry : profile.entrySet()) {
                //noinspection ResultOfMethodCallIgnored
                set.add(entry.getKey(), entry.getValue());
            }
        }
    }

    // do some naive word statistics cut-off
    return set.entrySet()
            .stream()
            .filter(e -> e.getCount() > MIN_CHAR_NGRAM_OCCURRENCE)
            .map(Multiset.Entry::getElement)
            .sorted()
            .toArray(String[]::new);
}
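
In Example 2, the //noinspection ResultOfMethodCallIgnored hint is needed because Multiset.add(E, int) returns the count of the element before the operation, and the loop deliberately ignores that value. A small sketch of that return value (the n-gram strings are made up):

HashMultiset<String> ngrams = HashMultiset.create();
int before = ngrams.add("ab", 2);   // returns 0: "ab" was not present before
before = ngrams.add("ab", 3);       // returns 2: the count prior to this call
// ngrams.count("ab") is now 5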
 
Example 3  Project: ArchUnit   File: ClassesThatTestsExistTest.java
private Multiset<String> getSyntaxElements() {
    HashMultiset<String> result = HashMultiset.create();
    for (Method method : ClassesThat.class.getMethods()) {
        result.add(method.getName());
    }
    return result;
}
 
Example 4
@Test
public void multiNodeCluster2() throws Exception {
  final Wrapper wrapper = newWrapper(200, 1, 20,
      ImmutableList.of(
          new EndpointAffinity(N1_EP2, 0.15, true, 50),
          new EndpointAffinity(N2_EP2, 0.15, true, 50),
          new EndpointAffinity(N3_EP1, 0.10, true, 50),
          new EndpointAffinity(N4_EP2, 0.20, true, 50),
          new EndpointAffinity(N1_EP1, 0.20, true, 50)
      ));
  INSTANCE.parallelizeFragment(wrapper, newParameters(1, 5, 20), null);

  // Expect the fragment parallelization to be 20 because:
  // 1. the cost (200) is above the threshold (SLICE_TARGET_DEFAULT) (which gives 200/1=200 width) and
  // 2. Number of mandatory node assignments are 5 (current width 200 satisfies the requirement)
  // 3. max fragment width is 20 which limits the width
  assertEquals(20, wrapper.getWidth());

  final List<NodeEndpoint> assignedEps = wrapper.getAssignedEndpoints();
  assertEquals(20, assignedEps.size());
  final HashMultiset<NodeEndpoint> counts = HashMultiset.create();
  for(final NodeEndpoint ep : assignedEps) {
    counts.add(ep);
  }
  // Each node gets at max 5.
  assertTrue(counts.count(N1_EP2) <= 5);
  assertTrue(counts.count(N2_EP2) <= 5);
  assertTrue(counts.count(N3_EP1) <= 5);
  assertTrue(counts.count(N4_EP2) <= 5);
  assertTrue(counts.count(N1_EP1) <= 5);
}
 
Example 5
@Test
public void multiNodeClusterNonNormalizedAffinities() throws Exception {
  final Wrapper wrapper = newWrapper(2000, 1, 250,
      ImmutableList.of(
          new EndpointAffinity(N1_EP2, 15, true, 50),
          new EndpointAffinity(N2_EP2, 15, true, 50),
          new EndpointAffinity(N3_EP1, 10, true, 50),
          new EndpointAffinity(N4_EP2, 20, true, 50),
          new EndpointAffinity(N1_EP1, 20, true, 50)
      ));
  INSTANCE.parallelizeFragment(wrapper, newParameters(100, 20, 80), null);

  // Expect the fragment parallelization to be 20 because:
  // 1. the cost (2000) is above the threshold (SLICE_TARGET_DEFAULT) (which gives 2000/100=20 width) and
  // 2. Number of mandatory node assignments are 5 (current width 20 satisfies the requirement)
  // 3. max width per node is 20 which limits the width to 100, but existing width (20) is already less
  assertEquals(20, wrapper.getWidth());

  final List<NodeEndpoint> assignedEps = wrapper.getAssignedEndpoints();
  assertEquals(20, assignedEps.size());
  final HashMultiset<NodeEndpoint> counts = HashMultiset.create();
  for(final NodeEndpoint ep : assignedEps) {
    counts.add(ep);
  }
  // Each node gets at max 5.
  assertThat(counts.count(N1_EP2), CoreMatchers.allOf(greaterThan(1), lessThanOrEqualTo(5)));
  assertThat(counts.count(N2_EP2), CoreMatchers.allOf(greaterThan(1), lessThanOrEqualTo(5)));
  assertThat(counts.count(N3_EP1), CoreMatchers.allOf(greaterThan(1), lessThanOrEqualTo(5)));
  assertThat(counts.count(N4_EP2), CoreMatchers.allOf(greaterThan(1), lessThanOrEqualTo(5)));
  assertThat(counts.count(N1_EP1), CoreMatchers.allOf(greaterThan(1), lessThanOrEqualTo(5)));
}
 
Example 6  Project: wikireverse   File: LinkArrayWritable.java
public String getMostUsedArticleCasing() {
	HashMultiset<String> articleNames = HashMultiset.create();
	String result;

	for (Writable writable: super.get()) {
		LinkWritable link = (LinkWritable)writable;
		articleNames.add(link.getArticle().toString());
	}

	ImmutableMultiset<String> sorted = Multisets.copyHighestCountFirst(articleNames);
	result = (String)sorted.elementSet().toArray()[0];
	
	return result;
}
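
Example 6 relies on Multisets.copyHighestCountFirst, which copies the multiset into an ImmutableMultiset whose element set iterates in descending count order, so the first element is the casing that was added most often. A minimal sketch with made-up article names:

HashMultiset<String> articleNames = HashMultiset.create();
articleNames.add("Guava");
articleNames.add("guava");
articleNames.add("guava");
ImmutableMultiset<String> sorted = Multisets.copyHighestCountFirst(articleNames);
// elementSet() iterates highest count first, so this yields "guava"
String mostUsed = sorted.elementSet().iterator().next();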
 
Example 7  Project: hadoop   File: TestCombineFileInputFormat.java
@Test
public void testNodeInputSplit() throws IOException, InterruptedException {
  // Regression test for MAPREDUCE-4892. There are 2 nodes with all blocks on 
  // both nodes. The grouping ensures that both nodes get splits instead of 
  // just the first node
  DummyInputFormat inFormat = new DummyInputFormat();
  int numBlocks = 12;
  long totLength = 0;
  long blockSize = 100;
  long maxSize = 200;
  long minSizeNode = 50;
  long minSizeRack = 50;
  String[] locations = { "h1", "h2" };
  String[] racks = new String[0];
  Path path = new Path("hdfs://file");
  
  OneBlockInfo[] blocks = new OneBlockInfo[numBlocks];
  for(int i=0; i<numBlocks; ++i) {
    blocks[i] = new OneBlockInfo(path, i*blockSize, blockSize, locations, racks);
    totLength += blockSize;
  }
  
  List<InputSplit> splits = new ArrayList<InputSplit>();
  HashMap<String, Set<String>> rackToNodes = 
                            new HashMap<String, Set<String>>();
  HashMap<String, List<OneBlockInfo>> rackToBlocks = 
                            new HashMap<String, List<OneBlockInfo>>();
  HashMap<OneBlockInfo, String[]> blockToNodes = 
                            new HashMap<OneBlockInfo, String[]>();
  HashMap<String, Set<OneBlockInfo>> nodeToBlocks = 
                            new HashMap<String, Set<OneBlockInfo>>();
  
  OneFileInfo.populateBlockInfo(blocks, rackToBlocks, blockToNodes, 
                           nodeToBlocks, rackToNodes);
  
  inFormat.createSplits(nodeToBlocks, blockToNodes, rackToBlocks, totLength,  
                        maxSize, minSizeNode, minSizeRack, splits);
  
  int expectedSplitCount = (int)(totLength/maxSize);
  assertEquals(expectedSplitCount, splits.size());
  HashMultiset<String> nodeSplits = HashMultiset.create();
  for(int i=0; i<expectedSplitCount; ++i) {
    InputSplit inSplit = splits.get(i);
    assertEquals(maxSize, inSplit.getLength());
    assertEquals(1, inSplit.getLocations().length);
    nodeSplits.add(inSplit.getLocations()[0]);
  }
  assertEquals(3, nodeSplits.count(locations[0]));
  assertEquals(3, nodeSplits.count(locations[1]));
}
 
Example 8  Project: big-c   File: TestCombineFileInputFormat.java
@Test
public void testNodeInputSplit() throws IOException, InterruptedException {
  // Regression test for MAPREDUCE-4892. There are 2 nodes with all blocks on 
  // both nodes. The grouping ensures that both nodes get splits instead of 
  // just the first node
  DummyInputFormat inFormat = new DummyInputFormat();
  int numBlocks = 12;
  long totLength = 0;
  long blockSize = 100;
  long maxSize = 200;
  long minSizeNode = 50;
  long minSizeRack = 50;
  String[] locations = { "h1", "h2" };
  String[] racks = new String[0];
  Path path = new Path("hdfs://file");
  
  OneBlockInfo[] blocks = new OneBlockInfo[numBlocks];
  for(int i=0; i<numBlocks; ++i) {
    blocks[i] = new OneBlockInfo(path, i*blockSize, blockSize, locations, racks);
    totLength += blockSize;
  }
  
  List<InputSplit> splits = new ArrayList<InputSplit>();
  HashMap<String, Set<String>> rackToNodes = 
                            new HashMap<String, Set<String>>();
  HashMap<String, List<OneBlockInfo>> rackToBlocks = 
                            new HashMap<String, List<OneBlockInfo>>();
  HashMap<OneBlockInfo, String[]> blockToNodes = 
                            new HashMap<OneBlockInfo, String[]>();
  HashMap<String, Set<OneBlockInfo>> nodeToBlocks = 
                            new HashMap<String, Set<OneBlockInfo>>();
  
  OneFileInfo.populateBlockInfo(blocks, rackToBlocks, blockToNodes, 
                           nodeToBlocks, rackToNodes);
  
  inFormat.createSplits(nodeToBlocks, blockToNodes, rackToBlocks, totLength,  
                        maxSize, minSizeNode, minSizeRack, splits);
  
  int expectedSplitCount = (int)(totLength/maxSize);
  assertEquals(expectedSplitCount, splits.size());
  HashMultiset<String> nodeSplits = HashMultiset.create();
  for(int i=0; i<expectedSplitCount; ++i) {
    InputSplit inSplit = splits.get(i);
    assertEquals(maxSize, inSplit.getLength());
    assertEquals(1, inSplit.getLocations().length);
    nodeSplits.add(inSplit.getLocations()[0]);
  }
  assertEquals(3, nodeSplits.count(locations[0]));
  assertEquals(3, nodeSplits.count(locations[1]));
}