com.google.common.collect.Multiset#Entry() source code examples (Demo)

Listed below are example usages of com.google.common.collect.Multiset#Entry(); you can also follow the links to GitHub to view the full source code.
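Before the project examples, here is a minimal, self-contained sketch (class and element names are illustrative, not taken from any of the projects below) of the basic Multiset.Entry pattern that all of the snippets rely on: iterating entrySet() and reading getElement()/getCount().

import com.google.common.collect.HashMultiset;
import com.google.common.collect.Multiset;

public class MultisetEntryDemo {
    public static void main(String[] args) {
        // A Multiset counts how many times each element has been added.
        Multiset<String> words = HashMultiset.create();
        words.add("apple");
        words.add("banana");
        words.add("apple");

        // Each Multiset.Entry pairs a distinct element with its count.
        for (Multiset.Entry<String> entry : words.entrySet()) {
            System.out.println(entry.getElement() + " -> " + entry.getCount());
        }
    }
}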

/**
 * Return the element with the maximum likelihood for {@code given}. If
 * {@code given} is unknown, {@link Optional#absent()} is returned.
 *
 * @param given
 * @return
 */
@Override
public Optional<A> getMaximumLikelihoodElement(final B given) {
	if (!table.containsKey(given)) {
		return Optional.absent();
	}
	int maxCount = 0;
	A maxLi = null;
	for (final Multiset.Entry<A> entry : table.get(given).entrySet()) {
		if (maxCount < entry.getCount()) {
			maxCount = entry.getCount();
			maxLi = entry.getElement();
		}
	}

	return Optional.of(maxLi);
}
 
private static void validateMetadataKeyUniqueness(MultiVmDeploymentPackageSpec spec) {
  // Ensures that metadata keys are unique.
  Multiset<String> metadataKeyCounts = HashMultiset.create();
  for (PasswordSpec password : spec.getPasswordsList()) {
    metadataKeyCounts.add(password.getMetadataKey());
  }
  for (VmTierSpec tier : spec.getTiersList()) {
    Multiset<String> perTier = HashMultiset.create(metadataKeyCounts);
    for (GceMetadataItem metadataItem : tier.getGceMetadataItemsList()) {
      perTier.add(metadataItem.getKey());
    }
    for (Multiset.Entry<String> entry : perTier.entrySet()) {
      if (entry.getCount() > 1) {
        throw new IllegalArgumentException(
            String.format("Metadata key '%s' is not unique", entry.getElement()));
      }
    }
  }
}
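The duplicate-detection idiom above (entry.getCount() > 1) can be stripped down to a small helper; findDuplicates below is a hypothetical illustration, not part of the original project:

import com.google.common.collect.HashMultiset;
import com.google.common.collect.Multiset;
import java.util.HashSet;
import java.util.Set;

static Set<String> findDuplicates(Iterable<String> keys) {
  // Count every key, then report those seen more than once.
  Multiset<String> counts = HashMultiset.create();
  for (String key : keys) {
    counts.add(key);
  }
  Set<String> duplicates = new HashSet<>();
  for (Multiset.Entry<String> entry : counts.entrySet()) {
    if (entry.getCount() > 1) {
      duplicates.add(entry.getElement());
    }
  }
  return duplicates;
}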
 
Source code 3  Project: immutables  File: ValueTypeComposer.java
private void checkAttributeNamesForDuplicates(ValueType type, Protoclass protoclass) {
  if (!type.attributes.isEmpty()) {
    Multiset<String> attributeNames = HashMultiset.create(type.attributes.size());
    for (ValueAttribute attribute : type.attributes) {
      if (attribute.isGenerateLazy) {
        attributeNames.add(attribute.name() + "$lazy"); // making lazy compare in it's own scope
      } else {
        attributeNames.add(attribute.name());
      }
    }

    List<String> duplicates = Lists.newArrayList();
    for (Multiset.Entry<String> entry : attributeNames.entrySet()) {
      if (entry.getCount() > 1) {
        duplicates.add(entry.getElement().replace("$lazy", ""));
      }
    }

    if (!duplicates.isEmpty()) {
      protoclass.report()
          .error("Duplicate attribute names %s. You should check if correct @Value.Style applied",
              duplicates);
    }
  }
}
 
Source code 4  Project: tac-kbp-eal  File: ByEventTypeResultWriter.java
@Override
public void writeResult(final List<EALScorer2015Style.Result> perDocResults,
    final File eventTypesDir) throws IOException {
  final Multiset<Symbol> eventTypesSeen = gatherEventTypesSeen(perDocResults);

  for (final Multiset.Entry<Symbol> typeEntry : Multisets.copyHighestCountFirst(eventTypesSeen)
      .entrySet()) {
    final Symbol type = typeEntry.getElement();
    final Function<EALScorer2015Style.ArgResult, EALScorer2015Style.ArgResult>
        filterFunction =
        new Function<EALScorer2015Style.ArgResult, EALScorer2015Style.ArgResult>() {
          @Override
          public EALScorer2015Style.ArgResult apply(final
          EALScorer2015Style.ArgResult input) {
            return input.copyFiltered(compose(equalTo(type), type()));
          }
        };
    final File eventTypeDir = new File(eventTypesDir, type.asString());
    eventTypeDir.mkdirs();
    writeOverallArgumentScoresForTransformedResults(perDocResults, filterFunction,
        eventTypeDir);
  }
}
 
Source code 5  Project: entity-fishing  File: EntityScorer.java
public ScorerContext context(List<String> words) {
    Multiset<String> counter = TreeMultiset.create();
    counter.addAll(words);

    int word_dim = kb.getEmbeddingsSize();
    // word_vecs is the concatenation of all word vectors of the word list
    float[] word_vecs = new float[counter.size() * word_dim];
    IntArrayList word_counts = new IntArrayList();
    int n_words = 0;

    for(Multiset.Entry<String> entry : counter.entrySet()) {
        short[] vector = kb.getWordEmbeddings(entry.getElement());
        if (vector != null) {
            word_counts.add(entry.getCount());
            for (int i=0; i<kb.getEmbeddingsSize(); i++) {
                word_vecs[n_words * word_dim + i] = vector[i];
            }
            n_words += 1;
        }
    }
    word_counts.trim();

    return create_context(word_vecs, word_counts.elements());
}
 
/**
 * Return the element with the maximum likelihood for {@code given}. If
 * {@code given} is unknown, {@link Optional#absent()} is returned.
 *
 * @param given
 * @return
 */
@Override
public Optional<A> getMaximumLikelihoodElement(final B given) {
	if (!table.containsKey(given)) {
		return Optional.absent();
	}
	int maxCount = 0;
	A maxLi = null;
	for (final Multiset.Entry<A> entry : table.get(given).entrySet()) {
		if (maxCount < entry.getCount()) {
			maxCount = entry.getCount();
			maxLi = entry.getElement();
		}
	}

	return Optional.of(maxLi);
}
 
Source code 7  Project: spinach  File: GetJobsAction.java
private void reconnectToNearestProducer(DisqueConnection<K, V> disqueConnection, boolean forcedReconnect) {
    log.debug("reconnectToNearestProducer()");
    Set<Multiset.Entry<String>> stats = Multisets.copyHighestCountFirst(nodePrefixes).entrySet();
    nodePrefixes.clear();

    if (!isNodeSwitchNecessary(stats) && !forcedReconnect) {
        return;
    }

    String nodeIdPrefix = getNodeIdPrefix(stats);
    if (nodeIdPrefix != null) {
        log.debug("Set preferred node prefix to {}", nodeIdPrefix);
        socketAddressSupplier.setPreferredNodeIdPrefix(nodeIdPrefix);
    }

    if (disqueConnection.isOpen()) {
        if (nodeIdPrefix == null) {
            log.info("Initiating reconnect");
        } else {
            log.info("Initiating reconnect to preferred node with prefix {}", nodeIdPrefix);
        }
        disconnect((RedisChannelHandler<?, ?>) disqueConnection);
    }
}
 
@Override
public IteratorBatch<String> nextElements(long iteratorId, int position) {
  IteratorBatch<Multiset.Entry<String>> batch = nextEntries(iteratorId, position);
  return batch == null ? null : new IteratorBatch<>(batch.id(), batch.position(), batch.entries()
      .stream()
      .map(element -> element.getElement())
      .collect(Collectors.toList()), batch.complete());
}
 
Source code 9  Project: api-mining  File: SampleUtils.java
/**
 * Get a uniformly random element from a Multiset.
 * 
 * @param set
 * @return
 */
public static <T> T getRandomElement(final Multiset<T> set) {
	final int randPos = RandomUtils.nextInt(checkNotNull(set).size());

	T selected = null;
	int i = 0;
	for (final Multiset.Entry<T> entry : set.entrySet()) {
		i += entry.getCount();
		if (i > randPos) {
			selected = entry.getElement();
			break;
		}
	}
	return selected;
}
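A hypothetical usage sketch for the method above (assuming the SampleUtils class and Guava are on the classpath; the element values are illustrative): every occurrence is equally likely, so elements with higher counts are returned proportionally more often.

Multiset<String> bag = HashMultiset.create();
bag.add("common", 3); // three occurrences
bag.add("rare");      // one occurrence
// "common" is selected roughly 75% of the time.
String picked = SampleUtils.getRandomElement(bag);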
 
Source code 10  Project: chaos-http-proxy  File: ChaosConfig.java
Properties getProperties() {
    Properties properties = new Properties();
    for (Multiset.Entry<Failure> entry :
            ImmutableMultiset.copyOf(failures).entrySet()) {
        properties.setProperty(entry.getElement().toPropertyName(),
                String.valueOf(entry.getCount()));
    }
    return properties;
}
 
Source code 11  Project: atomix  File: DefaultDistributedMultisetService.java
@Override
public IteratorBatch<Multiset.Entry<String>> iterateEntries() {
  IteratorContext iterator = new IteratorContext(getCurrentSession().sessionId().id());
  if (!iterator.iterator.hasNext()) {
    return null;
  }

  long iteratorId = getCurrentIndex();
  entryIterators.put(iteratorId, iterator);
  IteratorBatch<Multiset.Entry<String>> batch = nextEntries(iteratorId, 0);
  if (batch.complete()) {
    entryIterators.remove(iteratorId);
  }
  return batch;
}
 
Source code 12  Project: atomix  File: AtomicMultimapProxy.java
@Override
public CompletableFuture<Void> removeListener(CollectionEventListener<Multiset.Entry<byte[]>> listener) {
  return Futures.exceptionalFuture(new UnsupportedOperationException());
}
 
Source code 13  Project: atomix  File: AtomicMultimapProxy.java
@Override
public AsyncDistributedSet<Multiset.Entry<byte[]>> entrySet() {
  return new EntrySet();
}
 
@Override
public AsyncDistributedSet<Multiset.Entry<E>> entrySet() {
  return asyncMultiset.entrySet();
}
 
Source code 15  Project: atomix  File: DistributedMultisetProxy.java
@Override
public CompletableFuture<Boolean> retainAll(Collection<? extends Multiset.Entry<String>> c) {
  throw new UnsupportedOperationException();
}
 
Source code 16  Project: atomix  File: AtomicMultimapProxy.java
@Override
public CompletableFuture<Boolean> retainAll(Collection<? extends Multiset.Entry<byte[]>> c) {
  return Futures.exceptionalFuture(new UnsupportedOperationException());
}
 
Source code 17  Project: atomix  File: AtomicMultimapProxy.java
@Override
public CompletableFuture<Boolean> remove(Multiset.Entry<byte[]> element) {
  return Futures.exceptionalFuture(new UnsupportedOperationException());
}
 
Source code 18  Project: atomix  File: AtomicMultimapProxy.java
@Override
public CompletableFuture<Boolean> add(Multiset.Entry<byte[]> element) {
  return Futures.exceptionalFuture(new UnsupportedOperationException());
}
 
Source code 19  Project: atomix  File: AtomicMultimapProxy.java
@Override
public DistributedSet<Multiset.Entry<byte[]>> sync(Duration operationTimeout) {
  return new BlockingDistributedSet<>(this, operationTimeout.toMillis());
}
 
Source code 20  Project: sequence-mining  File: EMStep.java
/** EM-step for structural EM */
static Tuple2<Double, Map<Integer, Double>> structuralEMStep(final TransactionDatabase transactions,
		final InferenceAlgorithm inferenceAlgorithm, final Sequence candidate) {
	final double noTransactions = transactions.size();

	// Calculate max. no. of candidate occurrences
	final int maxReps = transactions.getTransactionList().parallelStream().mapToInt(t -> t.repetitions(candidate))
			.max().getAsInt();
	final Map<Integer, Double> initProb = new HashMap<>();
	initProb.put(0, 0.);
	for (int occur = 1; occur <= maxReps; occur++)
		initProb.put(occur, 1.);

	// E-step (adding candidate to transactions that support it)
	final Map<Multiset.Entry<Sequence>, Long> coveringWithCounts = transactions.getTransactionList()
			.parallelStream().map(t -> {
				if (t.contains(candidate)) {
					t.addSequenceCache(candidate, initProb);
					final Multiset<Sequence> covering = inferenceAlgorithm.infer(t);
					t.setTempCachedCovering(covering);
					return covering.entrySet();
				}
				return t.getCachedCovering().entrySet();
			}).flatMap(Set::stream).collect(groupingBy(identity(), counting()));

	// M-step
	final Table<Sequence, Integer, Double> newSequences = coveringWithCounts.entrySet().parallelStream().collect(
			HashBasedTable::create,
			(t, e) -> t.put(e.getKey().getElement(), e.getKey().getCount(), e.getValue() / noTransactions),
			Table::putAll);
	newSequences.rowKeySet().parallelStream().forEach(seq -> {
		// Pad with zero counts for non-occurrences
		final int maxOccur = Collections.max(newSequences.row(seq).keySet());
		for (int occur = 1; occur <= maxOccur; occur++) {
			if (!newSequences.contains(seq, occur))
				newSequences.put(seq, occur, 0.);
		} // Add probabilities for zero occurrences
		double rowSum = 0;
		for (final Double count : newSequences.row(seq).values())
			rowSum += count;
		newSequences.put(seq, 0, 1 - rowSum);
	});

	// Get average cost (removing candidate from supported transactions)
	final double averageCost = transactions.getTransactionList().parallelStream().mapToDouble(t -> {
		double cost;
		if (t.contains(candidate))
			cost = t.getTempCachedCost(newSequences);
		else
			cost = t.getCachedCost(newSequences);
		t.removeSequenceCache(candidate);
		return cost;
	}).sum() / noTransactions;

	// Get candidate prob
	final Map<Integer, Double> prob = newSequences.row(candidate);

	return new Tuple2<Double, Map<Integer, Double>>(averageCost, prob);
}