Listed below are example usages of com.google.common.collect.Iterators#size(); the original source for each example is available on GitHub.
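Before the project-level snippets, here is a minimal, self-contained sketch of the basic contract (the class name and sample data are made up for illustration): Iterators.size walks the given iterator to its end and returns the number of elements it encountered.

import com.google.common.collect.Iterators;

import java.util.Arrays;
import java.util.Iterator;

public class IteratorsSizeDemo {
    public static void main(String[] args) {
        // Build a throwaway iterator over three sample values.
        Iterator<String> names = Arrays.asList("alice", "bob", "carol").iterator();
        // Iterators.size advances the iterator to the end and counts the elements.
        int count = Iterators.size(names);
        System.out.println(count); // prints 3
    }
}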
@Test
@SuppressWarnings("unchecked")
public void testCompactUnpartitionedWithNumWriters() throws Exception {
Assume.assumeTrue(setLocalReducerMax(getConfiguration(), 3));
command.repoURI = repoUri;
command.numWriters = 3;
command.datasets = Lists.newArrayList(unpartitioned);
int rc = command.run();
Assert.assertEquals("Should return success", 0, rc);
DatasetRepository repo = DatasetRepositories.repositoryFor("repo:" + repoUri);
FileSystemDataset<GenericData.Record> ds =
(FileSystemDataset<GenericData.Record>) repo.<GenericData.Record>
load("default", unpartitioned);
int size = Iterators.size(ds.newReader());
Assert.assertEquals("Should contain copied records", numRecords, size);
Assert.assertEquals("Should produce 3 files",
3, Iterators.size(ds.pathIterator()));
verify(console).info("Compacted {} records in \"{}\"",(long) numRecords, unpartitioned);
verifyNoMoreInteractions(console);
}
@SuppressWarnings("unchecked")
public void testCopyWithNumWriters(int expectedFiles) throws Exception {
Assume.assumeTrue(setLocalReducerMax(getConfiguration(), 3));
command.repoURI = repoUri;
command.numWriters = 3;
command.datasets = Lists.newArrayList(source, dest);
int rc = command.run();
Assert.assertEquals("Should return success", 0, rc);
DatasetRepository repo = DatasetRepositories.repositoryFor("repo:" + repoUri);
FileSystemDataset<GenericData.Record> ds =
(FileSystemDataset<GenericData.Record>) repo.<GenericData.Record>
load("default", dest);
int size = Iterators.size(ds.newReader());
Assert.assertEquals("Should contain copied records", 6, size);
Assert.assertEquals("Should produce " + expectedFiles + " files",
expectedFiles, Iterators.size(ds.pathIterator()));
verify(console).info("Added {} records to \"{}\"", 6l, dest);
verifyNoMoreInteractions(console);
}
static void filterByDegree(Vertex vertex, Messenger<String> messenger, Memory memory, boolean persistId) {
if ((vertex.label().equals(Schema.BaseType.ENTITY.name()) ||
vertex.label().equals(Schema.BaseType.ATTRIBUTE.name())) &&
Iterators.size(messenger.receiveMessages()) >= memory.<Long>get(K)) {
String id = vertex.id().toString();
// coreness query doesn't require id
if (persistId) {
vertex.property(K_CORE_LABEL, id);
} else {
vertex.property(K_CORE_LABEL, true);
}
memory.add(K_CORE_EXIST, true);
// send ids from now on, as we want to count connected entities, not relations
sendMessage(messenger, id);
}
}
public ValidateData process(ProcessValidation processValidation, JsonNode jsonValue) {
// count the top-level fields of the JSON value
int nbFields = Iterators.size(jsonValue.fieldNames());
eventSizeHistogram.observe(nbFields);
if (nbFields > processValidation.getParameterValidation().getMaxFields()) {
return UtilsValidateData.createValidateData(false, StatusCode.max_fields, TypeValidation.MAX_FIELD, jsonValue, String.valueOf(nbFields));
}
return ValidateData.builder()
.success(true)
.typeValidation(TypeValidation.MAX_FIELD)
.jsonValue(jsonValue)
.build();
}
@SuppressWarnings("unchecked")
public void testCopyWithNumPartitionWriters(int numWriters,
int filesPerPartition,
int expectedFiles)
throws IOException {
Assume.assumeTrue(setLocalReducerMax(getConfiguration(), numWriters));
command.repoURI = repoUri;
command.numWriters = numWriters;
command.filesPerPartition = filesPerPartition;
command.datasets = Lists.newArrayList(source, dest);
int rc = command.run();
Assert.assertEquals("Should return success", 0, rc);
DatasetRepository repo = DatasetRepositories.repositoryFor("repo:" + repoUri);
FileSystemDataset<GenericData.Record> ds =
(FileSystemDataset<GenericData.Record>) repo.<GenericData.Record>
load("default", dest);
int size = Iterators.size(ds.newReader());
Assert.assertEquals("Should contain copied records", 6, size);
Assert.assertEquals("Should produce " + expectedFiles + " files",
expectedFiles, Iterators.size(ds.pathIterator()));
verify(console).info("Added {} records to \"{}\"", 6l, dest);
verifyNoMoreInteractions(console);
}
@Test
@SuppressWarnings("unchecked")
public void testPartitionedCompactWithNumWritersNumFilesPerPartition() throws Exception {
Assume.assumeTrue(setLocalReducerMax(getConfiguration(), 2));
command.repoURI = repoUri;
// if a reducer gets multiple parts of a partition, they will be combined
// use more reducers to reduce the likelihood of that case
command.numWriters = 10;
command.filesPerPartition = 3;
command.datasets = Lists.newArrayList(partitioned);
int rc = command.run();
Assert.assertEquals("Should return success", 0, rc);
DatasetRepository repo = DatasetRepositories.repositoryFor("repo:" + repoUri);
FileSystemDataset<GenericData.Record> ds =
(FileSystemDataset<GenericData.Record>) repo.<GenericData.Record>
load("default", partitioned);
int size = Iterators.size(ds.newReader());
Assert.assertEquals("Should contain copied records", numRecords, size);
Assert.assertEquals("Should produce 2 partitions", 2, Iterators.size(ds.getCoveringPartitions().iterator()));
Assert.assertEquals("Should produce 6 files", 6, Iterators.size(ds.pathIterator()));
verify(console).info("Compacted {} records in \"{}\"", (long)numRecords, partitioned);
verifyNoMoreInteractions(console);
}
@Test
public void testWriteAndListLogEntriesAsync() throws ExecutionException, InterruptedException {
String logName = RemoteLoggingHelper.formatForTest("log_name");
String filter = "logName=projects/" + logging.getOptions().getProjectId() + "/logs/" + logName;
loggingSnippets.write(logName);
// flush all pending asynchronous writes
logging.flush();
Iterator<LogEntry> iterator =
loggingSnippets.listLogEntriesAsync(filter).iterateAll().iterator();
while (Iterators.size(iterator) < 2) {
Thread.sleep(500);
iterator = loggingSnippets.listLogEntriesAsync(filter).iterateAll().iterator();
}
assertTrue(loggingSnippets.deleteLogAsync(logName));
}
@Test
public void testWriteAndListLogEntries() throws InterruptedException {
String logName = RemoteLoggingHelper.formatForTest("log_name");
String filter = "logName=projects/" + logging.getOptions().getProjectId() + "/logs/" + logName;
loggingSnippets.write(logName);
Iterator<LogEntry> iterator = loggingSnippets.listLogEntries(filter).iterateAll().iterator();
while (Iterators.size(iterator) < 2) {
Thread.sleep(500);
iterator = loggingSnippets.listLogEntries(filter).iterateAll().iterator();
}
assertTrue(loggingSnippets.deleteLog(logName));
}
@Test
public void testBasicCopy() throws Exception {
command.repoURI = repoUri;
command.datasets = Lists.newArrayList(source, dest);
int rc = command.run();
Assert.assertEquals("Should return success", 0, rc);
DatasetRepository repo = DatasetRepositories.repositoryFor("repo:" + repoUri);
int size = Iterators.size(repo.load("default", dest).newReader());
Assert.assertEquals("Should contain copied records", 6, size);
verify(console).info("Added {} records to \"{}\"", 6l, dest);
verifyNoMoreInteractions(console);
}
@Test
@SuppressWarnings("unchecked")
public void testPartitionedCompactWithNumWriters() throws Exception {
Assume.assumeTrue(setLocalReducerMax(getConfiguration(), 2));
command.repoURI = repoUri;
command.numWriters = 2;
command.filesPerPartition = 1;
command.datasets = Lists.newArrayList(partitioned);
int rc = command.run();
Assert.assertEquals("Should return success", 0, rc);
DatasetRepository repo = DatasetRepositories.repositoryFor("repo:" + repoUri);
FileSystemDataset<GenericData.Record> ds =
(FileSystemDataset<GenericData.Record>) repo.<GenericData.Record>
load("default", partitioned);
int size = Iterators.size(ds.newReader());
Assert.assertEquals("Should contain copied records", numRecords, size);
Assert.assertEquals("Should produce 2 partitions", 2, Iterators.size(ds.getCoveringPartitions().iterator()));
Assert.assertEquals(
"Should produce 2 files: " + Iterators.toString(ds.pathIterator()),
2, Iterators.size(ds.pathIterator()));
verify(console).info("Compacted {} records in \"{}\"", (long) numRecords, partitioned);
verifyNoMoreInteractions(console);
}
public static void main(String[] args) throws Exception {
Opts opts = new Opts();
opts.parseArgs(ContinuousQuery.class.getName(), args);
try (AccumuloClient client = Accumulo.newClient().from(opts.getClientPropsPath()).build()) {
ArrayList<Text[]> randTerms = findRandomTerms(
client.createScanner(opts.doc2Term, Authorizations.EMPTY), opts.numTerms);
Random rand = new Random();
try (BatchScanner bs = client.createBatchScanner(opts.tableName, Authorizations.EMPTY, 5)) {
for (long i = 0; i < opts.iterations; i += 1) {
Text[] columns = randTerms.get(rand.nextInt(randTerms.size()));
bs.clearScanIterators();
bs.clearColumns();
IteratorSetting ii = new IteratorSetting(20, "ii", IntersectingIterator.class);
IntersectingIterator.setColumnFamilies(ii, columns);
bs.addScanIterator(ii);
bs.setRanges(Collections.singleton(new Range()));
long t1 = System.currentTimeMillis();
int count = Iterators.size(bs.iterator());
long t2 = System.currentTimeMillis();
System.out.printf(" %s %,d %6.3f%n", Arrays.asList(columns), count, (t2 - t1) / 1000.0);
}
}
}
}
public static int size(Object obj) {
Preconditions.checkArgument(obj != null);
if (obj instanceof Traversal) return size(((Traversal) obj).toList());
else if (obj instanceof Collection) return ((Collection)obj).size();
else if (obj instanceof Iterable) return Iterables.size((Iterable) obj);
else if (obj instanceof Iterator) return Iterators.size((Iterator)obj);
else if (obj.getClass().isArray()) return Array.getLength(obj);
throw new IllegalArgumentException("Cannot determine size of: " + obj);
}
@Test
public void testBasicUnpartitionedCompact() throws Exception {
command.repoURI = repoUri;
command.datasets = Lists.newArrayList(unpartitioned);
int rc = command.run();
Assert.assertEquals("Should return success", 0, rc);
DatasetRepository repo = DatasetRepositories.repositoryFor("repo:" + repoUri);
int size = Iterators.size(repo.load("default", unpartitioned).newReader());
Assert.assertEquals("Should contain copied records", numRecords, size);
verify(console).info("Compacted {} records in \"{}\"", (long)numRecords, unpartitioned);
verifyNoMoreInteractions(console);
}
@Override
public void finish()
{
// drain any remaining pages; the element count itself is discarded
Iterators.size(pages);
}
private int getNumOptions(LegacyKVStore kvStore) {
return Iterators.size(kvStore.find().iterator());
}
public void redraw() {
this.button.setIconType(IconFont.ICON_FILTER);
this.button.setActive(this.activate);
Container row = (Container) ((Widget) this.headerCell).getParent();
this.headerCell.clear();
if (this.valueChangeRegistration != null) {
this.valueChangeRegistration.removeHandler();
this.valueChangeRegistration = null;
}
boolean showFilterRow = false;
if (this.activate) {
this.headerCell.append(this.inputText);
this.valueChangeRegistration = this.inputText.addBlurHandler(new BlurHandler() {
@Override
public void onBlur(BlurEvent event) {
String newValue = TableFilter.this.inputText.flush();
if (!Objects.equal(TableFilter.this.filterValue, newValue)) {
TableFilter.this.filterValue = newValue;
TableFilter.this.getDriver().resetDisplay();
}
}
});
Widget hCell = this.button.getParent().getParent();
hCell.getElement().getStyle().setWidth(hCell.getOffsetWidth(), Unit.PX);
showFilterRow = true;
}
if (!showFilterRow) {
for (Widget cell : row) {
if (cell instanceof TableTH) {
showFilterRow |= Iterators.size(((TableTH<?>) cell).iterator()) > 0;
if (showFilterRow) {
break;
}
}
}
}
row.setVisible(showFilterRow);
}
@Override
public int size() {
return Iterators.size(iterator());
}
private double[] trainDistributed() throws IOException, NotBoundException {
final Set<FeatureKey> boundedFeatures = new HashSet<>();
final Map<FeatureKey, Integer> featureToIndex = makeKeyToIndexMap(trainingParameters.minimumFeatureFrequency,
boundedFeatures);
final Collection<RemoteTrainer> workers = getTrainers(featureToIndex);
System.out.println("Training nodes: " + workers.size());
final List<Runnable> tasks = new ArrayList<>();
final int sentencesToLoad = Iterators.size(ParallelCorpusReader.READER.readCorpus(false /* not dev */));
final int shardSize = sentencesToLoad / workers.size();
int i = 0;
for (final RemoteTrainer worker : workers) {
final int start = i * shardSize;
final int end = start + shardSize;
tasks.add(new Runnable() {
@Override
public void run() {
try {
worker.loadData(start, end, dataParameters, trainingParameters.getModelFolder(), trainingLogger);
} catch (final Throwable e) {
throw new RuntimeException(e);
}
}
});
i++;
}
Util.runJobsInParallel(tasks, workers.size());
final double[] weights = train(new DistributedLossFunction(workers, trainingParameters.getSigmaSquared()),
featureToIndex, boundedFeatures);
return weights;
}
/**
* Returns the number of elements in {@code iterator}.
* The given iterator is left exhausted.
*
* @param iterator the iterator to count; may not be {@code null}.
* @return the number of elements in {@code iterator}.
*/
public static int size(Iterator<?> iterator) {
return Iterators.size(iterator);
}
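As the Javadoc above notes, the delegated Guava call leaves the iterator exhausted. A hedged illustration of that side effect (the helper name and sample data are made up; assumes the usual Guava and java.util imports):

static void demonstrateExhaustion() {
    Iterator<String> it = Arrays.asList("a", "b", "c").iterator();
    int first = Iterators.size(it);  // 3: every element is consumed while counting
    int second = Iterators.size(it); // 0: the same iterator is now exhausted
    System.out.println(first + ", then " + second);
}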
/**
* Counts the tuples that match the given predicate.
* @param predicate the predicate used to filter the memtable tuples
* @return the number of tuples matching the predicate
*/
protected int countTuplesForPredicate(final Predicate predicate) {
final Iterator<Tuple> iterator = new PredicateTupleFilterIterator(memtable.iterator(), predicate);
return Iterators.size(iterator);
}