The following code examples show how to use com.google.common.collect.Iterators#limit().
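For orientation, Iterators.limit(iterator, limitSize) wraps an existing iterator so that it returns at most limitSize elements; it does not copy the underlying data. A minimal standalone sketch (not drawn from any of the projects below):

import com.google.common.collect.Iterators;
import java.util.Arrays;
import java.util.Iterator;

public class LimitDemo {
    public static void main(String[] args) {
        Iterator<String> source = Arrays.asList("a", "b", "c", "d").iterator();
        // At most two elements come back; the underlying data is not copied.
        Iterator<String> limited = Iterators.limit(source, 2);
        while (limited.hasNext()) {
            System.out.println(limited.next()); // prints "a", then "b"
        }
    }
}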
@Override
protected Iterator<E> flatMap(Traverser.Admin<Element> traverser) {
if (useMultiQuery) { //it is guaranteed that all elements are vertices
if (multiQueryResults == null || !multiQueryResults.containsKey(traverser.get())) {
initializeMultiQuery(Arrays.asList(traverser));
}
return convertIterator(multiQueryResults.get(traverser.get()));
} else if (traverser.get() instanceof JanusGraphVertex || traverser.get() instanceof WrappedVertex) {
JanusGraphVertexQuery query = makeQuery((JanusGraphTraversalUtil.getJanusGraphVertex(traverser)).query());
return convertIterator(query.properties());
} else {
//It is some other element (edge or vertex property)
Iterator<E> iterator;
if (getReturnType().forValues()) {
iterator = traverser.get().values(getPropertyKeys());
} else {
//HasContainers cannot be applied to properties here, so a non-empty filter yields an empty result set
if (!hasContainers.isEmpty()) return Collections.emptyIterator();
iterator = (Iterator<E>) traverser.get().properties(getPropertyKeys());
}
if (limit != Query.NO_LIMIT) iterator = Iterators.limit(iterator, limit);
return iterator;
}
}
@Override
public Iterator<ByteBuffer> scanRecords(UUID dataId, @Nullable ByteBuffer from, @Nullable final ByteBuffer to,
int batchSize, int limit) {
final Iterator<Column<ByteBuffer>> iter = executePaginated(
_keyspace.prepareQuery(CF_DEDUP_DATA, ConsistencyLevel.CL_LOCAL_QUORUM)
.getKey(dataId)
.withColumnRange(new RangeBuilder()
.setStart(Objects.firstNonNull(from, EMPTY_BUFFER))
.setEnd(Objects.firstNonNull(to, EMPTY_BUFFER))
.setLimit(batchSize)
.build())
.autoPaginate(true));
return Iterators.limit(new AbstractIterator<ByteBuffer>() {
@Override
protected ByteBuffer computeNext() {
while (iter.hasNext()) {
ByteBuffer record = iter.next().getName();
if (!record.equals(to)) { // "to" is exclusive, so skip a column equal to it
return record;
}
}
return endOfData();
}
}, limit);
}
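The snippet above pairs Guava's AbstractIterator with Iterators.limit: computeNext() filters the paginated column stream, and limit caps the total number of records. A stripped-down sketch of the same pattern, using a hypothetical integer source instead of the Astyanax query:

import com.google.common.collect.AbstractIterator;
import com.google.common.collect.Iterators;
import java.util.Iterator;

// Filter a raw iterator inside computeNext(), then cap the total with limit().
static Iterator<Integer> evensUpTo(final Iterator<Integer> raw, int maxResults) {
    Iterator<Integer> evens = new AbstractIterator<Integer>() {
        @Override
        protected Integer computeNext() {
            while (raw.hasNext()) {
                int n = raw.next();
                if (n % 2 == 0) {
                    return n; // keep even values only
                }
            }
            return endOfData(); // underlying iterator exhausted
        }
    };
    return Iterators.limit(evens, maxResults);
}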
@Override
public Iterator<Change> readTimeline(Key key, boolean includeContentData,
UUID start, UUID end, boolean reversed, long limit, ReadConsistency consistency) {
checkNotNull(key, "key");
String table = key.getTable().getName();
Ordering<UUID> ordering = reversed ? TimeUUIDs.ordering().reverse() : TimeUUIDs.ordering();
NavigableMap<UUID, Change> map = Maps.newTreeMap(ordering);
if (includeContentData) {
map.putAll(safeGet(_contentChanges, table, key.getKey()));
}
if (start != null) {
map = map.tailMap(start, true);
}
if (end != null) {
map = map.headMap(end, true);
}
return Iterators.limit(map.values().iterator(), (int) Math.min(limit, Integer.MAX_VALUE));
}
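readTimeline slices a NavigableMap with tailMap/headMap before capping the result. The same idea with plain types, as an illustrative sketch:

import com.google.common.collect.Iterators;
import java.util.Iterator;
import java.util.NavigableMap;

// Restrict a sorted map to [start, end] (both inclusive here), then cap the value count.
static Iterator<String> sliceTimeline(NavigableMap<Integer, String> changes,
                                      Integer start, Integer end, long limit) {
    NavigableMap<Integer, String> map = changes;
    if (start != null) {
        map = map.tailMap(start, true);  // inclusive lower bound
    }
    if (end != null) {
        map = map.headMap(end, true);    // inclusive upper bound
    }
    return Iterators.limit(map.values().iterator(), (int) Math.min(limit, Integer.MAX_VALUE));
}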
@Override
public void writeOutput(Iterable<DocumentResult> documentResults, File outputDirectory)
throws IOException {
// now we compute many "samples" of possible corpora based on our existing corpus. We score each of
// these samples and compute confidence intervals from them
final Random rng = new Random(bootstrapSeed);
final Iterator<Collection<DocumentResult>> bootstrappedResults =
Iterators.limit(BootstrapIterator.forData(documentResults, rng), numBootstrapSamples);
final List<Map<String, BrokenDownSummaryConfusionMatrix<Symbol>>> resultsForSamples =
Lists.newArrayList();
while (bootstrappedResults.hasNext()) {
resultsForSamples.add(combineBreakdowns(
transform(bootstrappedResults.next(), DocumentResult.GetBreakdownMatricesFunction)
.iterator()));
}
final ImmutableMultimap<String, BrokenDownSummaryConfusionMatrix<Symbol>>
resultsByBreakdownType =
combineMapsToMultimap(resultsForSamples);
writeSampledBreakdownsToFiles(resultsByBreakdownType, outputDirectory);
}
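BootstrapIterator above evidently produces an open-ended stream of resampled corpora, which Iterators.limit caps at numBootstrapSamples. A self-contained sketch of that resampling idea (the anonymous iterator below is hypothetical, not BootstrapIterator's actual implementation):

import com.google.common.collect.Iterators;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Random;

// An endless iterator of bootstrap samples (resampling with replacement),
// capped at a fixed number of samples by Iterators.limit.
static <T> Iterator<List<T>> bootstrapSamples(final List<T> data, final Random rng, int numSamples) {
    Iterator<List<T>> endless = new Iterator<List<T>>() {
        @Override
        public boolean hasNext() {
            return true; // resampling never runs out on its own
        }

        @Override
        public List<T> next() {
            List<T> sample = new ArrayList<>(data.size());
            for (int i = 0; i < data.size(); i++) {
                sample.add(data.get(rng.nextInt(data.size())));
            }
            return sample;
        }
    };
    return Iterators.limit(endless, numSamples);
}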
private DirectoryStream<FileAttributes> getFilesForFormatting(FileSystem fs, Path path) throws IOException, FileCountTooLargeException {
final int maxFilesLimit = FileDatasetHandle.getMaxFilesLimit(context);
final DirectoryStream<FileAttributes> baseIterator = FileSystemUtils.listRecursive(fs, path, NO_HIDDEN_FILES);
if (maxFilesLimit <= 0) {
return baseIterator;
}
return new FilterDirectoryStream<FileAttributes>(baseIterator) {
@Override
public Iterator<FileAttributes> iterator() {
return Iterators.limit(super.iterator(), maxFilesLimit);
}
};
}
public <T> Iterable<T> apply(final Iterable<T> s) {
if (s instanceof List) {
return apply((List<T>) s);
} else {
return new Iterable<T>() {
public Iterator<T> iterator() {
Iterator<T> itr = s.iterator();
// Cap at the first 'max' elements, then skip the first 'min':
// the result is elements [min, max) of the source.
itr = Iterators.limit(itr, max);
if (min > 0)
    advance(itr, min);
return itr;
}
};
}
}
@Override
public Iterator<? extends T> iterator(long first, long count) {
Collection<T> data = dataModel.getObject();
if (data == null || data.isEmpty()) return Collections.emptyIterator();
if (filterPredicate != null) data = Collections2.filter(data, filterPredicate);
Iterator<T> it;
final SortParam<S> sortParam = getSort();
if (sortParam != null && sortParam.getProperty() != null) {
    Ordering<T> ordering = Ordering.natural().nullsFirst().onResultOf(new Function<T, Comparable<?>>() {
        @Override
        public Comparable<?> apply(T input) {
            return comparableValue(input, sortParam.getProperty());
        }
    });
    if (!sortParam.isAscending()) ordering = ordering.reverse();
    it = ordering.sortedCopy(data).iterator();
} else {
    it = data.iterator();
}
if (filterPredicate != null) it = Iterators.filter(it, filterPredicate);
if (first > 0) Iterators.advance(it, (int) first);
return count >= 0 ? Iterators.limit(it, (int) count) : it;
}
@Override
public List<InputSplit> getSplits(JobContext context)
throws IOException, InterruptedException {
Path[] inputPaths = FileInputFormat.getInputPaths(context);
if (inputPaths == null || inputPaths.length == 0) {
throw new IOException("No input found!");
}
List<String> allPaths = Lists.newArrayList();
for (Path path : inputPaths) {
// path is a single work unit / multi work unit
FileSystem fs = path.getFileSystem(context.getConfiguration());
FileStatus[] inputs = fs.listStatus(path);
if (inputs == null) {
throw new IOException(String.format("Path %s does not exist.", path));
}
log.info(String.format("Found %d input files at %s: %s", inputs.length, path, Arrays.toString(inputs)));
for (FileStatus input : inputs) {
allPaths.add(input.getPath().toString());
}
}
int maxMappers = getMaxMapper(context.getConfiguration());
int numTasksPerMapper =
allPaths.size() % maxMappers == 0 ? allPaths.size() / maxMappers : allPaths.size() / maxMappers + 1;
List<InputSplit> splits = Lists.newArrayList();
Iterator<String> pathsIt = allPaths.iterator();
while (pathsIt.hasNext()) {
Iterator<String> limitedIterator = Iterators.limit(pathsIt, numTasksPerMapper);
splits.add(new GobblinSplit(Lists.newArrayList(limitedIterator)));
}
return splits;
}
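The loop above chunks a single shared iterator by repeatedly wrapping it in Iterators.limit: each limited view drains at most numTasksPerMapper elements from pathsIt before the next pass. Guava's Iterators.partition expresses the same chunking directly:

import com.google.common.collect.Iterators;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;

public static void main(String[] args) {
    Iterator<String> paths = Arrays.asList("a", "b", "c", "d", "e").iterator();
    Iterator<List<String>> chunks = Iterators.partition(paths, 2);
    while (chunks.hasNext()) {
        System.out.println(chunks.next()); // [a, b], then [c, d], then [e]
    }
}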
@SuppressWarnings({"unchecked", "rawtypes"})
public CloseableIterator<Object> query(
final DataStoreOperations datastoreOperations,
final DataStoreOptions options,
final PersistentAdapterStore adapterStore,
final InternalAdapterStore internalAdapterStore,
final double[] maxResolutionSubsamplingPerDimension,
final double[] targetResolutionPerDimensionForHierarchicalIndex,
final Integer limit,
final Integer queryMaxRangeDecomposition,
final boolean delete) {
final RowReader<?> reader =
getReader(
datastoreOperations,
options,
adapterStore,
internalAdapterStore,
maxResolutionSubsamplingPerDimension,
targetResolutionPerDimensionForHierarchicalIndex,
limit,
queryMaxRangeDecomposition,
getRowTransformer(
options,
adapterStore,
maxResolutionSubsamplingPerDimension,
!isCommonIndexAggregation()),
delete);
if (reader == null) {
return new CloseableIterator.Empty();
}
Iterator it = reader;
if ((limit != null) && (limit > 0)) {
it = Iterators.limit(it, limit);
}
return new CloseableIteratorWrapper(reader, it);
}
void restart() {
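    // allFilesCyclic is presumably a cycling iterator over the same file set; limiting it
    // to files.size() yields exactly one full pass, resuming where the previous pass stopped.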
this.activeFiles = Iterators.limit(this.allFilesCyclic, this.files.size());
}
/**
 * Fetches at most MAX_MBP_RUNS_ROWS rows from each branch and paginates over the combined result.
 *
 * The JVM property MAX_MBP_RUNS_ROWS can be used to tune this value to optimize performance for a given setup.
 */
@Override
public Iterator<BlueRun> iterator(int start, int limit) {
List<BlueRun> c = new ArrayList<>();
List<BluePipeline> branches;
// Check for branch filter
StaplerRequest req = Stapler.getCurrentRequest();
String branchFilter = null;
if (req != null) {
branchFilter = req.getParameter("branch");
}
if (!StringUtils.isEmpty(branchFilter)) {
BluePipeline pipeline = blueMbPipeline.getBranches().get(branchFilter);
if (pipeline != null) {
branches = Collections.singletonList(pipeline);
} else {
branches = Collections.emptyList();
}
} else {
branches = Lists.newArrayList(blueMbPipeline.getBranches().list());
sortBranchesByLatestRun(branches);
}
for (final BluePipeline b : branches) {
BlueRunContainer blueRunContainer = b.getRuns();
if (blueRunContainer == null) {
continue;
}
Iterator<BlueRun> it = blueRunContainer.iterator(0, MAX_MBP_RUNS_ROWS);
int count = 0;
Utils.skip(it, start);
while (it.hasNext() && count++ < limit) {
c.add(it.next());
}
}
Collections.sort(c, LATEST_RUN_START_TIME_COMPARATOR);
return Iterators.limit(c.iterator(), limit);
}
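The javadoc above notes that MAX_MBP_RUNS_ROWS is tunable through a JVM property. A common way to read such a cap is shown below; the default value is an assumption for illustration:

// Read an integer system property with a fallback (the 10_000 default is hypothetical).
private static final int MAX_MBP_RUNS_ROWS =
        Integer.getInteger("MAX_MBP_RUNS_ROWS", 10_000);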
private static <T> Iterator<T> repeatingIterator(T elem, int repeat) {
return Iterators.limit(Iterators.cycle(elem), repeat);
}
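repeatingIterator combines Iterators.cycle (endless repetition) with Iterators.limit (a cap) to yield exactly repeat copies of elem. A quick usage sketch; Collections.nCopies(repeat, elem).iterator() would be a JDK-only equivalent:

// Prints "x" three times, then stops.
Iterator<String> xs = repeatingIterator("x", 3);
while (xs.hasNext()) {
    System.out.println(xs.next());
}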
/**
* Returns the matching table report entries for a report ID and query parameters.
*/
@Override
public Iterable<TableReportEntry> getReportEntries(String reportId, final AllTablesReportQuery query) {
checkNotNull(reportId, "reportId");
final String reportTable = getTableName(reportId);
// Set up several filters based on the query attributes
final Predicate<String> placementFilter = query.getPlacements().isEmpty()
? Predicates.<String>alwaysTrue()
: Predicates.in(query.getPlacements());
final Predicate<Boolean> droppedFilter = query.isIncludeDropped()
? Predicates.<Boolean>alwaysTrue()
: Predicates.equalTo(false);
final Predicate<Boolean> facadeFilter = query.isIncludeFacades()
? Predicates.<Boolean>alwaysTrue()
: Predicates.equalTo(false);
return new Iterable<TableReportEntry>() {
@Override
public Iterator<TableReportEntry> iterator() {
return Iterators.limit(
Iterators.filter(
Iterators.transform(
queryDataStoreForTableReportResults(reportTable, query),
new Function<Map<String, Object>, TableReportEntry>() {
@Nullable
@Override
public TableReportEntry apply(Map<String, Object> map) {
if (Intrinsic.getId(map).startsWith("~")) {
// Skip this row, it's the metadata
return null;
}
return convertToTableReportEntry(
map, placementFilter, droppedFilter, facadeFilter);
}
}),
Predicates.notNull()
),
query.getLimit()
);
}
};
}
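The anonymous Function and Predicates.notNull() chain above predates java.util.stream; the same transform, drop-nulls, limit pipeline can be sketched generically with streams:

import java.util.Iterator;
import java.util.Objects;
import java.util.Spliterator;
import java.util.Spliterators;
import java.util.stream.Stream;
import java.util.stream.StreamSupport;

// Equivalent transform -> filter-out-nulls -> limit pipeline using java.util.stream.
static <A, B> Stream<B> mapFilterLimit(Iterator<A> source,
                                       java.util.function.Function<A, B> mapper,
                                       long limit) {
    Spliterator<A> split = Spliterators.spliteratorUnknownSize(source, Spliterator.ORDERED);
    return StreamSupport.stream(split, false)
            .map(mapper)
            .filter(Objects::nonNull)
            .limit(limit);
}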
@Override
public Row one() {
// If we've already identified pre-fetched rows that can be read locally then return the next row.
if (_fetchedResults.hasNext()) {
return _fetchedResults.next();
}
// Determine how many rows are available without fetching, if any. This can happen if a call to
// fetchMoreResults() made more results locally available since _fetchedResults was created.
int availableWithoutFetching = _delegate.getAvailableWithoutFetching();
if (availableWithoutFetching != 0) {
// Create an iterator for these rows to return them efficiently since we know returning them
// will never throw an adaptive exception.
_fetchedResults = Iterators.limit(_delegate.iterator(), availableWithoutFetching);
return _fetchedResults.next();
}
// At this point either the result set is exhausted or the next row requires fetching more results. Determining
// either of these may potentially raise an exception which requires adapting the fetch size.
Throwable fetchException;
// If an asynchronous pre-fetch from a prior call to fetchMoreResults() failed for an adaptive reason then
// don't try again with the current fetch size.
if (_delegateWithPrefetchFailure == _delegate && _prefetchFailure != null) {
fetchException = _prefetchFailure;
_delegateWithPrefetchFailure = null;
_prefetchFailure = null;
} else {
try {
return _delegate.one();
} catch (Throwable t) {
fetchException = t;
}
}
// This code is only reachable if there was an exception fetching more rows. If appropriate reduce the fetch
// size and try again, otherwise propagate the exception.
if (!reduceFetchSize(fetchException)) {
throw Throwables.propagate(fetchException);
}
// Call again to return the next row.
return one();
}
@Override
public Iterator<SkyKey> iterator() {
return Iterators.limit(Iterators.forArray(arr), size);
}
/**
 * Poor man's {@link Pageable} implementation that does
 * skipping by simply fast-forwarding the {@link Iterator}.
 *
 * @param base base collection
 * @param start starting index requested from the collection
 * @param limit max number of items requested in the page
 * @param <T> type of Pageable item
 * @return iterator with starting index == start and size <= limit
 */
public static <T> Iterator<T> slice(Iterator<T> base, int start, int limit) {
// fast-forward
int skipped = skip(base, start);
if (skipped < start) { // already at the end, nothing to return
    return Iterators.emptyIterator();
}
return Iterators.limit(base, limit);
}
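A quick usage sketch of slice, assuming skip fast-forwards the iterator and returns the number of elements actually skipped:

// Skip the first two elements, then return at most two: yields 3, then 4.
Iterator<Integer> page = slice(Arrays.asList(1, 2, 3, 4, 5).iterator(), 2, 2);
while (page.hasNext()) {
    System.out.println(page.next());
}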