com.google.common.collect.Lists#partition() - Source Code Examples

Listed below are example usages of com.google.common.collect.Lists#partition() drawn from open-source projects; each example names the project and file so the full source can be found on GitHub.
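
Before the project examples, here is a minimal, self-contained sketch of what Lists.partition does (the class and variable names are illustrative): it splits a list into consecutive sublists of the requested size, the final sublist may be smaller, and the result is an unmodifiable view backed by the original list. A non-positive size throws IllegalArgumentException.

import com.google.common.collect.Lists;

import java.util.Arrays;
import java.util.List;

public class PartitionDemo {
    public static void main(String[] args) {
        List<Integer> numbers = Arrays.asList(1, 2, 3, 4, 5, 6, 7);

        // Consecutive chunks of 3; the final chunk keeps the remainder.
        List<List<Integer>> chunks = Lists.partition(numbers, 3);

        System.out.println(chunks);        // [[1, 2, 3], [4, 5, 6], [7]]
        System.out.println(chunks.size()); // 3

        // chunks is an unmodifiable view backed by `numbers`;
        // Lists.partition(numbers, 0) would throw IllegalArgumentException.
    }
}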

Example 1 - Project: metacat, File: MysqlUserMetadataService.java
@Override
public void deleteDefinitionMetadata(
    @Nonnull final List<QualifiedName> names
) {
    try {
        final List<List<QualifiedName>> subLists =
            Lists.partition(names, config.getUserMetadataMaxInClauseItems());
        for (List<QualifiedName> subNames : subLists) {
            _deleteDefinitionMetadata(subNames);
        }
    } catch (Exception e) {
        final String message = String.format("Failed deleting the definition metadata for %s", names);
        log.error(message, e);
        throw new UserMetadataServiceException(message, e);
    }
}
 
Example 2 - Project: foxtrot, File: ElasticsearchQueryStore.java
private void deleteIndices(List<String> indicesToDelete) {
    logger.warn("Deleting Indexes - Indexes - {}", indicesToDelete);
    if(!indicesToDelete.isEmpty()) {
        List<List<String>> subLists = Lists.partition(indicesToDelete, 5);
        for(List<String> subList : subLists) {
            try {
                connection.getClient()
                        .admin()
                        .indices()
                        .prepareDelete(subList.toArray(new String[0]))
                        .execute()
                        .actionGet(TimeValue.timeValueMinutes(5));
                logger.warn("Deleted Indexes - Indexes - {}", subList);
            } catch (Exception e) {
                logger.error("Index deletion failed - Indexes - {}", subList, e);
            }
        }
    }
}
 
Example 3 - Project: nomulus, File: GetAllocationTokenCommand.java
@Override
public void run() {
  ImmutableMap.Builder<String, AllocationToken> builder = new ImmutableMap.Builder<>();
  for (List<String> tokens : Lists.partition(mainParameters, BATCH_SIZE)) {
    ImmutableList<Key<AllocationToken>> tokenKeys =
        tokens.stream().map(t -> Key.create(AllocationToken.class, t)).collect(toImmutableList());
    ofy().load().keys(tokenKeys).forEach((k, v) -> builder.put(k.getName(), v));
  }
  ImmutableMap<String, AllocationToken> loadedTokens = builder.build();
  ImmutableMap<Key<DomainBase>, DomainBase> domains = loadRedeemedDomains(loadedTokens.values());

  for (String token : mainParameters) {
    if (loadedTokens.containsKey(token)) {
      AllocationToken loadedToken = loadedTokens.get(token);
      System.out.println(loadedToken.toString());
      if (loadedToken.getRedemptionHistoryEntry() == null) {
        System.out.printf("Token %s was not redeemed.\n", token);
      } else {
        DomainBase domain =
            domains.get(loadedToken.getRedemptionHistoryEntry().<DomainBase>getParent());
        if (domain == null) {
          System.out.printf("ERROR: Token %s was redeemed but domain can't be loaded.\n", token);
        } else {
          System.out.printf(
              "Token %s was redeemed to create domain %s at %s.\n",
              token, domain.getDomainName(), domain.getCreationTime());
        }
      }
    } else {
      System.out.printf("ERROR: Token %s does not exist.\n", token);
    }
    System.out.println();
  }
}
 
Example 4 - Project: levelup-java-examples, File: PartitionList.java
@Test
public void partition_list_guava() {

	List<List<String>> decisionsBy2 = Lists.partition(playerDecisions, 2);

	logger.info(decisionsBy2);

	assertThat(decisionsBy2.get(0), hasItems("Hit", "Stand"));
	assertThat(decisionsBy2.get(1), hasItems("Double down", "Split"));
	assertThat(decisionsBy2.get(2), hasItems("Surrender"));
}
 
Example 5 - Project: dhis2-core, File: SmsMessageSender.java
private OutboundMessageResponse sendMessage( String subject, String text, Set<String> recipients,
    SmsGatewayConfig gatewayConfig )
{
    OutboundMessageResponse status = null;

    for ( SmsGateway smsGateway : smsGateways )
    {
        if ( smsGateway.accept( gatewayConfig ) )
        {
            List<String> temp = new ArrayList<>( recipients );

            List<List<String>> slices = Lists.partition( temp, MAX_RECIPIENTS_ALLOWED );

            for ( List<String> to: slices )
            {
                log.info( "Sending SMS to " + to );

                status = smsGateway.send( subject, text, new HashSet<>( to ), gatewayConfig );

                handleResponse( status );
            }

            return status;
        }
    }

    return new OutboundMessageResponse( NO_CONFIG, GatewayResponse.NO_GATEWAY_CONFIGURATION, false );
}
 
Example 6 - Project: hawkbit, File: JpaControllerManagement.java
private Void updateLastTargetQueries(final String tenant, final List<TargetPoll> polls) {
    LOG.debug("Persist {} targetqueries.", polls.size());

    final List<List<String>> pollChunks = Lists.partition(
            polls.stream().map(TargetPoll::getControllerId).collect(Collectors.toList()),
            Constants.MAX_ENTRIES_IN_STATEMENT);

    pollChunks.forEach(chunk -> {
        setLastTargetQuery(tenant, System.currentTimeMillis(), chunk);
        chunk.forEach(controllerId -> afterCommit.afterCommit(() -> eventPublisherHolder.getEventPublisher()
                .publishEvent(new TargetPollEvent(controllerId, tenant, eventPublisherHolder.getApplicationId()))));
    });

    return null;
}
 
Example 7
@Test
public final void givenListPartitioned_whenOriginalListIsModified_thenPartitionsChangeAsWell() {
    // Given
    final List<Integer> intList = Lists.newArrayList(1, 2, 3, 4, 5, 6, 7, 8);
    final List<List<Integer>> subSets = Lists.partition(intList, 3);

    // When
    intList.add(9);

    // Then
    final List<Integer> lastPartition = subSets.get(2);
    final List<Integer> expectedLastPartition = Lists.<Integer> newArrayList(7, 8, 9);
    assertThat(lastPartition, equalTo(expectedLastPartition));
}
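
The test above passes precisely because Lists.partition returns a view over the original list. When partitions must stay stable after the source list changes, each chunk can be copied eagerly; a minimal sketch under that assumption, reusing intList from the test:

import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;

import java.util.List;
import java.util.stream.Collectors;

// Materialize each chunk so later changes to intList no longer show up here.
List<List<Integer>> snapshot = Lists.partition(intList, 3).stream()
        .map(ImmutableList::copyOf)
        .collect(Collectors.toList());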
 
Example 8 - Project: hmftools, File: AmberApplication.java
@NotNull
private ListMultimap<Chromosome, TumorContamination> contamination(@NotNull final SamReaderFactory readerFactory,
        @NotNull final ListMultimap<Chromosome, BaseDepth> normalHomSites) throws ExecutionException, InterruptedException {
    final int partitionSize = Math.max(config.minPartition(), normalHomSites.values().size() / config.threadCount());

    LOGGER.info("Processing {} homozygous sites in tumor bam {} for contamination", normalHomSites.size(), config.tumorBamPath());
    final AmberTaskCompletion completion = new AmberTaskCompletion();

    final List<Future<TumorContaminationEvidence>> futures = Lists.newArrayList();
    for (final Chromosome chromosome : normalHomSites.keySet()) {
        for (final List<BaseDepth> chromosomeBafPoints : Lists.partition(normalHomSites.get(chromosome), partitionSize)) {
            if (!chromosomeBafPoints.isEmpty()) {
                final String contig = chromosomeBafPoints.get(0).chromosome();
                final TumorContaminationEvidence evidence = new TumorContaminationEvidence(config.typicalReadDepth(),
                        config.minMappingQuality(),
                        config.minBaseQuality(),
                        contig,
                        config.tumorBamPath(),
                        readerFactory,
                        chromosomeBafPoints);
                futures.add(executorService.submit(completion.task(evidence)));
            }
        }
    }

    final ListMultimap<Chromosome, TumorContamination> result = ArrayListMultimap.create();
    getFuture(futures).forEach(x -> result.putAll(HumanChromosome.fromString(x.contig()), x.evidence()));

    return result;
}
 
Example 9 - Project: incubator-gobblin, File: SalesforceSource.java
/**
 *  Create work units from a bulk job id.
 *  The work units do not contain a query in this case; instead, each one carries the bulk job id
 *  and a list of `batchId:resultId` pairs, so all the extractor has to do is fetch the result-set files.
 */
private List<WorkUnit> createWorkUnits(
    SourceEntity sourceEntity,
    SourceState state,
    SalesforceExtractor.ResultFileIdsStruct resultFileIdsStruct
) {
  String nameSpaceName = state.getProp(ConfigurationKeys.EXTRACT_NAMESPACE_NAME_KEY);
  Extract.TableType tableType = Extract.TableType.valueOf(state.getProp(ConfigurationKeys.EXTRACT_TABLE_TYPE_KEY).toUpperCase());
  String outputTableName = sourceEntity.getDestTableName();
  Extract extract = createExtract(tableType, nameSpaceName, outputTableName);

  List<WorkUnit> workUnits = Lists.newArrayList();
  int partitionNumber = state.getPropAsInt(SOURCE_MAX_NUMBER_OF_PARTITIONS, 1);
  List<BatchIdAndResultId> batchResultIds = resultFileIdsStruct.getBatchIdAndResultIdList();
  int total = batchResultIds.size();

  // Each partition holds ceil(total / partitionNumber) elements; in integer arithmetic
  // that is (total + partitionNumber - 1) / partitionNumber.
  int sizeOfPartition = (total + partitionNumber - 1) / partitionNumber;
  List<List<BatchIdAndResultId>> partitionedResultIds = Lists.partition(batchResultIds, sizeOfPartition);
  log.info("----partition strategy: max-parti={}, size={}, actual-parti={}, total={}", partitionNumber, sizeOfPartition, partitionedResultIds.size(), total);

  for (List<BatchIdAndResultId> resultIds : partitionedResultIds) {
    WorkUnit workunit = new WorkUnit(extract);
    String bulkJobId = resultFileIdsStruct.getJobId();
    workunit.setProp(PK_CHUNKING_JOB_ID, bulkJobId);
    String resultIdStr = resultIds.stream().map(x -> x.getBatchId() + ":" + x.getResultId()).collect(Collectors.joining(","));
    workunit.setProp(PK_CHUNKING_BATCH_RESULT_ID_PAIRS, resultIdStr);
    workunit.setProp(ConfigurationKeys.SOURCE_ENTITY, sourceEntity.getSourceEntityName());
    workunit.setProp(ConfigurationKeys.EXTRACT_TABLE_NAME_KEY, sourceEntity.getDestTableName());
    workunit.setProp(WORK_UNIT_STATE_VERSION_KEY, CURRENT_WORK_UNIT_STATE_VERSION);
    addLineageSourceInfo(state, sourceEntity, workunit);
    workUnits.add(workunit);
  }
  return workUnits;
}
 
Example 10
@Test
public final void givenList_whenPartitioningIntoNSublists_thenCorrect() {
    // Given
    final List<Integer> intList = Lists.newArrayList(1, 2, 3, 4, 5, 6, 7, 8);

    // When
    final List<List<Integer>> subSets = Lists.partition(intList, 3);

    // Then
    final List<Integer> lastPartition = subSets.get(2);
    final List<Integer> expectedLastPartition = Lists.<Integer> newArrayList(7, 8);
    assertThat(subSets.size(), equalTo(3));
    assertThat(lastPartition, equalTo(expectedLastPartition));
}
 
Example 11 - Project: fiat, File: RedisPermissionsRepository.java
private Table<String, ResourceType, Response<Map<String, String>>> getAllFromRedis(
    Set<String> userIds) {
  if (userIds.size() == 0) {
    return HashBasedTable.create();
  }

  try {
    final Table<String, ResourceType, Response<Map<String, String>>> responseTable =
        ArrayTable.create(
            userIds,
            new ArrayIterator<>(
                resources.stream().map(Resource::getResourceType).toArray(ResourceType[]::new)));
    for (List<String> userIdSubset : Lists.partition(new ArrayList<>(userIds), 10)) {
      redisClientDelegate.withMultiKeyPipeline(
          p -> {
            for (String userId : userIdSubset) {
              resources.stream()
                  .map(Resource::getResourceType)
                  .forEach(
                      r -> {
                        responseTable.put(userId, r, p.hgetAll(userKey(userId, r)));
                      });
            }
            p.sync();
          });
    }
    return responseTable;
  } catch (Exception e) {
    log.error("Storage exception reading all entries.", e);
  }
  return null;
}
 
Example 12 - Project: score, File: RunningExecutionPlanServiceImpl.java
@Override
@Transactional
public int deleteRunningExecutionPlans(Collection<String> executionIds) {
    int count = 0;
    List<List<String>> executionIdsPartitioned = Lists.partition(new ArrayList<>(executionIds), IN_CLAUSE_LIMIT);
    for (List<String> list : executionIdsPartitioned) {
        count += runningExecutionPlanRepository.deleteByExecutionIds(list);
        runningExecutionPlanRepository.flush();
    }
    return count;
}
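
Examples 1, 12, and 14 share the same motivation: databases limit how many values an SQL IN (...) clause may contain (Oracle, for instance, caps it at 1,000 expressions), so the key list is partitioned and one statement is issued per chunk. A plain-JDBC sketch of the pattern follows; the table and column names are hypothetical.

import com.google.common.collect.Lists;

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.List;
import java.util.stream.Collectors;

public class InClauseBatcher {

    // Hypothetical example: deletes rows whose execution_id is in `ids`,
    // issuing at most `inClauseLimit` ids per DELETE statement.
    public static int deleteByIds(Connection conn, List<String> ids, int inClauseLimit)
            throws SQLException {
        int deleted = 0;
        for (List<String> chunk : Lists.partition(ids, inClauseLimit)) {
            // One "?" placeholder per id in this chunk.
            String placeholders = chunk.stream().map(id -> "?").collect(Collectors.joining(", "));
            try (PreparedStatement ps = conn.prepareStatement(
                    "DELETE FROM running_execution_plan WHERE execution_id IN (" + placeholders + ")")) {
                for (int i = 0; i < chunk.size(); i++) {
                    ps.setString(i + 1, chunk.get(i));
                }
                deleted += ps.executeUpdate();
            }
        }
        return deleted;
    }
}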
 
Example 13 - Project: levelup-java-examples, File: ListsExample.java
/**
 * Partition list
 */
@Test
public void partition_list() {

    List<String> myList = Lists.newArrayList("one", "two", "three");

    List<List<String>> myListBy1 = Lists.partition(myList, 1);

    assertThat(myListBy1.get(0), hasItems("one"));
    assertThat(myListBy1.get(1), hasItems("two"));
    assertThat(myListBy1.get(2), hasItems("three"));
}
 
Example 14 - Project: metacat, File: MysqlUserMetadataService.java
@Override
public void deleteMetadata(final String userId, final List<HasMetadata> holders) {
    try {
        final List<List<HasMetadata>> subLists =
            Lists.partition(holders, config.getUserMetadataMaxInClauseItems());
        for (List<HasMetadata> hasMetadatas : subLists) {
            final List<QualifiedName> names = hasMetadatas.stream()
                .filter(m -> m instanceof HasDefinitionMetadata)
                .map(m -> ((HasDefinitionMetadata) m).getDefinitionName())
                .collect(Collectors.toList());
            if (!names.isEmpty()) {
                _deleteDefinitionMetadata(names);
            }
            if (config.canSoftDeleteDataMetadata()) {
                final List<String> uris = hasMetadatas.stream()
                    .filter(m -> m instanceof HasDataMetadata && ((HasDataMetadata) m).isDataExternal())
                    .map(m -> ((HasDataMetadata) m).getDataUri()).collect(Collectors.toList());
                if (!uris.isEmpty()) {
                    _softDeleteDataMetadata(userId, uris);
                }
            }
        }
    } catch (Exception e) {
        log.error("Failed deleting metadatas", e);
        throw new UserMetadataServiceException("Failed deleting metadatas", e);
    }
}
 
Example 15 - Project: dhis2-core, File: AbstractEventService.java
@Transactional
@Override
public ImportSummaries addEvents( List<Event> events, ImportOptions importOptions, boolean clearSession )
{
    ImportSummaries importSummaries = new ImportSummaries();
    importOptions = updateImportOptions( importOptions );

    List<Event> validEvents = resolveImportableEvents( events, importSummaries );

    List<List<Event>> partitions = Lists.partition( validEvents, FLUSH_FREQUENCY );

    for ( List<Event> _events : partitions )
    {
        reloadUser( importOptions );
        prepareCaches( importOptions.getUser(), _events );

        for ( Event event : _events )
        {
            importSummaries.addImportSummary( addEvent( event, importOptions, true ) );
        }

        if ( clearSession && events.size() >= FLUSH_FREQUENCY )
        {
            clearSession( importOptions.getUser() );
        }
    }

    updateEntities( importOptions.getUser() );

    return importSummaries;
}
 
Example 16 - Project: dhis2-core, File: Validator.java
/**
 * Evaluates validation rules for a collection of organisation units. This
 * method breaks the job down by organisation unit. It assigns the
 * evaluation for each organisation unit to a task that can be evaluated
 * independently in a multi-threaded environment.
 * <p/>
 * Return early with no results if there are no organisation units
 * or no validation rules.
 *
 * @return a collection of any validations that were found
 */
public static Collection<ValidationResult> validate( ValidationRunContext context,
    ApplicationContext applicationContext, AnalyticsService analyticsService )
{
    CategoryService categoryService = applicationContext.getBean( CategoryService.class );
            
    int threadPoolSize = getThreadPoolSize( context );

    if ( threadPoolSize == 0 || context.getPeriodTypeXs().isEmpty() )
    {
        return context.getValidationResults();
    }

    ExecutorService executor = Executors.newFixedThreadPool( threadPoolSize );

    List<List<OrganisationUnit>> orgUnitLists = Lists.partition( context.getOrgUnits(), ValidationRunContext.ORG_UNITS_PER_TASK );

    for ( List<OrganisationUnit> orgUnits : orgUnitLists )
    {
        ValidationTask task = (ValidationTask) applicationContext.getBean( DataValidationTask.NAME );
        task.init( orgUnits, context, analyticsService );

        executor.execute( task );
    }

    executor.shutdown();

    try
    {
        executor.awaitTermination( 6, TimeUnit.HOURS );
    }
    catch ( InterruptedException e )
    {
        executor.shutdownNow();
    }

    reloadAttributeOptionCombos( context.getValidationResults(), categoryService );

    return context.getValidationResults();
}
 
Example 17 - Project: StatsAgg, File: GaugesDao.java
public boolean batchUpsert(List<Gauge> gauges) {
    
    if ((gauges == null) || gauges.isEmpty()) {
        return false;
    }

    try {
        if (DatabaseConfiguration.getType() == DatabaseConfiguration.MYSQL) {
            boolean wasAllUpsertSuccess = true;
            List<List<Gauge>> gaugesPartitions = Lists.partition(gauges, 1000);

            for (List<Gauge> gaugesPartition : gaugesPartitions) {
                List<Object> parameters = new ArrayList<>();

                for (Gauge gauge : gaugesPartition) {
                    parameters.add(gauge.getBucketSha1());
                    parameters.add(gauge.getBucket());
                    parameters.add(gauge.getMetricValue());
                    parameters.add(gauge.getLastModified());
                }

                boolean wasUpsertSuccess = genericDmlStatement(GaugesSql.generateBatchUpsert(gaugesPartition.size()), parameters);
                if (!wasUpsertSuccess) wasAllUpsertSuccess = false;
            }

            return wasAllUpsertSuccess;
        }
        else {
            return upsert(gauges, true);
        }
    }
    catch (Exception e) {
        logger.error(e.toString() + System.lineSeparator() + StackTrace.getStringFromStackTrace(e));
        return false;
    }
    
}
 
Example 18 - Project: nomulus, File: ListDomainsAction.java
@Override
public ImmutableSet<DomainBase> loadObjects() {
  checkArgument(!tlds.isEmpty(), "Must specify TLDs to query");
  assertTldsExist(tlds);
  DateTime now = clock.nowUtc();
  ImmutableList.Builder<DomainBase> domainsBuilder = new ImmutableList.Builder<>();
  for (List<String> tldsBatch : Lists.partition(tlds.asList(), maxNumSubqueries)) {
    domainsBuilder.addAll(
        ofy()
            .load()
            .type(DomainBase.class)
            .filter("tld in", tldsBatch)
            // Get the N most recently created domains (requires ordering in descending order).
            .order("-creationTime")
            .limit(limit)
            .list()
            .stream()
            .map(EppResourceUtils.transformAtTime(now))
            // Deleted entities must be filtered out post-query because queries don't allow
            // ordering with two filters.
            .filter(d -> d.getDeletionTime().isAfter(now))
            .collect(toImmutableList()));
  }
  // Combine the batches together by sorting all domains together with newest first, applying the
  // limit, and then reversing for display order.
  return ImmutableSet.copyOf(
      domainsBuilder
          .build()
          .stream()
          .sorted(comparing(EppResource::getCreationTime).reversed())
          .limit(limit)
          .collect(toImmutableList())
          .reverse());
}
 
Example 19 - Project: JuniperBot, File: AudioMessageManager.java
public void onQueue(PlaybackInstance instance, MessageChannel sourceChannel, BotContext context, int pageNum) {
    List<TrackRequest> requests = instance.getQueue();
    if (requests.isEmpty()) {
        onEmptyQueue(sourceChannel);
        return;
    }

    final int pageSize = 25;
    List<List<TrackRequest>> parts = Lists.partition(requests, pageSize);
    final int totalPages = parts.size();
    final int offset = (pageNum - 1) * pageSize + 1 + instance.getCursor();

    final long totalDuration = requests.stream()
            .filter(Objects::nonNull)
            .map(TrackRequest::getTrack)
            .filter(Objects::nonNull)
            .mapToLong(AudioTrack::getDuration).sum();

    if (pageNum > totalPages) {
        onQueueError(sourceChannel, "discord.command.audio.queue.list.totalPages", parts.size());
        return;
    }
    List<TrackRequest> pageRequests = parts.get(pageNum - 1);

    EmbedBuilder builder = getQueueMessage();
    if (instance.getCursor() > 0) {
        builder.setDescription(messageService.getMessage("discord.command.audio.queue.list.playlist.played",
                instance.getCursor(), commonProperties.getBranding().getWebsiteUrl(), instance.getPlaylistUuid()));
    } else {
        builder.setDescription(messageService.getMessage("discord.command.audio.queue.list.playlist",
                commonProperties.getBranding().getWebsiteUrl(), instance.getPlaylistUuid()));
    }

    addQueue(builder, instance, pageRequests, offset, false);

    String queueCommand = messageService.getMessageByLocale("discord.command.queue.key", context.getCommandLocale());

    builder.setFooter(totalPages > 1
            ? messageService.getMessage("discord.command.audio.queue.list.pageFooter",
            pageNum, totalPages, requests.size(), CommonUtils.formatDuration(totalDuration),
            context.getConfig().getPrefix(), queueCommand)
            : messageService.getMessage("discord.command.audio.queue.list.footer",
            requests.size(), CommonUtils.formatDuration(totalDuration)), null);
    messageService.sendMessageSilent(sourceChannel::sendMessage, builder.build());
}
 
Example 20 - Project: EasySRL, File: ClassifierTrainer.java
@Override
public IterationResult getValues(final double[] featureWeights) {
	final Collection<Callable<IterationResult>> tasks = new ArrayList<>();

	int totalCorrect = 0;
	final int batchSize = trainingData.size() / numThreads;
	for (final List<CachedTrainingExample<L>> batch : Lists.partition(trainingData, batchSize)) {
		tasks.add(new Callable<IterationResult>() {
			@Override
			public IterationResult call() throws Exception {
				return lossFunction.getValues(featureWeights, batch);
			}
		});
	}

	final ExecutorService executor = Executors.newFixedThreadPool(numThreads);
	List<Future<IterationResult>> results;
	try {
		results = executor.invokeAll(tasks);

		// FunctionValues total = new FunctionValues(0.0, new
		// double[featureWeights.length]);

		final double[] totalGradient = new double[featureWeights.length];
		double totalLoss = 0.0;

		for (final Future<IterationResult> result : results) {
			final IterationResult values = result.get();
			totalLoss += values.functionValue;
			Util.add(totalGradient, values.gradient);
			totalCorrect += values.correctPredictions;
		}
		executor.shutdown(); // always reclaim resources

		System.out.println();

		System.out.println("Training accuracy=" + Util.twoDP(100.0 * totalCorrect / trainingData.size()));
		System.out.println("Loss=" + Util.twoDP(totalLoss));
		return new IterationResult(totalCorrect, totalLoss, totalGradient);

	} catch (InterruptedException | ExecutionException e) {
		throw new RuntimeException(e);
	}
}
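
One caveat in getValues above: trainingData.size() / numThreads uses integer division, so when the training set is smaller than the thread count, batchSize becomes 0 and Lists.partition throws IllegalArgumentException. A defensive variant, shown as a sketch reusing the names from the snippet above rather than the project's actual code:

// Integer division can yield 0 when trainingData.size() < numThreads;
// clamp to at least 1 so Lists.partition does not throw.
final int batchSize = Math.max(1, trainingData.size() / numThreads);
for (final List<CachedTrainingExample<L>> batch : Lists.partition(trainingData, batchSize)) {
    // submit one task per batch, as in the original method
}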