org.apache.commons.lang3.tuple.Pair#getLeft() source code examples

Listed below are example usages of org.apache.commons.lang3.tuple.Pair#getLeft(), collected from open-source projects on GitHub.
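As a quick orientation before the project examples, here is a minimal, self-contained sketch of the API in question (the class name PairDemo and the sample values are arbitrary; Pair.of, getLeft() and getRight() are the standard commons-lang3 methods):

import org.apache.commons.lang3.tuple.Pair;

public class PairDemo {
    public static void main(String[] args) {
        // Pair.of(...) creates an immutable pair; getLeft()/getRight() return its two components.
        Pair<String, Integer> pair = Pair.of("count", 42);
        String key = pair.getLeft();     // "count"
        Integer value = pair.getRight(); // 42
        System.out.println(key + " = " + value);
    }
}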

Example 1  Project: TagRec  File: SustainCalculator.java
private double calculateResourceActivations(int userId, int resource, double beta, double r){

	Set<Integer> topics = this.resTopicTrainList.get(resource).keySet();
	
	// Vector, write 1 for every existing topic
	GVector currentResource = new GVector(this.numberOfTopics);
	currentResource.zero();
	for (Integer t : topics)
		currentResource.setElement(t, 1);
	
	double maxActivation = 0.0;
	double totalActivation = 0.0;
	
	for (GVector c : this.userClusterList.get(userId)){
		Pair<Double, GVector> activationPair = this.calculateActivation(currentResource, c, this.userLambdaList.get(userId), r);
		if (activationPair.getLeft() > maxActivation) {
			maxActivation = activationPair.getLeft();
		}
		totalActivation += activationPair.getLeft();
	}
	
	maxActivation = Math.pow(maxActivation, beta)/Math.pow(totalActivation, beta)*maxActivation;
	return maxActivation;
}
 
Example 2  Project: cloudbreak  File: SdxServiceTest.java
@Test
void testCreateNOTInternalSdxClusterFromLightDutyTemplateWhenBaseLocationSpecifiedShouldCreateStackRequestWithSettedUpBaseLocation()
        throws IOException, TransactionExecutionException {
    when(transactionService.required(isA(Supplier.class))).thenAnswer(invocation -> invocation.getArgument(0, Supplier.class).get());
    String lightDutyJson = FileReaderUtils.readFileFromClasspath("/runtime/7.1.0/aws/light_duty.json");
    when(cdpConfigService.getConfigForKey(any())).thenReturn(JsonUtil.readValue(lightDutyJson, StackV4Request.class));
    //doNothing().when(cloudStorageLocationValidator.validate("s3a://some/dir", ));
    SdxClusterRequest sdxClusterRequest = new SdxClusterRequest();
    sdxClusterRequest.setClusterShape(LIGHT_DUTY);
    sdxClusterRequest.setEnvironment("envir");
    SdxCloudStorageRequest cloudStorage = new SdxCloudStorageRequest();
    cloudStorage.setFileSystemType(FileSystemType.S3);
    cloudStorage.setBaseLocation("s3a://some/dir");
    cloudStorage.setS3(new S3CloudStorageV1Parameters());
    sdxClusterRequest.setCloudStorage(cloudStorage);
    long id = 10L;
    when(sdxClusterRepository.save(any(SdxCluster.class))).thenAnswer(invocation -> {
        SdxCluster sdxWithId = invocation.getArgument(0, SdxCluster.class);
        sdxWithId.setId(id);
        return sdxWithId;
    });
    mockEnvironmentCall(sdxClusterRequest, CloudPlatform.AWS);
    Pair<SdxCluster, FlowIdentifier> result = underTest.createSdx(USER_CRN, CLUSTER_NAME, sdxClusterRequest, null);
    SdxCluster createdSdxCluster = result.getLeft();
    assertEquals("s3a://some/dir", createdSdxCluster.getCloudStorageBaseLocation());
}
 
Example 3  Project: terrapin  File: OnlineOfflineStateModelFactory.java
@Transition(from = "OFFLINE", to = "ONLINE")
public void onBecomeOnlineFromOffline(Message message,
                                      NotificationContext context) {
  Pair<String, String> hdfsPathAndPartition = getHdfsPathAndPartitionNum(message);
  String hdfsPath = hdfsPathAndPartition.getLeft();
  LOG.info("Opening " + hdfsPath);
  try {
    // TODO(varun): Maybe retry here.
    HColumnDescriptor family = new HColumnDescriptor(Constants.HFILE_COLUMN_FAMILY);
    family.setBlockCacheEnabled(isBlockCacheEnabled);
    Reader r = readerFactory.createHFileReader(hdfsPath, new CacheConfig(conf, family));
    resourcePartitionMap.addReader(
        message.getResourceName(), hdfsPathAndPartition.getRight(), r);
  } catch (IOException e) {
    throw new RuntimeException(e);
  }
}
 
Example 4  Project: AvatarMQ  File: SendMessageCache.java
public void parallelDispatch(LinkedList<MessageDispatchTask> list) {
    List<Callable<Void>> tasks = new ArrayList<Callable<Void>>();
    int startPosition = 0;
    Pair<Integer, Integer> pair = calculateBlocks(list.size(), list.size());
    int numberOfThreads = pair.getRight();
    int blocks = pair.getLeft();

    for (int i = 0; i < numberOfThreads; i++) {
        MessageDispatchTask[] task = new MessageDispatchTask[blocks];
        phaser.register();
        System.arraycopy(list.toArray(), startPosition, task, 0, blocks);
        tasks.add(new SendMessageTask(phaser, task));
        startPosition += blocks;
    }

    ExecutorService executor = Executors.newFixedThreadPool(numberOfThreads);
    for (Callable<Void> element : tasks) {
        executor.submit(element);
    }
}
 
Example 5  Project: alf.io  File: EventManagerIntegrationTest.java
@Test
public void testValidationBoundedFailedPendingTickets() {
    List<TicketCategoryModification> categories = Collections.singletonList(
        new TicketCategoryModification(null, "default", 10,
            new DateTimeModification(LocalDate.now(), LocalTime.now()),
            new DateTimeModification(LocalDate.now(), LocalTime.now()),
            DESCRIPTION, BigDecimal.TEN, false, "", true, null, null, null, null, null, 0, null, null, AlfioMetadata.empty()));
    Pair<Event, String> pair = initEvent(categories, organizationRepository, userManager, eventManager, eventRepository);
    Event event = pair.getLeft();
    String username = pair.getRight();

    TicketCategory category = ticketCategoryRepository.findAllTicketCategories(event.getId()).get(0);
    Map<String, String> categoryDescription = ticketCategoryDescriptionRepository.descriptionForTicketCategory(category.getId());

    List<Integer> tickets = ticketRepository.selectTicketInCategoryForUpdate(event.getId(), category.getId(), 1, Collections.singletonList(Ticket.TicketStatus.FREE.name()));
    String reservationId = "12345678";
    ticketReservationRepository.createNewReservation(reservationId, ZonedDateTime.now(), DateUtils.addDays(new Date(), 1), null, "en", event.getId(), event.getVat(), event.isVatIncluded(), event.getCurrency());
    ticketRepository.reserveTickets(reservationId, tickets, category.getId(), "en", 100, "CHF");
    TicketCategoryModification tcm = new TicketCategoryModification(category.getId(), category.getName(), 10,
        DateTimeModification.fromZonedDateTime(category.getUtcInception()),
        DateTimeModification.fromZonedDateTime(category.getUtcExpiration()),
        categoryDescription, category.getPrice(), false, "", false, null, null, null, null, null, 0, null, null, AlfioMetadata.empty());
    Result<TicketCategory> result = eventManager.updateCategory(category.getId(), event, tcm, username);
    assertFalse(result.isSuccess());
}
 
Example 6  Project: gatk  File: CalculateContamination.java
@Override
public Object doWork() {
    final Pair<String, List<PileupSummary>> sampleAndsites = PileupSummary.readFromFile(inputPileupSummariesTable);
    final String sample = sampleAndsites.getLeft();
    final List<PileupSummary> sites = filterSitesByCoverage(sampleAndsites.getRight());

    // use the matched normal to genotype (i.e. find hom alt sites) if available
    final List<PileupSummary> genotypingSites = matchedPileupSummariesTable == null ? sites :
            filterSitesByCoverage(PileupSummary.readFromFile(matchedPileupSummariesTable).getRight());

    final ContaminationModel genotypingModel = new ContaminationModel(genotypingSites);

    if (outputTumorSegmentation != null) {
        final ContaminationModel tumorModel = matchedPileupSummariesTable == null ? genotypingModel : new ContaminationModel(sites);
        MinorAlleleFractionRecord.writeToFile(sample, tumorModel.segmentationRecords(), outputTumorSegmentation);
    }

    final Pair<Double, Double> contaminationAndError = genotypingModel.calculateContaminationFromHoms(sites);
    ContaminationRecord.writeToFile(Arrays.asList(
            new ContaminationRecord(sample, contaminationAndError.getLeft(), contaminationAndError.getRight())), outputTable);

    return "SUCCESS";
}
 
Example 7  Project: owltools  File: GpadGpiObjectsBuilder.java
private boolean addEvidenceCode(String eco, GeneAnnotation ga, SimpleEcoMapper mapper) {
	Pair<String,String> pair = mapper.getGoCode(eco);
	boolean added = false;
	if (pair != null) {
		String goCode = pair.getLeft();
		if (goCode != null) {
			ga.setEvidence(goCode, eco);
			added = true;
		}
	}
	if (added == false) {
		boolean fatal = gpadIncludeUnmappedECO == false;
		reportEvidenceIssue(eco, "No corresponding GO evidence code found", fatal);
		if (fatal) {
			return false;
		}
		// fallback always add the ECO class at least
		ga.setEvidence(null, eco);
	}
	return true;
}
 
Example 8  Project: litematica  File: AreaSelection.java
protected void updateCalculatedOrigin()
{
    Pair<BlockPos, BlockPos> pair = PositionUtils.getEnclosingAreaCorners(this.subRegionBoxes.values());

    if (pair != null)
    {
        this.calculatedOrigin = pair.getLeft();
    }
    else
    {
        this.calculatedOrigin = BlockPos.ORIGIN;
    }

    this.calculatedOriginDirty = false;
}
 
Example 9
@Override
public TableReadContext load(TableRuntimeContext tableRuntimeContext) throws StageException, SQLException {
  Pair<String, List<Pair<Integer, String>>> queryAndParamValToSet;
  final boolean nonIncremental = tableRuntimeContext.isUsingNonIncrementalLoad();
  if (nonIncremental) {
    final String baseTableQuery = OffsetQueryUtil.buildBaseTableQuery(tableRuntimeContext, quoteChar);
    queryAndParamValToSet = Pair.of(baseTableQuery, Collections.emptyList());
  } else {
    queryAndParamValToSet = OffsetQueryUtil.buildAndReturnQueryAndParamValToSet(
        tableRuntimeContext,
        offsets.get(tableRuntimeContext.getOffsetKey()),
        quoteChar,
        tableJdbcELEvalContext
    );
  }

  if (isReconnect) {
    LOGGER.debug("close the connection");
    connectionManager.closeConnection();
  }

  return new TableReadContext(
      connectionManager.getVendor(),
      connectionManager.getConnection(),
      queryAndParamValToSet.getLeft(),
      queryAndParamValToSet.getRight(),
      fetchSize,
      nonIncremental
  );
}
 
Example 10  Project: alf.io  File: TicketReservationManagerUnitTest.java
@Test
public void calcReservationCostWithASVatNotIncludedASNone() {
    initReservationWithAdditionalServices(false, AdditionalService.VatType.NONE, 10, 10);
    //fourth: event price vat not included, additional service VAT n/a
    Pair<TotalPrice, Optional<PromoCodeDiscount>> priceAndDiscount = manager.totalReservationCostWithVAT(TICKET_RESERVATION_ID);
    TotalPrice fourth = priceAndDiscount.getLeft();
    assertTrue(priceAndDiscount.getRight().isEmpty());
    assertEquals(21, fourth.getPriceWithVAT());
    assertEquals(1, fourth.getVAT());
}
 
Example 11  Project: pulsar  File: EntryCacheImpl.java
@Override
public Pair<Integer, Long> evictEntries(long sizeToFree) {
    checkArgument(sizeToFree > 0);
    Pair<Integer, Long> evicted = entries.evictLeastAccessedEntries(sizeToFree);
    int evictedEntries = evicted.getLeft();
    long evictedSize = evicted.getRight();
    if (log.isDebugEnabled()) {
        log.debug(
                "[{}] Doing cache eviction of at least {} Mb -- Deleted {} entries - Total size deleted: {} Mb "
                        + " -- Current Size: {} Mb",
                ml.getName(), sizeToFree / MB, evictedEntries, evictedSize / MB, entries.getSize() / MB);
    }
    manager.entriesRemoved(evictedSize);
    return evicted;
}
 
Example 12  Project: pulsar  File: ManagedCursorImpl.java
void initializeCursorPosition(Pair<PositionImpl, Long> lastPositionCounter) {
    readPosition = ledger.getNextValidPosition(lastPositionCounter.getLeft());
    markDeletePosition = lastPositionCounter.getLeft();

    // Initialize the counter such that the difference between the messages written on the ML and the
    // messagesConsumed is 0, to ensure the initial backlog count is 0.
    messagesConsumedCounter = lastPositionCounter.getRight();
}
 
Example 13  Project: maze-harvester  File: PaperOptions.java
private static double parseMargin(String specifier) throws ParseException {
  Pair<Double, String> parts = parseUnits(specifier);
  double scale = parts.getLeft();
  double measure = Double.parseDouble(parts.getRight());
  return measure * scale;
}
 
Example 14  Project: conductor  File: ForkJoinDynamicTaskMapper.java
/**
 * This method gets the list of tasks that need to be scheduled when the task to be scheduled is of type {@link TaskType#FORK_JOIN_DYNAMIC}.
 * Creates a Fork Task, followed by the Dynamic tasks and a final JOIN task.
 * <p>The definitions of the dynamic forks that need to be scheduled are available in the {@link WorkflowTask#getInputParameters()}
 * which are accessed using {@link TaskMapperContext#getTaskToSchedule()}. The dynamic fork task definitions are referenced by a key, via either
 * {@link WorkflowTask#getDynamicForkTasksParam()} or {@link WorkflowTask#getDynamicForkJoinTasksParam()}
 * </p>
 * When creating the list of tasks to be scheduled a set of preconditions are validated:
 * <ul>
 * <li>If the input parameter representing the Dynamic fork tasks is available as part of {@link WorkflowTask#getDynamicForkTasksParam()} then
 * the input for the dynamic task is validated to be a map by using {@link WorkflowTask#getDynamicForkTasksInputParamName()}</li>
 * <li>If the input parameter representing the Dynamic fork tasks is available as part of {@link WorkflowTask#getDynamicForkJoinTasksParam()} then
 * the input for the dynamic tasks is available in the payload of the tasks definition.
 * </li>
 * <li>A check is performed that the next following task in the {@link WorkflowDef} is a {@link TaskType#JOIN}</li>
 * </ul>
 *
 * @param taskMapperContext A wrapper class containing the {@link WorkflowTask}, {@link WorkflowDef}, {@link Workflow} and a string representation of the TaskId
 * @throws TerminateWorkflowException In case of:
 *                                    <ul>
 *                                    <li>
 *                                    When the task after {@link TaskType#FORK_JOIN_DYNAMIC} is not a {@link TaskType#JOIN}
 *                                    </li>
 *                                    <li>
 *                                    When the input parameters for the dynamic tasks are not of type {@link Map}
 *                                    </li>
 *                                    </ul>
 * @return List of tasks in the following order:
 * <ul>
 * <li>
 * {@link SystemTaskType#FORK} with {@link Task.Status#COMPLETED}
 * </li>
 * <li>
 * Might be any kind of task, but in most cases this is a UserDefinedTask with {@link Task.Status#SCHEDULED}
 * </li>
 * <li>
 * {@link SystemTaskType#JOIN} with {@link Task.Status#IN_PROGRESS}
 * </li>
 * </ul>
 */
@Override
public List<Task> getMappedTasks(TaskMapperContext taskMapperContext) throws TerminateWorkflowException {
    logger.debug("TaskMapperContext {} in ForkJoinDynamicTaskMapper", taskMapperContext);

    WorkflowTask taskToSchedule = taskMapperContext.getTaskToSchedule();
    Workflow workflowInstance = taskMapperContext.getWorkflowInstance();
    String taskId = taskMapperContext.getTaskId();
    int retryCount = taskMapperContext.getRetryCount();

    List<Task> mappedTasks = new LinkedList<>();
    //Get the list of dynamic tasks and the input for the tasks
    Pair<List<WorkflowTask>, Map<String, Map<String, Object>>> workflowTasksAndInputPair =
            Optional.ofNullable(taskToSchedule.getDynamicForkTasksParam())
                    .map(dynamicForkTaskParam -> getDynamicForkTasksAndInput(taskToSchedule, workflowInstance, dynamicForkTaskParam))
                    .orElseGet(() -> getDynamicForkJoinTasksAndInput(taskToSchedule, workflowInstance));

    List<WorkflowTask> dynForkTasks = workflowTasksAndInputPair.getLeft();
    Map<String, Map<String, Object>> tasksInput = workflowTasksAndInputPair.getRight();

    // Create Fork Task which needs to be followed by the dynamic tasks
    Task forkDynamicTask = createDynamicForkTask(taskToSchedule, workflowInstance, taskId, dynForkTasks);

    mappedTasks.add(forkDynamicTask);

    List<String> joinOnTaskRefs = new LinkedList<>();
    //Add each dynamic task to the mapped tasks and also get the last dynamic task in the list,
    // which indicates that the following task after that needs to be a join task
    for (WorkflowTask wft : dynForkTasks) {//TODO this is a cyclic dependency, break it out using function composition
        List<Task> forkedTasks = taskMapperContext.getDeciderService().getTasksToBeScheduled(workflowInstance, wft, retryCount);
        for (Task forkedTask : forkedTasks) {
            Map<String, Object> forkedTaskInput = tasksInput.get(forkedTask.getReferenceTaskName());
            forkedTask.getInputData().putAll(forkedTaskInput);
        }
        mappedTasks.addAll(forkedTasks);
        //Get the last of the dynamic tasks so that the join can be performed once this task is done
        Task last = forkedTasks.get(forkedTasks.size() - 1);
        joinOnTaskRefs.add(last.getReferenceTaskName());
    }

    //From the workflow definition get the next task and make sure that it is a JOIN task.
    //The dynamic fork tasks need to be followed by a join task
    WorkflowTask joinWorkflowTask = workflowInstance
            .getWorkflowDefinition()
            .getNextTask(taskToSchedule.getTaskReferenceName());

    if (joinWorkflowTask == null || !joinWorkflowTask.getType().equals(TaskType.JOIN.name())) {
        throw new TerminateWorkflowException("Dynamic join definition is not followed by a join task.  Check the blueprint");
    }

    // Create Join task
    HashMap<String, Object> joinInput = new HashMap<>();
    joinInput.put("joinOn", joinOnTaskRefs);
    Task joinTask = createJoinTask(workflowInstance, joinWorkflowTask, joinInput);
    mappedTasks.add(joinTask);

    return mappedTasks;
}
 
Example 15  Project: azure-cosmosdb-java  File: PathsHelper.java
public static boolean validateDocumentId(String resourceIdString) {
    Pair<Boolean, ResourceId> pair = ResourceId.tryParse(resourceIdString);
    return pair.getLeft() && pair.getRight().getDocument() != 0;
}
 
Example 16  Project: incubator-gobblin  File: UrlTrie.java
/**
 * The prefix differs from the rootPage in that the rootPage has one extra char at the end; this last char is used to construct the root node of the trie.
 */
public UrlTrie(String rootPage, UrlTrieNode root) {
  Pair<String, UrlTrieNode> defaults = getPrefixAndDefaultRoot(rootPage);
  _prefix = defaults.getLeft();
  _root = root;
}
 
Example 17  Project: systemds  File: LibFederatedAgg.java
public static MatrixBlock aggregateUnaryMatrix(MatrixObject federatedMatrix, AggregateUnaryOperator operator) {
	// find out the characteristics after aggregation
	MatrixCharacteristics mc = new MatrixCharacteristics();
	operator.indexFn.computeDimension(federatedMatrix.getDataCharacteristics(), mc);
	// make outBlock right size
	MatrixBlock ret = new MatrixBlock((int) mc.getRows(), (int) mc.getCols(), operator.aggOp.initialValue);
	List<Pair<FederatedRange, Future<FederatedResponse>>> idResponsePairs = new ArrayList<>();
	// distribute aggregation operation to all workers
	for (Map.Entry<FederatedRange, FederatedData> entry : federatedMatrix.getFedMapping().entrySet()) {
		FederatedData fedData = entry.getValue();
		if (!fedData.isInitialized())
			throw new DMLRuntimeException("Not all FederatedData was initialized for federated matrix");
		Future<FederatedResponse> future = fedData.executeFederatedOperation(
			new FederatedRequest(FederatedRequest.FedMethod.AGGREGATE, operator), true);
		idResponsePairs.add(new ImmutablePair<>(entry.getKey(), future));
	}
	try {
		//TODO replace with block operations
		for (Pair<FederatedRange, Future<FederatedResponse>> idResponsePair : idResponsePairs) {
			FederatedRange range = idResponsePair.getLeft();
			FederatedResponse federatedResponse = idResponsePair.getRight().get();
			int[] beginDims = range.getBeginDimsInt();
			MatrixBlock mb = (MatrixBlock) federatedResponse.getData()[0];
			// TODO performance optimizations
			MatrixValue.CellIndex cellIndex = new MatrixValue.CellIndex(0, 0);
			ValueFunction valueFn = operator.aggOp.increOp.fn;
			// Add worker response to resultBlock
			for (int r = 0; r < mb.getNumRows(); r++)
				for (int c = 0; c < mb.getNumColumns(); c++) {
					// Get the output index where the result should be placed by the index function
					// -> map input row/col to output row/col
					cellIndex.set(r + beginDims[0], c + beginDims[1]);
					operator.indexFn.execute(cellIndex, cellIndex);
					int resultRow = cellIndex.row;
					int resultCol = cellIndex.column;
					double newValue;
					if (valueFn instanceof KahanFunction) {
						// TODO iterate along correct axis to use correction correctly
						// temporary solution to execute correct overloaded method
						KahanObject kobj = new KahanObject(ret.quickGetValue(resultRow, resultCol), 0);
						newValue = ((KahanObject) valueFn.execute(kobj, mb.quickGetValue(r, c)))._sum;
					}
					else {
						// TODO special handling for `ValueFunction`s which do not implement `.execute(double, double)`
						// "Add" two partial calculations together with ValueFunction
						newValue = valueFn.execute(ret.quickGetValue(resultRow, resultCol), mb.quickGetValue(r, c));
					}
					ret.quickSetValue(resultRow, resultCol, newValue);
				}
		}
	}
	catch (Exception e) {
		throw new DMLRuntimeException("Federated binary aggregation failed", e);
	}
	return ret;
}
 
Example 18  Project: ExternalPlugins  File: CustomSwapper.java
private void executePair(Pair<Tab, Rectangle> pair)
{
	switch (pair.getLeft())
	{
		case COMBAT:
			if (client.getVar(VarClientInt.INTERFACE_TAB) != InterfaceTab.COMBAT.getId())
			{
				robot.delay((int) getMillis());
				robot.keyPress(utils.getTabHotkey(pair.getLeft()));
				robot.delay((int) getMillis());
			}
			utils.click(pair.getRight());
			break;
		case EQUIPMENT:
			if (client.getVar(VarClientInt.INTERFACE_TAB) != InterfaceTab.EQUIPMENT.getId())
			{
				robot.delay((int) getMillis());
				robot.keyPress(utils.getTabHotkey(pair.getLeft()));
				robot.delay((int) getMillis());
			}
			utils.click(pair.getRight());
			break;
		case INVENTORY:
			if (client.getVar(VarClientInt.INTERFACE_TAB) != InterfaceTab.INVENTORY.getId())
			{
				robot.delay((int) getMillis());
				robot.keyPress(utils.getTabHotkey(pair.getLeft()));
				robot.delay((int) getMillis());
			}
			utils.click(pair.getRight());
			break;
		case PRAYER:
			if (client.getVar(VarClientInt.INTERFACE_TAB) != InterfaceTab.PRAYER.getId())
			{
				robot.delay((int) getMillis());
				robot.keyPress(utils.getTabHotkey(pair.getLeft()));
				robot.delay((int) getMillis());
			}
			utils.click(pair.getRight());
			break;
		case SPELLBOOK:
			if (client.getVar(VarClientInt.INTERFACE_TAB) != InterfaceTab.SPELLBOOK.getId())
			{
				robot.delay((int) getMillis());
				robot.keyPress(utils.getTabHotkey(pair.getLeft()));
				robot.delay((int) getMillis());
			}
			utils.click(pair.getRight());
			break;
	}
}
 
Example 19  Project: act  File: PubchemSynonymFinder.java
public static void main(String[] args) throws Exception {
  org.apache.commons.cli.Options opts = new org.apache.commons.cli.Options();
  for (Option.Builder b : OPTION_BUILDERS) {
    opts.addOption(b.build());
  }

  CommandLine cl = null;
  try {
    CommandLineParser parser = new DefaultParser();
    cl = parser.parse(opts, args);
  } catch (ParseException e) {
    System.err.format("Argument parsing failed: %s\n", e.getMessage());
    HELP_FORMATTER.printHelp(PubchemSynonymFinder.class.getCanonicalName(), HELP_MESSAGE, opts, null, true);
    System.exit(1);
  }

  if (cl.hasOption("help")) {
    HELP_FORMATTER.printHelp(PubchemSynonymFinder.class.getCanonicalName(), HELP_MESSAGE, opts, null, true);
    return;
  }

  File rocksDBFile = new File(cl.getOptionValue(OPTION_INDEX_PATH));
  if (!rocksDBFile.isDirectory()) {
    System.err.format("Index directory does not exist or is not a directory at '%s'", rocksDBFile.getAbsolutePath());
    HELP_FORMATTER.printHelp(PubchemSynonymFinder.class.getCanonicalName(), HELP_MESSAGE, opts, null, true);
    System.exit(1);
  }

  List<String> compoundIds = null;
  if (cl.hasOption(OPTION_PUBCHEM_COMPOUND_ID)) {
    compoundIds = Collections.singletonList(cl.getOptionValue(OPTION_PUBCHEM_COMPOUND_ID));
  } else if (cl.hasOption(OPTION_IDS_FILE)) {
    File idsFile = new File(cl.getOptionValue(OPTION_IDS_FILE));
    if (!idsFile.exists()) {
      System.err.format("Cannot find Pubchem CIDs file at %s", idsFile.getAbsolutePath());
      HELP_FORMATTER.printHelp(PubchemSynonymFinder.class.getCanonicalName(), HELP_MESSAGE, opts, null, true);
      System.exit(1);
    }

    compoundIds = getCIDsFromFile(idsFile);

    if (compoundIds.size() == 0) {
      System.err.format("Found zero Pubchem CIDs to process in file at '%s', exiting", idsFile.getAbsolutePath());
      HELP_FORMATTER.printHelp(PubchemSynonymFinder.class.getCanonicalName(), HELP_MESSAGE, opts, null, true);
      System.exit(1);
    }
  } else {
    System.err.format("Must specify one of '%s' or '%s'; index is too big to print all synonyms.",
        OPTION_PUBCHEM_COMPOUND_ID, OPTION_IDS_FILE);
    HELP_FORMATTER.printHelp(PubchemSynonymFinder.class.getCanonicalName(), HELP_MESSAGE, opts, null, true);
    System.exit(1);
  }

  // Run a quick check to warn users of malformed ids.
  compoundIds.forEach(x -> {
    if (!PC_CID_PATTERN.matcher(x).matches()) { // Use matches() for complete matching.
      LOGGER.warn("Specified compound id does not match expected format: %s", x);
    }
  });

  LOGGER.info("Opening DB and searching for %d Pubchem CIDs", compoundIds.size());
  Pair<RocksDB, Map<PubchemTTLMerger.COLUMN_FAMILIES, ColumnFamilyHandle>> dbAndHandles = null;
  Map<String, PubchemSynonyms> results = new LinkedHashMap<>(compoundIds.size());
  try {
    dbAndHandles = PubchemTTLMerger.openExistingRocksDB(rocksDBFile);
    RocksDB db = dbAndHandles.getLeft();
    ColumnFamilyHandle cidToSynonymsCfh =
        dbAndHandles.getRight().get(PubchemTTLMerger.COLUMN_FAMILIES.CID_TO_SYNONYMS);

    for (String cid : compoundIds) {
      PubchemSynonyms synonyms = null;
      byte[] val = db.get(cidToSynonymsCfh, cid.getBytes(UTF8));
      if (val != null) {
        ObjectInputStream oi = new ObjectInputStream(new ByteArrayInputStream(val));
        // We're relying on our use of a one-value-type per index model here so we can skip the instanceof check.
        synonyms = (PubchemSynonyms) oi.readObject();
      } else {
        LOGGER.warn("No synonyms available for compound id '%s'", cid);
      }
      results.put(cid, synonyms);
    }
  } finally {
    if (dbAndHandles != null) {
      dbAndHandles.getLeft().close();
    }
  }

  try (OutputStream outputStream =
           cl.hasOption(OPTION_OUTPUT) ? new FileOutputStream(cl.getOptionValue(OPTION_OUTPUT)) : System.out) {
    OBJECT_MAPPER.writerWithDefaultPrettyPrinter().writeValue(outputStream, results);
    new OutputStreamWriter(outputStream).append('\n');
  }
  LOGGER.info("Done searching for Pubchem synonyms");
}
 
Example 20  Project: emodb  File: ChannelAllocationState.java
public synchronized SlabAllocation allocate(PeekingIterator<Integer> eventSizes) {
    checkArgument(eventSizes.hasNext());

    if (!isAttached()) {
        return null;
    }

    int remaining = Constants.MAX_SLAB_SIZE - _slabConsumed;

    Pair<Integer, Integer> countAndBytesConsumed = DefaultSlabAllocator.defaultAllocationCount(_slabConsumed, _slabBytesConsumed, eventSizes);

    int offsetForNewAllocation = _slabConsumed;

    _slabConsumed += countAndBytesConsumed.getLeft();
    _slabBytesConsumed += countAndBytesConsumed.getRight();

    // Check for case where no more slots could be allocated because slab is full either because
    // the max # of slots is consumed or the max # of bytes is consumed
    if (countAndBytesConsumed.getLeft() == 0) {
        detach().release();
        return null;
    }

    if (countAndBytesConsumed.getLeft() < remaining) {
        // All events fit in current slab, leave it attached
        return new DefaultSlabAllocation(_slab.addRef(), offsetForNewAllocation, countAndBytesConsumed.getLeft());
    } else {
        // Whatever is left of this slab is consumed. Return the rest of the slab. Detach from it so we'll allocate a new one next time.
        return new DefaultSlabAllocation(detach(), offsetForNewAllocation, countAndBytesConsumed.getLeft());
    }
}