Source-code examples for the class org.apache.commons.lang3.time.StopWatch

The examples below show how to use the org.apache.commons.lang3.time.StopWatch API in real projects; you can also follow the links to view the full source code on GitHub.
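
Before the project examples, here is a minimal, self-contained sketch of the typical StopWatch lifecycle. It uses only API calls that also appear in the examples below (createStarted, start, stop, reset, getTime, toString):

import java.util.concurrent.TimeUnit;

import org.apache.commons.lang3.time.StopWatch;

public class StopWatchDemo {
    public static void main(String[] args) throws InterruptedException {
        // createStarted() is shorthand for new StopWatch() followed by start().
        StopWatch watch = StopWatch.createStarted();

        Thread.sleep(100); // stand-in for the work being timed

        watch.stop();
        System.out.println("took " + watch.getTime() + " ms");                // elapsed milliseconds
        System.out.println("took " + watch.getTime(TimeUnit.SECONDS) + " s"); // converted to seconds

        // A stopped watch must be reset before it can be restarted;
        // calling start() again without reset() throws IllegalStateException.
        watch.reset();
        watch.start();
        Thread.sleep(50);
        watch.stop();
        System.out.println(watch); // toString() formats the elapsed time as HH:mm:ss.SSS
    }
}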

Example 1

/**
 * {@inheritDoc}
 */
@Override
public void warmUpCache(Context context) {
    for (Map.Entry<String, Integer> entry : getDescriptorPreloadFolders().entrySet()) {
        String treeRoot = entry.getKey();
        int depth = entry.getValue();
        StopWatch stopWatch = new StopWatch();

        logger.info("Starting preload of tree [{}] with depth {}", treeRoot, depth);

        stopWatch.start();

        try {
            contentStoreService.getTree(context, treeRoot, depth);
        } catch (Exception e) {
            logger.error("Error while preloading tree at [{}]", treeRoot, e);
        }

        stopWatch.stop();

        logger.info("Preload of tree [{}] with depth {} completed in {} secs", treeRoot, depth,
                    stopWatch.getTime(TimeUnit.SECONDS));
    }
}
 
Example 2  Project: WeEvent  File: AMOPChannel.java
public ChannelResponse sendEvent(String topic, FileEvent fileEvent) throws BrokerException {
    if (this.subTopics.contains(topic) || this.subVerifyTopics.containsKey(topic)) {
        log.error("this is already receiver side for topic: {}", topic);
        throw new BrokerException(ErrorCode.FILE_SENDER_RECEIVER_CONFLICT);
    }

    byte[] json = JsonHelper.object2JsonBytes(fileEvent);
    ChannelRequest channelRequest = new ChannelRequest();
    channelRequest.setToTopic(topic);
    channelRequest.setMessageID(this.service.newSeq());
    channelRequest.setTimeout(this.service.getConnectSeconds() * 1000);
    channelRequest.setContent(json);

    log.info("send channel request, topic: {} {} id: {}", channelRequest.getToTopic(), fileEvent.getEventType(), channelRequest.getMessageID());
    ChannelResponse rsp;
    StopWatch sw = StopWatch.createStarted();
    if (this.senderVerifyTopics.containsKey(topic)) {
        log.info("over verified AMOP channel");
        rsp = this.service.sendChannelMessageForVerifyTopic(channelRequest);
    } else {
        rsp = this.service.sendChannelMessage2(channelRequest);
    }
    sw.stop();
    log.info("receive channel response, id: {} result: {}-{} cost: {}", rsp.getMessageID(), rsp.getErrorCode(), rsp.getErrorMessage(), sw.getTime());
    return rsp;
}
 
Example 3  Project: sqlg  File: BaseTest.java
@Before
public void before() throws Exception {
    StopWatch stopWatch = new StopWatch();
    stopWatch.start();
    this.sqlgGraph = SqlgGraph.open(configuration);
    SqlgUtil.dropDb(this.sqlgGraph);
    this.sqlgGraph.tx().commit();
    this.sqlgGraph.close();
    this.sqlgGraph = SqlgGraph.open(configuration);
    grantReadOnlyUserPrivileges();
    assertNotNull(this.sqlgGraph);
    assertNotNull(this.sqlgGraph.getBuildVersion());
    this.gt = this.sqlgGraph.traversal();
    if (configuration.getBoolean("distributed", false)) {
        this.sqlgGraph1 = SqlgGraph.open(configuration);
        assertNotNull(this.sqlgGraph1);
        assertEquals(this.sqlgGraph.getBuildVersion(), this.sqlgGraph1.getBuildVersion());
    }
    stopWatch.stop();
    logger.info("Startup time for test = " + stopWatch.toString());
}
 
Example 4  Project: liteflow  File: AbstractUnstatefullJob.java
public void execute() {
    try {
        if (isRunning.get()) {
            return;
        }
        isRunning.set(true);
        StopWatch stopWatch = new StopWatch();
        stopWatch.start();
        LOG.info("job start");
        executeInternal();
        LOG.info("job finished, takes {} ms", stopWatch.getTime());
    } catch (Exception e) {
        LOG.error("run error", e);
    } finally {
        isRunning.set(false);
    }
}
 
Example 5  Project: pyramid  File: LogisticLoss.java
private void updatePredictedCounts(){
    StopWatch stopWatch = new StopWatch();
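    // Start the watch only when debug logging is enabled; the elapsed time is reported only in the debug log below.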
    if (logger.isDebugEnabled()){
        stopWatch.start();
    }
    IntStream intStream;
    if (isParallel){
        intStream = IntStream.range(0,numParameters).parallel();
    } else {
        intStream = IntStream.range(0,numParameters);
    }

    intStream.forEach(i -> this.predictedCounts.set(i, calPredictedCount(i)));
    if (logger.isDebugEnabled()){
        logger.debug("time spent on updatePredictedCounts = "+stopWatch);
    }
}
 
Example 6  Project: uyuni  File: ErrataManagerTest.java
public void xxxxLookupErrataByAdvisoryType() throws IOException {

    String bugfix = "Bug Fix Advisory";
    String pea = "Product Enhancement Advisory";
    String security = "Security Advisory";

    StopWatch st = new StopWatch();
    st.start();
    List erratas = ErrataManager.lookupErrataByType(bugfix);
    outputErrataList(erratas);
    System.out.println("Got bugfixes: " + erratas.size() + " time: " + st);
    assertTrue(erratas.size() > 0);
    erratas = ErrataManager.lookupErrataByType(pea);
    outputErrataList(erratas);
    System.out.println("Got pea enhancements: " + erratas.size() + " time: " + st);
    assertTrue(erratas.size() > 0);
    erratas = ErrataManager.lookupErrataByType(security);
    outputErrataList(erratas);
    assertTrue(erratas.size() > 0);
    System.out.println("Got security advisories: " + erratas.size() + " time: " + st);
    st.stop();
    System.out.println("TIME: " + st.getTime());
}
 
Example 7  Project: pnc  File: IndyRepositorySession.java
/**
 * Promote all build dependencies NOT ALREADY CAPTURED to the hosted repository holding store for the shared imports
 * and return dependency artifacts meta data.
 *
 * @param report The tracking report that contains info about artifacts downloaded by the build
 * @param promote flag if collected dependencies should be promoted
 * @return List of dependency artifacts meta data
 * @throws RepositoryManagerException In case of a client API transport error or an error during promotion of
 *         artifacts
 * @throws PromotionValidationException when the promotion process results in an error due to validation failure
 */
private List<Artifact> processDownloads(final TrackedContentDTO report, final boolean promote)
        throws RepositoryManagerException, PromotionValidationException {
    List<Artifact> deps;

    logger.info("BEGIN: Process artifacts downloaded by build");
    userLog.info("Processing dependencies");
    StopWatch stopWatch = StopWatch.createStarted();

    Set<TrackedContentEntryDTO> downloads = report.getDownloads();
    if (CollectionUtils.isEmpty(downloads)) {
        deps = Collections.emptyList();
    } else {
        deps = collectDownloadedArtifacts(report);

        if (promote) {
            Map<StoreKey, Map<StoreKey, Set<String>>> depMap = collectDownloadsPromotionMap(downloads);
            promoteDownloads(depMap);
        }
    }

    logger.info("END: Process artifacts downloaded by build, took {} seconds", stopWatch.getTime(TimeUnit.SECONDS));
    return deps;
}
 
Example 8  Project: pyramid  File: ENRecoverCBMOptimizer.java
@Override
protected void updateBinaryClassifier(int component, int label, MultiLabelClfDataSet activeDataset, double[] activeGammas) {
    StopWatch stopWatch = new StopWatch();
    stopWatch.start();

    if (cbm.binaryClassifiers[component][label] == null || cbm.binaryClassifiers[component][label] instanceof PriorProbClassifier){
        cbm.binaryClassifiers[component][label] = new LogisticRegression(2, activeDataset.getNumFeatures());
    }


    int[] binaryLabels = DataSetUtil.toBinaryLabels(activeDataset.getMultiLabels(), label);
    double[][] targetsDistribution = DataSetUtil.labelsToDistributions(binaryLabels, 2);
    // no parallelism
    ElasticNetLogisticTrainer elasticNetLogisticTrainer = new ElasticNetLogisticTrainer.Builder((LogisticRegression)
            cbm.binaryClassifiers[component][label],  activeDataset, 2, targetsDistribution, activeGammas)
            .setRegularization(regularizationBinary)
            .setL1Ratio(l1RatioBinary)
            .setLineSearch(lineSearch).build();
    elasticNetLogisticTrainer.setActiveSet(activeSet);
    elasticNetLogisticTrainer.getTerminator().setMaxIteration(this.binaryUpdatesPerIter);
    elasticNetLogisticTrainer.optimize();
    if (logger.isDebugEnabled()){
        logger.debug("time spent on updating component "+component+" label "+label+" = "+stopWatch);
    }
}
 
Example 9  Project: hbase  File: TestCellBlockBuilder.java
private static void timerTests(final CellBlockBuilder builder, final int count, final int size,
    final Codec codec, final CompressionCodec compressor) throws IOException {
  final int cycles = 1000;
  StopWatch timer = new StopWatch();
  timer.start();
  for (int i = 0; i < cycles; i++) {
    timerTest(builder, timer, count, size, codec, compressor, false);
  }
  timer.stop();
  LOG.info("Codec=" + codec + ", compression=" + compressor + ", sized=" + false + ", count="
      + count + ", size=" + size + ", + took=" + timer.getTime() + "ms");
  timer.reset();
  timer.start();
  for (int i = 0; i < cycles; i++) {
    timerTest(builder, timer, count, size, codec, compressor, true);
  }
  timer.stop();
  LOG.info("Codec=" + codec + ", compression=" + compressor + ", sized=" + true + ", count="
      + count + ", size=" + size + ", + took=" + timer.getTime() + "ms");
}
 
Example 10  Project: pyramid  File: LKTreeBoostTest.java
/**
 * second stage
 * @throws Exception
 */
static void spam_resume_train_2() throws Exception{
    System.out.println("loading ensemble");
    LKBoost lkBoost = LKBoost.deserialize(new File(TMP, "/LKTreeBoostTest/ensemble.ser"));

    ClfDataSet dataSet = TRECFormat.loadClfDataSet(new File(DATASETS,"spam/trec_data/train.trec"),
            DataSetType.CLF_DENSE,true);

    LKBoostOptimizer trainer = new LKBoostOptimizer(lkBoost,dataSet);
    trainer.initialize();

    StopWatch stopWatch = new StopWatch();
    stopWatch.start();
    for (int round =50;round<100;round++){
        System.out.println("round="+round);
        trainer.iterate();
    }
    stopWatch.stop();
    System.out.println(stopWatch);


    double accuracy = Accuracy.accuracy(lkBoost,dataSet);
    System.out.println(accuracy);

}
 
Example 11  Project: sqlg  File: TestBatch.java
@Test
public void testVerticesBatchOn() throws InterruptedException {
    StopWatch stopWatch = new StopWatch();
    stopWatch.start();
    this.sqlgGraph.tx().normalBatchModeOn();
    for (int i = 0; i < 10000; i++) {
        Vertex v1 = this.sqlgGraph.addVertex(T.label, "MO1", "name", "marko" + i);
        Vertex v2 = this.sqlgGraph.addVertex(T.label, "Person", "name", "marko" + i);
        v1.addEdge("Friend", v2, "name", "xxx");
    }
    this.sqlgGraph.tx().commit();
    stopWatch.stop();
    System.out.println(stopWatch.toString());
    testVerticesBatchOn_assert(this.sqlgGraph);
    if (this.sqlgGraph1 != null) {
        Thread.sleep(1000);
        testVerticesBatchOn_assert(this.sqlgGraph1);
    }
}
 
Example 12  Project: vind  File: ElasticSearchServer.java
@Override
public GetResult execute(RealTimeGet search, DocumentFactory assets) {
    try {
        final StopWatch elapsedTime = StopWatch.createStarted();
        final MultiGetResponse response = elasticSearchClient.realTimeGet(search.getValues());
        elapsedTime.stop();

        if (response != null) {
            return ResultUtils.buildRealTimeGetResult(response, search, assets, elapsedTime.getTime()).setElapsedTime(elapsedTime.getTime());
        } else {
            log.error("Null result from ElasticClient");
            throw new SearchServerException("Null result from ElasticClient");
        }

    } catch (ElasticsearchException | IOException e) {
        log.error("Cannot execute realTime get query");
        throw new SearchServerException("Cannot execute realTime get query", e);
    }
}
 
Example 13  Project: vind  File: ElasticSearchServer.java
private IndexResult indexSingleDocument(Document doc, int withinMs) {
    log.warn("Parameter 'within' not in use in elastic search backend");
    final StopWatch elapsedTime = StopWatch.createStarted();
    final Map<String,Object> document = DocumentUtil.createInputDocument(doc);

    try {
        if (elasticClientLogger.isTraceEnabled()) {
            elasticClientLogger.trace(">>> add({})", doc.getId());
        } else {
            elasticClientLogger.debug(">>> add({})", doc.getId());
        }

        final BulkResponse response = this.elasticSearchClient.add(document);
        elapsedTime.stop();
        return new IndexResult(response.getTook().getMillis()).setElapsedTime(elapsedTime.getTime());

    } catch (ElasticsearchException | IOException e) {
        log.error("Cannot index document {}", document.get(FieldUtil.ID) , e);
        throw new SearchServerException("Cannot index document", e);
    }
}
 
Example 14  Project: pnc  File: IndyRepositorySession.java
@Override
public void deleteBuildGroup() throws RepositoryManagerException {
    logger.info("BEGIN: Removing build aggregation group: {}", buildContentId);
    userLog.info("Removing build aggregation group");
    StopWatch stopWatch = StopWatch.createStarted();

    try {
        StoreKey key = new StoreKey(packageType, StoreType.group, buildContentId);
        serviceAccountIndy.stores().delete(key, "[Post-Build] Removing build aggregation group: " + buildContentId);
    } catch (IndyClientException e) {
        throw new RepositoryManagerException(
                "Failed to retrieve Indy stores module. Reason: %s",
                e,
                e.getMessage());
    }
    logger.info(
            "END: Removing build aggregation group: {}, took: {} seconds",
            buildContentId,
            stopWatch.getTime(TimeUnit.SECONDS));
    stopWatch.reset();
}
 
Example 15  Project: sqlg  File: TestBatchStreamEdge.java
private ArrayList<SqlgVertex> createMilCarVertex() {
    StopWatch stopWatch = new StopWatch();
    stopWatch.start();
    ArrayList<SqlgVertex> result = new ArrayList<>();
    this.sqlgGraph.tx().normalBatchModeOn();
    for (int i = 1; i < NUMBER_OF_VERTICES + 1; i++) {
        Map<String, Object> keyValue = new LinkedHashMap<>();
        for (int j = 0; j < 100; j++) {
            keyValue.put("name" + j, "aaaaaaaaaa" + i);
        }
        SqlgVertex car = (SqlgVertex) this.sqlgGraph.addVertex("Car", keyValue);
        result.add(car);
        if (i % (NUMBER_OF_VERTICES / 10) == 0) {
            this.sqlgGraph.tx().commit();
            this.sqlgGraph.tx().normalBatchModeOn();
        }
    }
    this.sqlgGraph.tx().commit();
    stopWatch.stop();
    System.out.println("createMilCarVertex took " + stopWatch.toString());
    return result;
}
 
Example 16  Project: pyramid  File: CBMEN.java
private static void reportAccPrediction(Config config, CBM cbm, MultiLabelClfDataSet dataSet, String name) throws Exception{
    System.out.println("============================================================");
    System.out.println("Making predictions on "+name +" set with the instance set accuracy optimal predictor");
    String output = config.getString("output.dir");
    AccPredictor accPredictor = new AccPredictor(cbm);
    accPredictor.setComponentContributionThreshold(config.getDouble("predict.piThreshold"));
    StopWatch stopWatch = new StopWatch();
    stopWatch.start();
    MultiLabel[] predictions = accPredictor.predict(dataSet);
    System.out.println("time spent on prediction = "+stopWatch);
    MLMeasures mlMeasures = new MLMeasures(dataSet.getNumClasses(),dataSet.getMultiLabels(),predictions);
    System.out.println(name+" performance with the instance set accuracy optimal predictor");
    System.out.println(mlMeasures);
    File performanceFile = Paths.get(output,name+"_predictions", "instance_accuracy_optimal","performance.txt").toFile();
    FileUtils.writeStringToFile(performanceFile, mlMeasures.toString());
    System.out.println(name+" performance is saved to "+performanceFile.toString());


    // Here we do not use approximation
    double[] setProbs = IntStream.range(0, predictions.length).parallel().
            mapToDouble(i->cbm.predictAssignmentProb(dataSet.getRow(i),predictions[i])).toArray();
    File predictionFile = Paths.get(output,name+"_predictions", "instance_accuracy_optimal","predictions.txt").toFile();
    try (BufferedWriter br = new BufferedWriter(new FileWriter(predictionFile))){
        for (int i=0;i<dataSet.getNumDataPoints();i++){
            br.write(predictions[i].toString());
            br.write(":");
            br.write(""+setProbs[i]);
            br.newLine();
        }
    }

    System.out.println("predicted sets and their probabilities are saved to "+predictionFile.getAbsolutePath());

    boolean individualPerformance = true;
    if (individualPerformance){
        ObjectMapper objectMapper = new ObjectMapper();
        objectMapper.writeValue(Paths.get(output,name+"_predictions", "instance_accuracy_optimal","individual_performance.json").toFile(),mlMeasures.getMacroAverage());
    }
    System.out.println("============================================================");
}
 
Example 17  Project: components  File: CustomMetaDataRetrieverImpl.java
public List<NsRef> retrieveCustomizationIds(final BasicRecordType type) throws NetSuiteException {
    GetCustomizationIdResult result = clientService.execute(new NetSuiteClientService.PortOperation<GetCustomizationIdResult, NetSuitePortType>() {
        @Override public GetCustomizationIdResult execute(NetSuitePortType port) throws Exception {
            logger.debug("Retrieving customization IDs: {}", type.getType());
            StopWatch stopWatch = new StopWatch();
            try {
                stopWatch.start();
                final GetCustomizationIdRequest request = new GetCustomizationIdRequest();
                CustomizationType customizationType = new CustomizationType();
                customizationType.setGetCustomizationType(GetCustomizationType.fromValue(type.getType()));
                request.setCustomizationType(customizationType);
                return port.getCustomizationId(request).getGetCustomizationIdResult();
            } finally {
                stopWatch.stop();
                logger.debug("Retrieved customization IDs: {}, {}", type.getType(), stopWatch);
            }
        }
    });
    if (result.getStatus().getIsSuccess()) {
        List<NsRef> nsRefs;
        if (result.getTotalRecords() > 0) {
            final List<CustomizationRef> refs = result.getCustomizationRefList().getCustomizationRef();
            nsRefs = new ArrayList<>(refs.size());
            for (final CustomizationRef ref : refs) {
                NsRef nsRef = new NsRef();
                nsRef.setRefType(RefType.CUSTOMIZATION_REF);
                nsRef.setScriptId(ref.getScriptId());
                nsRef.setInternalId(ref.getInternalId());
                nsRef.setType(ref.getType().value());
                nsRef.setName(ref.getName());
                nsRefs.add(nsRef);
            }
        } else {
            nsRefs = Collections.emptyList();
        }
        return nsRefs;
    } else {
        throw new NetSuiteException("Retrieving of customizations was not successful: " + type);
    }
}
 
Example 18  Project: sqlg  File: TestBulkWithout.java
@Test
public void testBulkWithinMultipleHasContainers() throws InterruptedException {
    StopWatch stopWatch = new StopWatch();
    stopWatch.start();
    Vertex god = this.sqlgGraph.addVertex(T.label, "God");
    Vertex person1 = this.sqlgGraph.addVertex(T.label, "Person", "idNumber", 1, "name", "pete");
    god.addEdge("creator", person1);
    Vertex person2 = this.sqlgGraph.addVertex(T.label, "Person", "idNumber", 2, "name", "pete");
    god.addEdge("creator", person2);
    Vertex person3 = this.sqlgGraph.addVertex(T.label, "Person", "idNumber", 3, "name", "john");
    god.addEdge("creator", person3);
    Vertex person4 = this.sqlgGraph.addVertex(T.label, "Person", "idNumber", 4, "name", "pete");
    god.addEdge("creator", person4);
    Vertex person5 = this.sqlgGraph.addVertex(T.label, "Person", "idNumber", 5, "name", "pete");
    god.addEdge("creator", person5);
    Vertex person6 = this.sqlgGraph.addVertex(T.label, "Person", "idNumber", 6, "name", "pete");
    god.addEdge("creator", person6);
    Vertex person7 = this.sqlgGraph.addVertex(T.label, "Person", "idNumber", 7, "name", "pete");
    god.addEdge("creator", person7);
    Vertex person8 = this.sqlgGraph.addVertex(T.label, "Person", "idNumber", 8, "name", "pete");
    god.addEdge("creator", person8);
    Vertex person9 = this.sqlgGraph.addVertex(T.label, "Person", "idNumber", 9, "name", "pete");
    god.addEdge("creator", person9);
    Vertex person10 = this.sqlgGraph.addVertex(T.label, "Person", "idNumber", 10, "name", "pete");
    god.addEdge("creator", person10);

    this.sqlgGraph.tx().commit();
    stopWatch.stop();
    System.out.println(stopWatch.toString());
    stopWatch.reset();
    stopWatch.start();
    testBulkWithinMultipleHasContainers_assert(this.sqlgGraph);
    if (this.sqlgGraph1 != null) {
        Thread.sleep(SLEEP_TIME);
        testBulkWithinMultipleHasContainers_assert(this.sqlgGraph1);
    }
    stopWatch.stop();
    System.out.println(stopWatch.toString());
}
 
Example 19  Project: alf.io  File: SpecialPriceTokenGenerator.java
public void generatePendingCodes() {
    StopWatch stopWatch = new StopWatch();
    log.trace("start pending codes generation");
    stopWatch.start();
    specialPriceRepository.findWaitingElements().forEach(this::generateCode);
    stopWatch.stop();
    log.trace("end. Took {} ms", stopWatch.getTime());
}
 
Example 20  Project: owltools  File: GoLoaderIntegrationRunner.java
@Test
@Ignore("This test requires a missing resource.")
public void testLoadGoTaxon() throws Exception {
	ParserWrapper pw = new ParserWrapper();
	pw.addIRIMapper(new CatalogXmlIRIMapper(catalogXml));
	OWLGraphWrapper g = pw.parseToOWLGraph(goFile);
	
	GafSolrDocumentLoaderIntegrationRunner.printMemoryStats();
	GafSolrDocumentLoaderIntegrationRunner.gc();
	GafSolrDocumentLoaderIntegrationRunner.printMemoryStats();
	
	ConfigManager configManager = new ConfigManager();
	configManager.add("src/test/resources/test-ont-config.yaml");
	
	StopWatch watch = new StopWatch();
	watch.start();
	FlexCollection c = new FlexCollection(configManager, g);
	
	GafSolrDocumentLoaderIntegrationRunner.printMemoryStats();
	GafSolrDocumentLoaderIntegrationRunner.gc();
	GafSolrDocumentLoaderIntegrationRunner.printMemoryStats();
	
	FlexSolrDocumentLoader loader = new FlexSolrDocumentLoader((SolrServer)null, c) {

		@Override
		protected void addToServer(Collection<SolrInputDocument> docs)
				throws SolrServerException, IOException {
			solrCounter += docs.size();
		}
		
	};
	loader.load();
	watch.stop();
	GafSolrDocumentLoaderIntegrationRunner.printMemoryStats();
	System.out.println("Loaded "+solrCounter+" Solr docs in "+watch);
	assertTrue(solrCounter > 0);
}
 
Example 21  Project: synopsys-detect  File: Timekeeper.java
private StopWatch getStopWatch(final T key) {
    if (stopWatches.containsKey(key)) {
        return stopWatches.get(key);
    } else {
        final StopWatch sw = new StopWatch();
        stopWatches.put(key, sw);
        return sw;
    }
}
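
A side note on Example 21: the get-or-create logic above can be written more compactly with Map.computeIfAbsent. This is only a sketch of the equivalent form, assuming the stopWatches field is a java.util.Map<T, StopWatch> (its declaration is not shown in the excerpt):

private StopWatch getStopWatch(final T key) {
    // Create, cache, and return a new StopWatch on first access; return the cached one afterwards.
    return stopWatches.computeIfAbsent(key, k -> new StopWatch());
}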
 
Example 22  Project: components  File: NetSuiteClientServiceIT.java
@Test
public void testRetrieveCustomRecordCustomFields() throws Exception {
    StopWatch stopWatch = new StopWatch();
    stopWatch.start();

    RecordTypeInfo recordType = connection.getMetaDataSource()
            .getRecordType("customrecord_campaign_revenue");

    TypeDesc typeDesc = connection.getMetaDataSource().getTypeInfo(recordType.getName());
    logger.debug("Record type desc: {}", typeDesc.getTypeName());

    stopWatch.stop();
}
 
Example 23  Project: milkman  File: ContentEditor.java
private StyleSpans<Collection<String>> computeHighlighting(String text) {
    StopWatch s = new StopWatch();
    s.start();
    try {
        if (getCurrentContenttypePlugin() != null && !shouldSkipHighlighting(text))
            return getCurrentContenttypePlugin().computeHighlighting(text);
        else
            return noHighlight(text);
    } finally {
        s.stop();
        // System.out.println("Highlighting code: " + s.getTime() + " ms");
    }
}
 
Example 24  Project: sqlg  File: TestBulkWithout.java
@Test
public void testBulkWithout() throws InterruptedException {
    StopWatch stopWatch = new StopWatch();
    stopWatch.start();
    if (this.sqlgGraph.getSqlDialect().supportsBatchMode()) {
        this.sqlgGraph.tx().normalBatchModeOn();
    }
    Vertex god = this.sqlgGraph.addVertex(T.label, "God");
    List<String> uuids = new ArrayList<>();
    for (int i = 0; i < 100; i++) {
        String uuid = UUID.randomUUID().toString();
        uuids.add(uuid);
        Vertex person = this.sqlgGraph.addVertex(T.label, "Person", "idNumber", uuid);
        god.addEdge("creator", person);
    }
    this.sqlgGraph.tx().commit();
    stopWatch.stop();
    System.out.println(stopWatch.toString());
    stopWatch.reset();
    stopWatch.start();
    testBulkWithout_assert(this.sqlgGraph, uuids);
    if (this.sqlgGraph1 != null) {
        Thread.sleep(SLEEP_TIME);
        testBulkWithout_assert(this.sqlgGraph1, uuids);
    }
    stopWatch.stop();
    System.out.println(stopWatch.toString());
}
 
Example 25  Project: uyuni  File: ErrataCacheTaskTest.java
public void aTestExecute() throws Exception {
    StopWatch sw = new StopWatch();

    ErrataCacheTask ect = new ErrataCacheTask();

    sw.start();
    ect.execute(null);
    sw.stop();
    System.out.println("ErrataCacheTask took [" + sw.getTime() + "]");
}
 
Example 26  Project: sqlg  File: TestBatchStreamEdge.java
@Test
public void testMilCompleteEdges() {
    ArrayList<SqlgVertex> persons = createMilPersonVertex();
    ArrayList<SqlgVertex> cars = createMilCarVertex();
    this.sqlgGraph.tx().commit();
    StopWatch stopWatch = new StopWatch();
    stopWatch.start();
    this.sqlgGraph.tx().streamingBatchModeOn();
    LinkedHashMap<String, Object> keyValues = new LinkedHashMap<>();
    keyValues.put("name", "halo");
    keyValues.put("name2", "halo");
    for (int i = 0; i < NUMBER_OF_VERTICES; i++) {
        SqlgVertex person = persons.get(0);
        SqlgVertex car = cars.get(i);
        person.streamEdge("person_car", car, keyValues);
    }
    this.sqlgGraph.tx().commit();
    int mb = 1024 * 1024;

    // get Runtime instance
    Runtime instance = Runtime.getRuntime();

    System.out.println("***** Heap utilization statistics [MB] *****\n");

    // available memory
    System.out.println("Total Memory: " + instance.totalMemory() / mb);

    // free memory
    System.out.println("Free Memory: " + instance.freeMemory() / mb);

    // used memory
    System.out.println("Used Memory: "
            + (instance.totalMemory() - instance.freeMemory()) / mb);

    // Maximum available memory
    System.out.println("Max Memory: " + instance.maxMemory() / mb);
    Assert.assertEquals(NUMBER_OF_VERTICES, this.sqlgGraph.traversal().V(persons.get(0)).out("person_car").toList().size());
    stopWatch.stop();
    System.out.println("testMilCompleteEdges took " + stopWatch.toString());
}
 
Example 27  Project: sqlg  File: TestBulkWithin.java
@Test
public void testBulkWithinVertexCompileStep() throws InterruptedException {
    StopWatch stopWatch = new StopWatch();
    stopWatch.start();
    if (this.sqlgGraph.getSqlDialect().supportsBatchMode()) {
        this.sqlgGraph.tx().normalBatchModeOn();
    }
    Vertex god = this.sqlgGraph.addVertex(T.label, "God");
    List<String> uuids = new ArrayList<>();
    for (int i = 0; i < 100; i++) {
        String uuid = UUID.randomUUID().toString();
        uuids.add(uuid);
        Vertex person = this.sqlgGraph.addVertex(T.label, "Person", "idNumber", uuid);
        god.addEdge("creator", person);
    }
    this.sqlgGraph.tx().commit();
    stopWatch.stop();
    System.out.println(stopWatch.toString());
    stopWatch.reset();
    stopWatch.start();
    testBulkWithinVertexCompileStep_assert(this.sqlgGraph, god, uuids);
    if (this.sqlgGraph1 != null) {
        Thread.sleep(SLEEP_TIME);
        testBulkWithinVertexCompileStep_assert(this.sqlgGraph1, god, uuids);
    }
    stopWatch.stop();
    System.out.println(stopWatch.toString());

}
 
Example 28  Project: pyramid  File: CBMEN.java
private static void reportHammingPrediction(Config config, CBM cbm, MultiLabelClfDataSet dataSet, String name) throws Exception{
    System.out.println("============================================================");
    System.out.println("Making predictions on "+name+" set with the instance Hamming loss optimal predictor");
    String output = config.getString("output.dir");
    MarginalPredictor marginalPredictor = new MarginalPredictor(cbm);
    marginalPredictor.setPiThreshold(config.getDouble("predict.piThreshold"));
    StopWatch stopWatch = new StopWatch();
    stopWatch.start();
    MultiLabel[] predictions = marginalPredictor.predict(dataSet);
    System.out.println("time spent on prediction = "+stopWatch);
    MLMeasures mlMeasures = new MLMeasures(dataSet.getNumClasses(),dataSet.getMultiLabels(),predictions);
    System.out.println(name+" performance with the instance Hamming loss optimal predictor");
    System.out.println(mlMeasures);
    File performanceFile = Paths.get(output,name+"_predictions", "instance_hamming_loss_optimal","performance.txt").toFile();
    FileUtils.writeStringToFile(performanceFile, mlMeasures.toString());
    System.out.println(name+" performance is saved to "+performanceFile.toString());


    // Here we do not use approximation
    double[] setProbs = IntStream.range(0, predictions.length).parallel().
            mapToDouble(i->cbm.predictAssignmentProb(dataSet.getRow(i),predictions[i])).toArray();
    File predictionFile = Paths.get(output,name+"_predictions", "instance_hamming_loss_optimal","predictions.txt").toFile();
    try (BufferedWriter br = new BufferedWriter(new FileWriter(predictionFile))){
        for (int i=0;i<dataSet.getNumDataPoints();i++){
            br.write(predictions[i].toString());
            br.write(":");
            br.write(""+setProbs[i]);
            br.newLine();
        }
    }

    System.out.println("predicted sets and their probabilities are saved to "+predictionFile.getAbsolutePath());

    boolean individualPerformance = true;
    if (individualPerformance){
        ObjectMapper objectMapper = new ObjectMapper();
        objectMapper.writeValue(Paths.get(output,name+"_predictions", "instance_hamming_loss_optimal","individual_performance.json").toFile(),mlMeasures.getMacroAverage());
    }
    System.out.println("============================================================");
}
 
Example 29  Project: snowflake-jdbc  File: TelemetryServiceIT.java
@Ignore
@Test
public void stressTestCreateLog()
{
  // this log will be delivered to snowflake
  TelemetryService service = TelemetryService.getInstance();
  // send one http request for each event
  StopWatch sw = new StopWatch();
  sw.start();
  int rate = 50;
  int sent = 0;
  int duration = 60;
  while (sw.getTime() < duration * 1000)
  {
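    // Send just enough new events that the cumulative total tracks the target of `rate` events per elapsed second.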
    int toSend = (int) (sw.getTime() / 1000) * rate - sent;
    for (int i = 0; i < toSend; i++)
    {
      TelemetryEvent log = new TelemetryEvent.LogBuilder()
          .withName("StressTestLog")
          .withValue("This is an example log for stress test " + sent)
          .build();
      System.out.println("stress test: " + sent++ + " sent.");
      service.report(log);
    }
  }
  sw.stop();
}
 
Example 30

@Override
public RepositoryStatistics getLastStatistics( String repositoryId )
    throws MetadataRepositoryException
{
    StopWatch stopWatch = new StopWatch();
    stopWatch.start();
    try(RepositorySession session = repositorySessionFactory.createSession()) {
        final MetadataRepository metadataRepository = session.getRepository( );

        // TODO: consider a more efficient implementation that directly gets the last one from the content repository
        List<String> scans = metadataRepository.getMetadataFacets(session, repositoryId, DefaultRepositoryStatistics.FACET_ID);
        if (scans == null) {
            return null;
        }
        Collections.sort(scans);
        if (!scans.isEmpty()) {
            String name = scans.get(scans.size() - 1);
            RepositoryStatistics repositoryStatistics =
                    RepositoryStatistics.class.cast(metadataRepository.getMetadataFacet(session, repositoryId,
                            RepositoryStatistics.FACET_ID, name));
            stopWatch.stop();
            log.debug("time to find last RepositoryStatistics: {} ms", stopWatch.getTime());
            return repositoryStatistics;
        } else {
            return null;
        }
    }
}
 