Source code examples for the class org.apache.commons.lang.time.StopWatch

The following examples show how to use the org.apache.commons.lang.time.StopWatch API in real projects; you can also follow each link to view the full source code on GitHub.
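
Before the project samples, a minimal sketch of the basic StopWatch life cycle may help: create, start, stop, read the elapsed time, then reset. This sketch is not taken from any of the projects below; it assumes only that commons-lang 2.x is on the classpath, and the class name StopWatchSketch and the Thread.sleep placeholder are illustrative.

import org.apache.commons.lang.time.StopWatch;

public class StopWatchSketch {
    public static void main(String[] args) throws InterruptedException {
        StopWatch sw = new StopWatch();
        sw.start();                         // begin timing
        Thread.sleep(50);                   // placeholder for the work being measured
        sw.stop();                          // freeze the elapsed time
        System.out.println("Elapsed ms: " + sw.getTime()); // elapsed time as a long, in milliseconds
        System.out.println("Formatted:  " + sw);           // toString() formats as H:mm:ss.SSS
        sw.reset();                         // return to the unstarted state
        sw.start();                         // a reset watch can be timed again
        sw.stop();
    }
}

Note that a stopped StopWatch must be reset() before start() can be called again, otherwise it throws IllegalStateException; this is why Example 15 below calls reset() between its insert and flush measurements.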

public static void main(String[] args) {
    try {
        
        final int numThreads = Integer.valueOf(args[0]).intValue();
        final int numIters = Integer.valueOf(args[1]).intValue();
        final int poolSize = Integer.valueOf(args[2]).intValue();
        
        PooledPBEWithMD5AndDESStringEncryptorThreadedTest test = 
            new PooledPBEWithMD5AndDESStringEncryptorThreadedTest(numThreads, numIters, poolSize);
        
        System.out.println("Starting test. NumThreads: " + numThreads + " NumIters: " + numIters + " PoolSize: " + poolSize);
        StopWatch sw = new StopWatch();
        sw.start();
        test.testThreadedDigest();
        sw.stop();
        System.out.println("Test finished in: " + sw.toString());
        
    } catch (Exception e) {
        e.printStackTrace();
    }
}
 
public static void main(String[] args) {
    try {
        
        StandardStringDigesterThreadedTest test = new StandardStringDigesterThreadedTest();
        
        System.out.println("Starting test");
        StopWatch sw = new StopWatch();
        sw.start();
        test.testThreadedDigest();
        sw.stop();
        System.out.println("Test finished in: " + sw.toString());
        
    } catch (Exception e) {
        e.printStackTrace();
    }
}
 
public static void main(String[] args) {
    try {
        
        final int numThreads = Integer.valueOf(args[0]).intValue();
        final int numIters = Integer.valueOf(args[1]).intValue();
        final int poolSize = Integer.valueOf(args[2]).intValue();
        
        PooledStandardStringDigesterThreadedTest test = 
            new PooledStandardStringDigesterThreadedTest(numThreads, numIters, poolSize);
        
        System.out.println("Starting test. NumThreads: " + numThreads + " NumIters: " + numIters + " PoolSize: " + poolSize);
        StopWatch sw = new StopWatch();
        sw.start();
        test.testThreadedDigest();
        sw.stop();
        System.out.println("Test finished in: " + sw.toString());
        
    } catch (Exception e) {
        e.printStackTrace();
    }
}
 
Example 4  Project: soundwave  File: EsServiceMappingStore.java
public List<EsServiceMapping> getServiceMappings() throws Exception {
  StopWatch sw = new StopWatch();
  sw.start();
  List<EsServiceMapping> ret = new ArrayList<>();
  SearchResponse result = getByDocType(10000);
  for (SearchHit hit : result.getHits()) {
    try {
      EsServiceMapping
          serviceMapping =
          mapper.readValue(hit.getSourceAsString(), EsServiceMapping.class);
      serviceMapping.buildMatchPatterns();
      ret.add(serviceMapping);
    } catch (Exception ex) {
      logger.error("Cannot create Service mapping from {}", hit.getSourceAsString());
    }
  }
  sw.stop();
  logger.info("Refresh all service mappings in {} ms", sw.getTime());
  return ret;
}
 
Example 5  Project: SkyEye  File: CacheService.java
/**
 * Cache the configuration tables from the database
 */
private void loadCache() {
    StopWatch sw = new StopWatch();
    sw.start();
    LOGGER.info("start load config to cache");

    Iterable<ServiceInfo> serviceInfos = this.serviceInfoRepository.findAll();

    for (Iterator<ServiceInfo> it = serviceInfos.iterator(); it.hasNext();) {
        ServiceInfo serviceInfo = it.next();
        this.setOps.add(SERVICE_INFO_PREFIX, serviceInfo.getSid());
    }

    sw.stop();
    LOGGER.info("load config to cache end, cost {} ms", sw.getTime());
}
 
Example 6  Project: SkyEye  File: CacheService.java
/**
 * Cache the configuration tables from the database
 */
private void loadCache() {
    StopWatch sw = new StopWatch();
    sw.start();
    LOGGER.info("start load config to cache");

    Iterable<NameInfo> nameInfos = this.nameInfoRepository.findAll();

    for (Iterator<NameInfo> it = nameInfos.iterator(); it.hasNext();) {
        NameInfo nameInfo = it.next();
        this.setOps.add(mapping.get(nameInfo.getNameInfoPK().getType()), nameInfo.getNameInfoPK().getName());
    }

    sw.stop();
    LOGGER.info("load config to cache end, cost {} ms", sw.getTime());
}
 
Example 7  Project: freebencher  File: FbRunner.java
private void doIt() {
	boolean successful = false;

	StopWatch stopWatch = new StopWatch();
	stopWatch.start();
	try {
		successful = job.getTarget().invoke();
	} catch (Exception e) {
		successful = false;
	}
	stopWatch.stop();
	if (successful) {
		job.getResult().getSuccessfulTests().incrementAndGet();
	} else {
		job.getResult().getFailedTests().incrementAndGet();
	}

	job.getResult()
			.addSingleTestResult(successful, stopWatch.getTime());
	int results = job.getResult().getNumOfTests();
	if (results != 0 && !job.getOptions().isQuiet()
			&& (job.getResult().getNumOfTests() % 100 == 0)) {
		System.err.printf("%d/%d are done\n", results, job.getOptions()
				.getNumOfTests());
	}
}
 
@Override
public void execute() throws Exception {
    if (!this.executed) {
        StopWatch _time = new StopWatch();
        _time.start();
        try {
            __doExecute();
            // Mark as executed only when no exception occurred, to avoid re-execution
            this.executed = true;
        } finally {
            _time.stop();
            this.expenseTime = _time.getTime();
            //
            if (this.getConnectionHolder().getDataSourceCfgMeta().isShowSQL()) {
                _LOG.info(ExpressionUtils.bind("[${sql}]${param}[${count}][${time}]")
                        .set("sql", StringUtils.defaultIfBlank(this.sql, "@NULL"))
                        .set("param", __doSerializeParameters())
                        .set("count", "N/A")
                        .set("time", this.expenseTime + "ms").getResult());
            }
        }
    }
}
 
Example 9  Project: Eagle  File: JdbcEntityDeleterImpl.java
private int deleteByCriteria(Criteria criteria) throws Exception {
    String displaySql = SqlBuilder.buildQuery(criteria).getDisplayString();
    StopWatch stopWatch = new StopWatch();
    stopWatch.start();

    if(LOG.isDebugEnabled()) LOG.debug("Deleting by query: " + displaySql);
    try {
        TorqueStatementPeerImpl peer = ConnectionManagerFactory.getInstance().getStatementExecutor();
        int result = peer.delegate().doDelete(criteria);
        LOG.info(String.format("Deleted %s records in %s ms (sql: %s)",result,stopWatch.getTime(),displaySql));
        return result;
    } catch (Exception e) {
        LOG.error("Failed to delete by query: "+displaySql,e);
        throw e;
    } finally {
        stopWatch.stop();
    }
}
 
Example 10  Project: Eagle  File: JdbcEntityReaderImpl.java
@Override
public <E> List<E> query(List<String> ids) throws Exception {
    PrimaryKeyCriteriaBuilder criteriaBuilder = new PrimaryKeyCriteriaBuilder(ids,this.jdbcEntityDefinition.getJdbcTableName());
    Criteria criteria = criteriaBuilder.build();
    String displaySql = SqlBuilder.buildQuery(criteria).getDisplayString();
    if(LOG.isDebugEnabled()) LOG.debug("Querying: " + displaySql);
    EntityRecordMapper recordMapper = new EntityRecordMapper(jdbcEntityDefinition);
    final StopWatch stopWatch = new StopWatch();
    List<E> result;
    try {
        stopWatch.start();
        TorqueStatementPeerImpl peer = ConnectionManagerFactory.getInstance().getStatementExecutor();
        result = peer.delegate().doSelect(criteria, recordMapper);
        LOG.info(String.format("Read %s records in %s ms (sql: %s)",result.size(),stopWatch.getTime(),displaySql));
    }catch (Exception ex){
        LOG.error("Failed to query by: "+displaySql+", due to: "+ex.getMessage(),ex);
        throw new IOException("Failed to query by: "+displaySql,ex);
    }finally {
        stopWatch.stop();
    }
    return result;
}
 
Example 11  Project: Eagle  File: TestJdbcStorage.java
/**
 * TODO: Investigate why write performance becomes slower as the record count increases
 *
 * 1) Wrote 100000 records in about 18820 ms for empty table
 * 2) Wrote 100000 records in about 35056 ms when 1M records in table
 *
 * @throws IOException
 */
//@Test
public void testWriterPerformance() throws IOException {
    StopWatch stopWatch = new StopWatch();
    stopWatch.start();
    List<TestTimeSeriesAPIEntity> entityList = new ArrayList<TestTimeSeriesAPIEntity>();
    int i= 0;
    while( i++ < 100000){
        entityList.add(newInstance());
        if(entityList.size()>=1000) {
            ModifyResult<String> result = storage.create(entityList, entityDefinition);
            Assert.assertNotNull(result);
            entityList.clear();
        }
    }
    stopWatch.stop();
    LOG.info("Wrote 100000 records in "+stopWatch.getTime()+" ms");
}
 
Example 12  Project: eagle  File: JdbcEntityDeleterImpl.java
private int deleteByCriteria(Criteria criteria) throws Exception {
    String displaySql = SqlBuilder.buildQuery(criteria).getDisplayString();
    StopWatch stopWatch = new StopWatch();
    stopWatch.start();

    if(LOG.isDebugEnabled()) LOG.debug("Deleting by query: " + displaySql);
    try {
        TorqueStatementPeerImpl peer = ConnectionManagerFactory.getInstance().getStatementExecutor();
        int result = peer.delegate().doDelete(criteria);
        LOG.info(String.format("Deleted %s records in %s ms (sql: %s)",result,stopWatch.getTime(),displaySql));
        return result;
    } catch (Exception e) {
        LOG.error("Failed to delete by query: "+displaySql,e);
        throw e;
    } finally {
        stopWatch.stop();
    }
}
 
Example 13  Project: eagle  File: JdbcEntityReaderImpl.java
@Override
public <E> List<E> query(List<String> ids) throws Exception {
    PrimaryKeyCriteriaBuilder criteriaBuilder = new PrimaryKeyCriteriaBuilder(ids,this.jdbcEntityDefinition.getJdbcTableName());
    Criteria criteria = criteriaBuilder.build();
    String displaySql = SqlBuilder.buildQuery(criteria).getDisplayString();
    if(LOG.isDebugEnabled()) LOG.debug("Querying: " + displaySql);
    EntityRecordMapper recordMapper = new EntityRecordMapper(jdbcEntityDefinition);
    final StopWatch stopWatch = new StopWatch();
    List<E> result;
    try {
        stopWatch.start();
        TorqueStatementPeerImpl peer = ConnectionManagerFactory.getInstance().getStatementExecutor();
        criteria.addSelectColumn(new ColumnImpl(jdbcEntityDefinition.getJdbcTableName(),"*"));
        result = peer.delegate().doSelect(criteria, recordMapper);
        LOG.info(String.format("Read %s records in %s ms (sql: %s)",result.size(),stopWatch.getTime(),displaySql));
    }catch (Exception ex){
        LOG.error("Failed to query by: "+displaySql+", due to: "+ex.getMessage(),ex);
        throw new IOException("Failed to query by: "+displaySql,ex);
    }finally {
        stopWatch.stop();
    }
    return result;
}
 
Example 14  Project: eagle  File: TestJdbcStorage.java
/**
 * TODO: Investigate why write performance becomes slower as the record count increases
 *
 * 1) Wrote 100000 records in about 18820 ms for empty table
 * 2) Wrote 100000 records in about 35056 ms when 1M records in table
 *
 * @throws IOException
 */
@Test @Ignore("Ignore performance auto testing")
public void testWriterPerformance() throws IOException {
    StopWatch stopWatch = new StopWatch();
    stopWatch.start();
    List<TestTimeSeriesAPIEntity> entityList = new ArrayList<TestTimeSeriesAPIEntity>();
    int i= 0;
    while( i++ < 100000){
        entityList.add(newInstance());
        if(entityList.size()>=1000) {
            ModifyResult<String> result = storage.create(entityList, entityDefinition);
            Assert.assertNotNull(result);
            entityList.clear();
        }
    }
    stopWatch.stop();
    LOG.info("Wrote 100000 records in "+stopWatch.getTime()+" ms");
}
 
Example 15  Project: eagle  File: StreamWindowBenchmarkTest.java
public void sendDESCOrderedEventsToWindow(StreamWindow window, StreamWindowRepository.StorageType storageType, int num) {
    LOGGER.info("Sending {} events to {} ({})", num, window.getClass().getSimpleName(), storageType);
    StopWatch stopWatch = new StopWatch();
    stopWatch.start();
    int i = 0;
    while (i < num) {
        PartitionedEvent event = MockSampleMetadataFactory.createPartitionedEventGroupedByName("sampleStream_1", (window.startTime() + i));
        window.add(event);
        i++;
    }
    stopWatch.stop();
    performanceReport.put(num + "\tInsertTime\t" + storageType, stopWatch.getTime());
    LOGGER.info("Inserted {} events in {} ms", num, stopWatch.getTime());
    stopWatch.reset();
    stopWatch.start();
    window.flush();
    stopWatch.stop();
    performanceReport.put(num + "\tReadTime\t" + storageType, stopWatch.getTime());
}
 
Example 16  Project: eagle  File: StreamWindowBenchmarkTest.java
private void benchmarkTest(StreamWindow window, StreamWindowRepository.StorageType storageType) {
    StopWatch stopWatch = new StopWatch();
    stopWatch.start();
    LOGGER.info("\n===== Benchmark Test for {} ({}) =====", window.getClass().getSimpleName(), storageType);
    metricReporter.report();
    sendDESCOrderedEventsToWindow(window, storageType, 1000);
    metricReporter.report();
    sendDESCOrderedEventsToWindow(window, storageType, 10000);
    metricReporter.report();
    sendDESCOrderedEventsToWindow(window, storageType, 100000);
    metricReporter.report();
    sendDESCOrderedEventsToWindow(window, storageType, 1000000);
    metricReporter.report();
    stopWatch.stop();
    LOGGER.info("\n===== Finished in total {} ms =====\n", stopWatch.getTime());
}
 
Example 17  Project: score  File: ScoreEngineJobsImpl.java
/**
 * Job that will handle the joining of finished branches for parallel and non-blocking steps.
 */
@Override
public void joinFinishedSplitsJob() {
    try {
        if (logger.isDebugEnabled()) logger.debug("SplitJoinJob woke up at " + new Date());
        StopWatch stopWatch = new StopWatch();
        stopWatch.start();

        // try sequentially at most 'ITERATIONS' attempts
        // quit when there aren't any more results to process
        boolean moreToJoin = true;
        for (int i = 0; i < SPLIT_JOIN_ITERATIONS && moreToJoin; i++) {
            int joinedSplits = splitJoinService.joinFinishedSplits(SPLIT_JOIN_BULK_SIZE);
            moreToJoin = (joinedSplits == SPLIT_JOIN_BULK_SIZE);
        }

        stopWatch.stop();
        if (logger.isDebugEnabled()) logger.debug("finished SplitJoinJob in " + stopWatch);
    } catch (Exception ex) {
        logger.error("SplitJoinJob failed", ex);
    }
}
 
Example 18  Project: spacewalk  File: ErrataManagerTest.java
public void xxxxLookupErrataByAdvisoryType() throws IOException {

        String bugfix = "Bug Fix Advisory";
        String pea = "Product Enhancement Advisory";
        String security = "Security Advisory";

        StopWatch st = new StopWatch();
        st.start();
        List erratas = ErrataManager.lookupErrataByType(bugfix);
        outputErrataList(erratas);
        System.out.println("Got bugfixes: "  + erratas.size() + " time: " + st);
        assertTrue(erratas.size() > 0);
        erratas = ErrataManager.lookupErrataByType(pea);
        outputErrataList(erratas);
        System.out.println("Got pea enhancments: "  + erratas.size() + " time: " + st);
        assertTrue(erratas.size() > 0);
        erratas = ErrataManager.lookupErrataByType(security);
        outputErrataList(erratas);
        assertTrue(erratas.size() > 0);
        System.out.println("Got security advisories: "  + erratas.size() + " time: " + st);
        st.stop();
        System.out.println("TIME: " + st.getTime());
    }
 
Example 19  Project: spacewalk  File: BaseUpdateChannelCommand.java
/**
 * Private helper method to create a new UpdateErrataCacheEvent for the current
 * user's org and publish it to the MessageQueue.
 */
private void publishUpdateErrataCacheEvent() {
    StopWatch sw = new StopWatch();
    if (log.isDebugEnabled()) {
        log.debug("Updating errata cache");
        sw.start();
    }

    UpdateErrataCacheEvent uece =
        new UpdateErrataCacheEvent(UpdateErrataCacheEvent.TYPE_ORG);
    uece.setOrgId(user.getOrg().getId());
    MessageQueue.publish(uece);

    if (log.isDebugEnabled()) {
        sw.stop();
        log.debug("Finished Updating errata cache. Took [" +
                sw.getTime() + "]");
    }
}
 
Example 20  Project: spacewalk  File: ChannelSoftwareHandler.java
/**
 * Private helper method to create a new UpdateErrataCacheEvent and publish it to the
 * MessageQueue.
 * @param orgIn The org we're updating.
 */
private void publishUpdateErrataCacheEvent(Org orgIn) {
    StopWatch sw = new StopWatch();
    if (log.isDebugEnabled()) {
        log.debug("Updating errata cache");
        sw.start();
    }

    UpdateErrataCacheEvent uece =
        new UpdateErrataCacheEvent(UpdateErrataCacheEvent.TYPE_ORG);
    uece.setOrgId(orgIn.getId());
    MessageQueue.publish(uece);

    if (log.isDebugEnabled()) {
        sw.stop();
        log.debug("Finished Updating errata cache. Took [" +
                sw.getTime() + "]");
    }
}
 
Example 21  Project: spacewalk  File: LoginHelper.java
/**
 * @param orgIn
 */
private static void publishUpdateErrataCacheEvent(Org orgIn) {
    StopWatch sw = new StopWatch();
    if (log.isDebugEnabled()) {
        log.debug("Updating errata cache");
        sw.start();
    }

    UpdateErrataCacheEvent uece = new
        UpdateErrataCacheEvent(UpdateErrataCacheEvent.TYPE_ORG);
    uece.setOrgId(orgIn.getId());
    MessageQueue.publish(uece);

    if (log.isDebugEnabled()) {
        sw.stop();
        log.debug("Finished Updating errata cache. Took [" +
                sw.getTime() + "]");
    }
}
 
Example 22  Project: staash  File: ArchaeusPassConfigurationTest.java
void timeConfig(MyConfig config, String name, int count) {
        StopWatch sw = new StopWatch();
        sw.start();
        for (int i = 0; i < count; i++) {
            for (Method method : MyConfig.class.getMethods()) {
                try {
                    Object value = method.invoke(config);
//                    System.out.println(name + " " + method.getName() + " " + value);
                    
                } catch (Exception e) {
                    e.printStackTrace();
                }
            }
        }
        
        System.out.println(name + " took " + sw.getTime());
    }
 
Example 23  Project: datawave  File: RunningResource.java
/**
 * Initializes the scanner resource
 * 
 * @param auths
 * @param tableName
 * @throws TableNotFoundException
 * 
 */
@Override
protected void init(final String tableName, final Set<Authorizations> auths, Collection<Range> currentRange) throws TableNotFoundException {
    Preconditions.checkNotNull(tableName);
    Preconditions.checkArgument(null != currentRange && !currentRange.isEmpty());
    
    // copy the appropriate variables.
    ranges = Lists.newArrayList(currentRange);
    
    this.tableName = tableName;
    
    this.auths = Sets.newHashSet(auths);
    
    if (log.isTraceEnabled())
        log.trace("Creating scanner resource from " + tableName + " " + auths + " " + currentRange);
    
    internalTimer = new StopWatch();
    internalTimer.start();
    
    // let's pre-compute the hashcode.
    hashCode += new HashCodeBuilder().append(tableName).append(auths).append(ranges).toHashCode();
    
    baseScanner = ScannerHelper.createScanner(getConnector(), tableName, auths);
    
    if (baseScanner != null) {
        ((Scanner) baseScanner).setRange(currentRange.iterator().next());
    }
    
}
 
Example 24  Project: datawave  File: ScanSessionStats.java
public ScanSessionStats() {
    timers = new EnumMap<>(TIMERS.class);
    
    mergedTimers = new EnumMap<>(TIMERS.class);
    
    for (TIMERS timer : TIMERS.values()) {
        timers.put(timer, new StopWatch());
        mergedTimers.put(timer, new MutableLong());
    }
    
    keysSeen = new MutableLong();
}
 
Example 25  Project: datawave  File: BatchResource.java
/**
 * Initializes the scanner resource
 * 
 * @param auths
 * @param tableName
 * @throws TableNotFoundException
 * 
 */
@Override
protected void init(final String tableName, final Set<Authorizations> auths, Collection<Range> currentRange) throws TableNotFoundException {
    Preconditions.checkNotNull(tableName);
    Preconditions.checkArgument(null != currentRange && !currentRange.isEmpty());
    
    // copy the appropriate variables.
    ranges = Lists.newArrayList(currentRange);
    
    this.tableName = tableName;
    
    this.auths = Sets.newHashSet(auths);
    
    if (log.isTraceEnabled())
        log.trace("Creating scanner resource from " + tableName + " " + auths + " " + currentRange);
    
    internalTimer = new StopWatch();
    internalTimer.start();
    
    // let's pre-compute the hashcode.
    hashCode += new HashCodeBuilder().append(tableName).append(auths).append(ranges).toHashCode();
    
    baseScanner = ScannerHelper.createBatchScanner(getConnector(), tableName, auths, 2);
    
    if (baseScanner != null) {
        ((BatchScanner) baseScanner).setRanges(currentRange);
    }
    
}
 
@Override
public void reIndex(Project project) {
  if (indexingInProgress) {
    currentExecution.cancel(false);
  }
  //noinspection CodeBlock2Expr
  currentExecution = getApplication().executeOnPooledThread(() -> {
    getApplication().runReadAction(() -> {
      indexingInProgress = true;
      StopWatch timer = new StopWatch();
      timer.start();
      try {
        debug(() -> log.debug("-> Indexing requested for project " + project.getName()));
        // OrderEnumerator.orderEntries(project) is returning everything from all modules including root level module(which is called project in gradle terms)
        // So, we should not be doing anything with this

        Module[] modules = ModuleManager.getInstance(project).getModules();
        for (Module module : modules) {
          reindexModule(emptyList(), emptyList(), module);
        }
      } finally {
        indexingInProgress = false;
        timer.stop();
        debug(() -> log
            .debug("<- Indexing took " + timer.toString() + " for project " + project.getName()));
      }
    });
  });
}
 
@Override
public void reindex(Project project, Module[] modules) {
  if (indexingInProgress) {
    if (currentExecution != null) {
      currentExecution.cancel(false);
    }
  }
  //noinspection CodeBlock2Expr
  currentExecution = getApplication().executeOnPooledThread(() -> {
    getApplication().runReadAction(() -> {
      debug(() -> log.debug(
          "-> Indexing requested for a subset of modules of project " + project.getName()));
      indexingInProgress = true;
      StopWatch timer = new StopWatch();
      timer.start();
      try {
        for (Module module : modules) {
          debug(() -> log.debug("--> Indexing requested for module " + module.getName()));
          StopWatch moduleTimer = new StopWatch();
          moduleTimer.start();
          try {
            reindexModule(emptyList(), emptyList(), module);
          } finally {
            moduleTimer.stop();
            debug(() -> log.debug(
                "<-- Indexing took " + moduleTimer.toString() + " for module " + module
                    .getName()));
          }
        }
      } finally {
        indexingInProgress = false;
        timer.stop();
        debug(() -> log
            .debug("<- Indexing took " + timer.toString() + " for project " + project.getName()));
      }
    });
  });
}
 
Example 28  Project: SkyEye  File: CacheService.java
/**
 * Clear the cached configuration entries from Redis
 */
private void clearCache() {
    StopWatch sw = new StopWatch();
    sw.start();
    LOGGER.info("start clear config cache");

    Set<String> keys = this.redisTemplate.keys(CONFIG_PREFIX + Constants.XING_HAO);
    this.redisTemplate.delete(keys);

    sw.stop();
    LOGGER.info("clear config cache end, cost {} ms", sw.getTime());
}
 
Example 29  Project: SkyEye  File: CacheService.java
/**
 * Clear the cached configuration entries from Redis
 */
private void clearCache() {
    StopWatch sw = new StopWatch();
    sw.start();
    LOGGER.info("start clear config cache");

    Set<String> keys = this.redisTemplate.keys(CONFIG_PREFIX + Constants.XING_HAO);
    this.redisTemplate.delete(keys);

    sw.stop();
    LOGGER.info("clear config cache end, cost {} ms", sw.getTime());
}
 
Example 30  Project: zstack  File: TestCreate1000Vm.java
@SyncThread(level = 1000)
private void createVm(VmInstanceInventory vm, String rootDiskUuid, List<String> nws, List<String> disks) throws ApiSenderException {
    StopWatch watch = new StopWatch();
    watch.start();
    try {
        api.createVmByFullConfig(vm, rootDiskUuid, nws, disks);
    } finally {
        watch.stop();
        timeCost.add(watch.getTime());
        latch.countDown();
    }
}
 