org.apache.hadoop.metrics2.MetricsRecordBuilder源码实例Demo

下面列出了 org.apache.hadoop.metrics2.MetricsRecordBuilder 的实例代码,或者点击链接到 github 查看源代码,也可以在右侧发表评论。

源代码1 项目: big-c   文件: TestStartupProgressMetrics.java
@Test
public void testRunningState() {
  setStartupProgressForRunningState(startupProgress);

  MetricsRecordBuilder rb = getMetrics(metrics, true);

  // Overall progress: elapsed time only needs to be non-negative.
  assertTrue(getLongCounter("ElapsedTime", rb) >= 0L);
  assertGauge("PercentComplete", 0.375f, rb);

  // Loading fsimage has completed (100 of 100).
  assertCounter("LoadingFsImageCount", 100L, rb);
  assertTrue(getLongCounter("LoadingFsImageElapsedTime", rb) >= 0L);
  assertCounter("LoadingFsImageTotal", 100L, rb);
  assertGauge("LoadingFsImagePercentComplete", 1.0f, rb);

  // Loading edits is halfway done (100 of 200).
  assertCounter("LoadingEditsCount", 100L, rb);
  assertTrue(getLongCounter("LoadingEditsElapsedTime", rb) >= 0L);
  assertCounter("LoadingEditsTotal", 200L, rb);
  assertGauge("LoadingEditsPercentComplete", 0.5f, rb);

  // Saving checkpoint has not started.
  assertCounter("SavingCheckpointCount", 0L, rb);
  assertCounter("SavingCheckpointElapsedTime", 0L, rb);
  assertCounter("SavingCheckpointTotal", 0L, rb);
  assertGauge("SavingCheckpointPercentComplete", 0.0f, rb);

  // Safe mode has not started.
  assertCounter("SafeModeCount", 0L, rb);
  assertCounter("SafeModeElapsedTime", 0L, rb);
  assertCounter("SafeModeTotal", 0L, rb);
  assertGauge("SafeModePercentComplete", 0.0f, rb);
}
 
源代码2 项目: hbase   文件: HBaseMetrics2HadoopMetricsAdapter.java
/**
 * Adds a Dropwizard {@link Gauge}'s current value to the metrics2 record.
 * Only numeric gauge types supported by metrics2 (int/long/float/double)
 * are forwarded; anything else is logged and skipped.
 */
private void addGauge(String name, Gauge<?> gauge, MetricsRecordBuilder builder) {
  final MetricsInfo info = Interns.info(name, EMPTY_STRING);
  final Object o = gauge.getValue();

  // A gauge may legitimately report null; guard before the instanceof
  // dispatch, otherwise o.getClass() in the fallback branch would NPE.
  if (o == null) {
    LOG.warn("Ignoring Gauge (" + name + ") with null value");
    return;
  }

  // Figure out which gauge types metrics2 supports and call the right method
  if (o instanceof Integer) {
    builder.addGauge(info, (int) o);
  } else if (o instanceof Long) {
    builder.addGauge(info, (long) o);
  } else if (o instanceof Float) {
    builder.addGauge(info, (float) o);
  } else if (o instanceof Double) {
    builder.addGauge(info, (double) o);
  } else {
    LOG.warn("Ignoring Gauge (" + name + ") with unhandled type: " + o.getClass());
  }
}
 
源代码3 项目: hbase   文件: MetricsMasterQuotaSourceImpl.java
@Override
public void getMetrics(MetricsCollector metricsCollector, boolean all) {
  MetricsRecordBuilder builder = metricsCollector.addRecord(metricsRegistry.info());
  if (wrapper != null) {
    // Tag the record with a JSON summary of per-table space usage,
    // defaulting to an empty JSON array when nothing is tracked.
    Map<String,Entry<Long,Long>> tableUsages = wrapper.getTableSpaceUtilization();
    String tableSummary =
        (tableUsages == null || tableUsages.isEmpty())
            ? "[]"
            : generateJsonQuotaSummary(tableUsages.entrySet(), "table");
    builder.tag(Interns.info(TABLE_QUOTA_USAGE_NAME, TABLE_QUOTA_USAGE_DESC), tableSummary);

    // Same treatment for per-namespace space usage.
    Map<String,Entry<Long,Long>> namespaceUsages = wrapper.getNamespaceSpaceUtilization();
    String nsSummary =
        (namespaceUsages == null || namespaceUsages.isEmpty())
            ? "[]"
            : generateJsonQuotaSummary(namespaceUsages.entrySet(), "namespace");
    builder.tag(Interns.info(NS_QUOTA_USAGE_NAME, NS_QUOTA_USAGE_DESC), nsSummary);
  }
  metricsRegistry.snapshot(builder, all);
}
 
private void checkBytesWritten(long expectedBytesWritten) throws Exception {
  // With only 3 datanodes and ozone.scm.pipeline.creation.auto.factor.one
  // set to false, exactly one pipeline should exist in the system.
  List<Pipeline> pipelines = cluster.getStorageContainerManager()
      .getPipelineManager().getPipelines();
  Assert.assertEquals(1, pipelines.size());

  final String metricName =
      SCMPipelineMetrics.getBytesWrittenMetricName(pipelines.get(0));

  // Poll (every 500 ms, up to 5 minutes) until the per-pipeline counter
  // reaches the expected byte count.
  GenericTestUtils.waitFor(() -> {
    MetricsRecordBuilder record = getMetrics(
        SCMPipelineMetrics.class.getSimpleName());
    return expectedBytesWritten == getLongCounter(metricName, record);
  }, 500, 300000);
}
 
源代码5 项目: hadoop-ozone   文件: TestSCMPipelineMetrics.java
/**
 * Verifies that destroying a pipeline increments the
 * {@code NumPipelineDestroyed} counter.
 */
@Test
public void testPipelineDestroy() {
  PipelineManager pipelineManager = cluster
      .getStorageContainerManager().getPipelineManager();
  Optional<Pipeline> pipeline = pipelineManager
      .getPipelines().stream().findFirst();
  Assert.assertTrue(pipeline.isPresent());
  try {
    // Reuse the manager already fetched above rather than re-navigating
    // through the cluster object.
    pipelineManager.finalizeAndDestroyPipeline(pipeline.get(), false);
  } catch (IOException e) {
    // Fail with the cause attached instead of printStackTrace() + fail(),
    // which dumps to stderr and loses the exception from the test report.
    throw new AssertionError("Pipeline destroy failed", e);
  }
  MetricsRecordBuilder metrics = getMetrics(
      SCMPipelineMetrics.class.getSimpleName());
  assertCounter("NumPipelineDestroyed", 1L, metrics);
}
 
源代码6 项目: hbase   文件: MetricsUserSourceImpl.java
/**
 * Snapshot hook invoked by the metrics system; becomes a no-op once this
 * user source has been closed.
 *
 * @param mrb builder that would receive the snapshot
 * @param ignored the "all" flag from the metrics framework, unused here
 */
void snapshot(MetricsRecordBuilder mrb, boolean ignored) {
  // If there is a close that started be double extra sure
  // that we're not getting any locks and not putting data
  // into the metrics that should be removed. So early out
  // before even getting the lock.
  if (closed.get()) {
    return;
  }

  // Grab the read
  // This ensures that removes of the metrics
  // can't happen while we are putting them back in.
  synchronized (this) {

    // It's possible that a close happened between checking
    // the closed variable and getting the lock.
    if (closed.get()) {
      return;
    }
    // NOTE(review): the code that actually publishes metrics into mrb is
    // not present in this excerpt — confirm against the full source.
  }
}
 
源代码7 项目: hbase   文件: MutableHistogram.java
/**
 * Publishes one histogram's snapshot as a NumOps counter plus min/max/mean
 * and percentile gauges, all sharing the given name prefix and description.
 */
protected static void updateSnapshotMetrics(String name, String desc, Histogram histogram,
    Snapshot snapshot, MetricsRecordBuilder mrb) {
  // Operation count comes from the histogram itself; everything else is
  // derived from the supplied snapshot.
  mrb.addCounter(Interns.info(name + NUM_OPS_METRIC_NAME, desc), histogram.getCount());

  mrb.addGauge(Interns.info(name + MIN_METRIC_NAME, desc), snapshot.getMin());
  mrb.addGauge(Interns.info(name + MAX_METRIC_NAME, desc), snapshot.getMax());
  mrb.addGauge(Interns.info(name + MEAN_METRIC_NAME, desc), snapshot.getMean());

  // Percentile gauges, from the 25th up to the 99.9th.
  mrb.addGauge(Interns.info(name + TWENTY_FIFTH_PERCENTILE_METRIC_NAME, desc),
      snapshot.get25thPercentile());
  mrb.addGauge(Interns.info(name + MEDIAN_METRIC_NAME, desc),
      snapshot.getMedian());
  mrb.addGauge(Interns.info(name + SEVENTY_FIFTH_PERCENTILE_METRIC_NAME, desc),
      snapshot.get75thPercentile());
  mrb.addGauge(Interns.info(name + NINETIETH_PERCENTILE_METRIC_NAME, desc),
      snapshot.get90thPercentile());
  mrb.addGauge(Interns.info(name + NINETY_FIFTH_PERCENTILE_METRIC_NAME, desc),
      snapshot.get95thPercentile());
  mrb.addGauge(Interns.info(name + NINETY_EIGHTH_PERCENTILE_METRIC_NAME, desc),
      snapshot.get98thPercentile());
  mrb.addGauge(Interns.info(name + NINETY_NINETH_PERCENTILE_METRIC_NAME, desc),
      snapshot.get99thPercentile());
  mrb.addGauge(
      Interns.info(name + NINETY_NINE_POINT_NINETH_PERCENTILE_METRIC_NAME, desc),
      snapshot.get999thPercentile());
}
 
源代码8 项目: hadoop   文件: TestProtoBufRpc.java
@Test (timeout=5000)
public void testProtoBufRpc2() throws Exception {
  TestRpcService2 client = getClient2();

  // Exercise the ping method (empty request, no payload to verify).
  EmptyRequestProto pingRequest = EmptyRequestProto.newBuilder().build();
  client.ping2(null, pingRequest);

  // Exercise the echo method and check the payload round-trips.
  EchoRequestProto echoRequest = EchoRequestProto.newBuilder()
      .setMessage("hello").build();
  EchoResponseProto echoResponse = client.echo2(null, echoRequest);
  Assert.assertEquals(echoResponse.getMessage(), "hello");

  // The calls above must be reflected in the aggregate RPC metrics...
  MetricsRecordBuilder rpcMetrics = getMetrics(server.getRpcMetrics().name());
  assertCounterGt("RpcQueueTimeNumOps", 0L, rpcMetrics);
  assertCounterGt("RpcProcessingTimeNumOps", 0L, rpcMetrics);

  // ...and in the per-method detailed metrics.
  MetricsRecordBuilder rpcDetailedMetrics =
      getMetrics(server.getRpcDetailedMetrics().name());
  assertCounterGt("Echo2NumOps", 0L, rpcDetailedMetrics);
}
 
源代码9 项目: big-c   文件: JvmMetrics.java
/** Tallies live threads by state and publishes one gauge per state. */
private void getThreadUsage(MetricsRecordBuilder rb) {
  // Count threads in an array indexed by Thread.State ordinal instead of
  // keeping one counter variable per state.
  final int[] counts = new int[Thread.State.values().length];
  long[] threadIds = threadMXBean.getAllThreadIds();
  for (ThreadInfo threadInfo : threadMXBean.getThreadInfo(threadIds, 0)) {
    if (threadInfo == null) {
      continue; // thread exited between getAllThreadIds() and getThreadInfo()
    }
    counts[threadInfo.getThreadState().ordinal()]++;
  }
  rb.addGauge(ThreadsNew, counts[Thread.State.NEW.ordinal()])
    .addGauge(ThreadsRunnable, counts[Thread.State.RUNNABLE.ordinal()])
    .addGauge(ThreadsBlocked, counts[Thread.State.BLOCKED.ordinal()])
    .addGauge(ThreadsWaiting, counts[Thread.State.WAITING.ordinal()])
    .addGauge(ThreadsTimedWaiting, counts[Thread.State.TIMED_WAITING.ordinal()])
    .addGauge(ThreadsTerminated, counts[Thread.State.TERMINATED.ordinal()]);
}
 
源代码10 项目: big-c   文件: MethodMetric.java
/** Builds a tag-producing metric backed by a String-returning method. */
MutableMetric newTag(Class<?> resType) {
  // Guard clause: only String-valued methods can back a tag.
  if (resType != String.class) {
    throw new MetricsException("Unsupported tag type: "+ resType.getName());
  }
  return new MutableMetric() {
    @Override public void snapshot(MetricsRecordBuilder rb, boolean all) {
      try {
        Object result = method.invoke(obj, (Object[]) null);
        rb.tag(info, (String) result);
      } catch (Exception ex) {
        LOG.error("Error invoking method "+ method.getName(), ex);
      }
    }
  };
}
 
源代码11 项目: hadoop   文件: TestNameNodeMetrics.java
/** Test to ensure metrics reflects missing blocks */
@Test
public void testMissingBlock() throws Exception {
  // Create a file whose single block has exactly one replica.
  Path file = getTestPath("testMissingBlocks");
  createFile(file, 100, (short)1);

  // Corrupting that lone replica leaves the block with no live copies,
  // i.e. a missing block.
  LocatedBlock located = NameNodeAdapter.getBlockLocations(
      cluster.getNameNode(), file.toString(), 0, 1).get(0);
  cluster.getNamesystem().writeLock();
  try {
    bm.findAndMarkBlockAsCorrupt(located.getBlock(), located.getLocations()[0],
        "STORAGE_ID", "TEST");
  } finally {
    cluster.getNamesystem().writeUnlock();
  }

  updateMetrics();
  MetricsRecordBuilder record = getMetrics(NS_METRICS);
  assertGauge("UnderReplicatedBlocks", 1L, record);
  assertGauge("MissingBlocks", 1L, record);
  assertGauge("MissingReplOneBlocks", 1L, record);

  // Deleting the file should drive the under-replication count back to 0.
  fs.delete(file, true);
  waitForDnMetricValue(NS_METRICS, "UnderReplicatedBlocks", 0L);
}
 
源代码12 项目: hadoop   文件: TestJvmMetrics.java
@Test public void testPresence() {
  JvmPauseMonitor pauseMonitor = new JvmPauseMonitor(new Configuration());
  JvmMetrics jvmMetrics = new JvmMetrics("test", "test");
  jvmMetrics.setPauseMonitor(pauseMonitor);
  MetricsRecordBuilder recordBuilder = getMetrics(jvmMetrics);
  MetricsCollector collector = recordBuilder.parent();

  verify(collector).addRecord(JvmMetrics);
  verify(recordBuilder).tag(ProcessName, "test");
  verify(recordBuilder).tag(SessionId, "test");

  // Metric kind is encoded in the name prefix: Mem* are float gauges,
  // Gc*/Log* are long counters, Threads* are int gauges.
  for (JvmMetricsInfo metricInfo : JvmMetricsInfo.values()) {
    if (metricInfo.name().startsWith("Mem")) {
      verify(recordBuilder).addGauge(eq(metricInfo), anyFloat());
    } else if (metricInfo.name().startsWith("Gc")) {
      verify(recordBuilder).addCounter(eq(metricInfo), anyLong());
    } else if (metricInfo.name().startsWith("Threads")) {
      verify(recordBuilder).addGauge(eq(metricInfo), anyInt());
    } else if (metricInfo.name().startsWith("Log")) {
      verify(recordBuilder).addCounter(eq(metricInfo), anyLong());
    }
  }
}
 
源代码13 项目: hadoop   文件: TestNameNodeMetrics.java
/**
 * Test NN ReadOps Count and WriteOps Count
 */
@Test
public void testReadWriteOps() throws Exception {
  // Record the transaction count before doing any work.
  MetricsRecordBuilder before = getMetrics(NN_METRICS);
  long startWriteCounter =
      MetricsAsserts.getLongCounter("TransactionsNumOps", before);

  // Create a file, then read it back.
  Path filePath = new Path(TEST_ROOT_DIR_PATH, "ReadData.dat");
  createFile(filePath, 1024 * 1024, (short)2);
  readFile(fs, filePath);

  // The create must have bumped the transaction counter.
  MetricsRecordBuilder after = getMetrics(NN_METRICS);
  assertTrue(MetricsAsserts.getLongCounter("TransactionsNumOps", after) >
      startWriteCounter);
}
 
源代码14 项目: hadoop   文件: TestStartupProgressMetrics.java
@Test
public void testInitialState() {
  MetricsRecordBuilder rb = getMetrics(metrics, true);

  // Before startup begins, every counter is zero and no phase has made
  // any progress.
  assertCounter("ElapsedTime", 0L, rb);
  assertGauge("PercentComplete", 0.0f, rb);

  assertCounter("LoadingFsImageCount", 0L, rb);
  assertCounter("LoadingFsImageElapsedTime", 0L, rb);
  assertCounter("LoadingFsImageTotal", 0L, rb);
  assertGauge("LoadingFsImagePercentComplete", 0.0f, rb);

  assertCounter("LoadingEditsCount", 0L, rb);
  assertCounter("LoadingEditsElapsedTime", 0L, rb);
  assertCounter("LoadingEditsTotal", 0L, rb);
  assertGauge("LoadingEditsPercentComplete", 0.0f, rb);

  assertCounter("SavingCheckpointCount", 0L, rb);
  assertCounter("SavingCheckpointElapsedTime", 0L, rb);
  assertCounter("SavingCheckpointTotal", 0L, rb);
  assertGauge("SavingCheckpointPercentComplete", 0.0f, rb);

  assertCounter("SafeModeCount", 0L, rb);
  assertCounter("SafeModeElapsedTime", 0L, rb);
  assertCounter("SafeModeTotal", 0L, rb);
  assertGauge("SafeModePercentComplete", 0.0f, rb);
}
 
源代码15 项目: hadoop   文件: TestStartupProgressMetrics.java
@Test
public void testRunningState() {
  setStartupProgressForRunningState(startupProgress);

  MetricsRecordBuilder record = getMetrics(metrics, true);

  // Overall: elapsed time is non-negative, progress is partial.
  assertTrue(getLongCounter("ElapsedTime", record) >= 0L);
  assertGauge("PercentComplete", 0.375f, record);

  // Loading fsimage has completed.
  assertCounter("LoadingFsImageCount", 100L, record);
  assertTrue(getLongCounter("LoadingFsImageElapsedTime", record) >= 0L);
  assertCounter("LoadingFsImageTotal", 100L, record);
  assertGauge("LoadingFsImagePercentComplete", 1.0f, record);

  // Loading edits is halfway done.
  assertCounter("LoadingEditsCount", 100L, record);
  assertTrue(getLongCounter("LoadingEditsElapsedTime", record) >= 0L);
  assertCounter("LoadingEditsTotal", 200L, record);
  assertGauge("LoadingEditsPercentComplete", 0.5f, record);

  // Saving checkpoint and safe mode have not started.
  assertCounter("SavingCheckpointCount", 0L, record);
  assertCounter("SavingCheckpointElapsedTime", 0L, record);
  assertCounter("SavingCheckpointTotal", 0L, record);
  assertGauge("SavingCheckpointPercentComplete", 0.0f, record);
  assertCounter("SafeModeCount", 0L, record);
  assertCounter("SafeModeElapsedTime", 0L, record);
  assertCounter("SafeModeTotal", 0L, record);
  assertGauge("SafeModePercentComplete", 0.0f, record);
}
 
源代码16 项目: hadoop   文件: TestDataNodeMetrics.java
@Test
public void testDataNodeMetrics() throws Exception {
  Configuration conf = new HdfsConfiguration();
  SimulatedFSDataset.setFactory(conf);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  try {
    FileSystem fs = cluster.getFileSystem();
    // Larger than Integer.MAX_VALUE to exercise long-valued counters.
    final long LONG_FILE_LEN = Integer.MAX_VALUE + 1L;
    DFSTestUtil.createFile(fs, new Path("/tmp.txt"),
        LONG_FILE_LEN, (short)1, 1L);
    List<DataNode> datanodes = cluster.getDataNodes();
    // JUnit convention: expected value first, actual second (the original
    // had them swapped, which garbles the failure message).
    assertEquals(1, datanodes.size());
    DataNode datanode = datanodes.get(0);
    MetricsRecordBuilder rb = getMetrics(datanode.getMetrics().name());
    assertCounter("BytesWritten", LONG_FILE_LEN, rb);
    assertTrue("Expected non-zero number of incremental block reports",
        getLongCounter("IncrementalBlockReportsNumOps", rb) > 0);
  } finally {
    // cluster is assigned before the try, so it can never be null here.
    cluster.shutdown();
  }
}
 
源代码17 项目: phoenix   文件: GlobalMetricRegistriesAdapter.java
/**
 * Dispatches every metric in the registry to the adapter method matching
 * its concrete Dropwizard type; unknown types are logged and skipped.
 */
private void snapshotAllMetrics(MetricRegistry metricRegistry, MetricsRecordBuilder builder) {
    // Typed enhanced-for replaces the raw Iterator/Entry loop, eliminating
    // raw-type warnings and the unchecked Entry cast.
    for (Entry<String, Metric> e : metricRegistry.getMetrics().entrySet()) {
        String name = StringUtils.capitalize(e.getKey());
        Metric metric = e.getValue();
        if (metric instanceof Gauge) {
            this.addGauge(name, (Gauge) metric, builder);
        } else if (metric instanceof Counter) {
            this.addCounter(name, (Counter) metric, builder);
        } else if (metric instanceof Histogram) {
            this.addHistogram(name, (Histogram) metric, builder);
        } else if (metric instanceof Meter) {
            this.addMeter(name, (Meter) metric, builder);
        } else if (metric instanceof Timer) {
            this.addTimer(name, (Timer) metric, builder);
        } else {
            LOGGER.info("Ignoring unknown Metric class " + metric.getClass().getName());
        }
    }
}
 
源代码18 项目: big-c   文件: TestDataNodeMetrics.java
@Test
public void testDataNodeMetrics() throws Exception {
  Configuration conf = new HdfsConfiguration();
  SimulatedFSDataset.setFactory(conf);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  try {
    FileSystem fs = cluster.getFileSystem();
    // Larger than Integer.MAX_VALUE to exercise long-valued counters.
    final long LONG_FILE_LEN = Integer.MAX_VALUE + 1L;
    DFSTestUtil.createFile(fs, new Path("/tmp.txt"),
        LONG_FILE_LEN, (short)1, 1L);
    List<DataNode> datanodes = cluster.getDataNodes();
    // JUnit convention: expected value first, actual second (the original
    // had them swapped, which garbles the failure message).
    assertEquals(1, datanodes.size());
    DataNode datanode = datanodes.get(0);
    MetricsRecordBuilder rb = getMetrics(datanode.getMetrics().name());
    assertCounter("BytesWritten", LONG_FILE_LEN, rb);
    assertTrue("Expected non-zero number of incremental block reports",
        getLongCounter("IncrementalBlockReportsNumOps", rb) > 0);
  } finally {
    // cluster is assigned before the try, so it can never be null here.
    cluster.shutdown();
  }
}
 
@Test
public void testMetricTags() throws Exception {
  String accountName =
      testAccount.getRealAccount().getBlobEndpoint().getAuthority();
  String containerName =
      testAccount.getRealContainer().getName();
  MetricsRecordBuilder recorder = getMyMetrics();

  // Account and container tags must carry the live test values.
  verify(recorder).add(argThat(
      new TagMatcher("accountName", accountName)
      ));
  verify(recorder).add(argThat(
      new TagMatcher("containerName", containerName)
      ));

  // The context tag is fixed; the filesystem id only needs to be present.
  verify(recorder).add(argThat(
      new TagMatcher("Context", "azureFileSystem")
      ));
  verify(recorder).add(argThat(
      new TagExistsMatcher("wasbFileSystemId")
      ));
}
 
源代码20 项目: hadoop   文件: MethodMetric.java
/** Builds a counter metric backed by an int- or long-returning method. */
MutableMetric newCounter(final Class<?> type) {
  // Guard clause (De Morgan of the original positive test): reject
  // anything that is neither int nor long up front.
  if (!isInt(type) && !isLong(type)) {
    throw new MetricsException("Unsupported counter type: "+ type.getName());
  }
  return new MutableMetric() {
    @Override public void snapshot(MetricsRecordBuilder rb, boolean all) {
      try {
        Object result = method.invoke(obj, (Object[]) null);
        if (isInt(type)) {
          rb.addCounter(info, ((Integer) result).intValue());
        } else {
          rb.addCounter(info, ((Long) result).longValue());
        }
      } catch (Exception ex) {
        LOG.error("Error invoking method "+ method.getName(), ex);
      }
    }
  };
}
 
源代码21 项目: hadoop   文件: JvmMetrics.java
/** Publishes per-collector and aggregate GC counters. */
private void getGcUsage(MetricsRecordBuilder rb) {
  long totalCount = 0;
  long totalTimeMillis = 0;
  // Emit a counter pair for each collector and accumulate the totals.
  for (GarbageCollectorMXBean gcBean : gcBeans) {
    long collections = gcBean.getCollectionCount();
    long timeMs = gcBean.getCollectionTime();
    MetricsInfo[] gcInfo = getGcInfo(gcBean.getName());
    rb.addCounter(gcInfo[0], collections).addCounter(gcInfo[1], timeMs);
    totalCount += collections;
    totalTimeMillis += timeMs;
  }
  rb.addCounter(GcCount, totalCount)
    .addCounter(GcTimeMillis, totalTimeMillis);

  // Pause-monitor metrics are only available when a monitor is attached.
  if (pauseMonitor != null) {
    rb.addCounter(GcNumWarnThresholdExceeded,
        pauseMonitor.getNumGcWarnThreadholdExceeded());
    rb.addCounter(GcNumInfoThresholdExceeded,
        pauseMonitor.getNumGcInfoThresholdExceeded());
    rb.addCounter(GcTotalExtraSleepTime,
        pauseMonitor.getTotalGcExtraSleepTime());
  }
}
 
源代码22 项目: hadoop   文件: JvmMetrics.java
/** Tallies live threads by state and publishes one gauge per state. */
private void getThreadUsage(MetricsRecordBuilder rb) {
  // Count threads in an array indexed by Thread.State ordinal instead of
  // keeping one counter variable per state.
  final int[] counts = new int[Thread.State.values().length];
  long[] threadIds = threadMXBean.getAllThreadIds();
  for (ThreadInfo threadInfo : threadMXBean.getThreadInfo(threadIds, 0)) {
    if (threadInfo == null) {
      continue; // thread exited between getAllThreadIds() and getThreadInfo()
    }
    counts[threadInfo.getThreadState().ordinal()]++;
  }
  rb.addGauge(ThreadsNew, counts[Thread.State.NEW.ordinal()])
    .addGauge(ThreadsRunnable, counts[Thread.State.RUNNABLE.ordinal()])
    .addGauge(ThreadsBlocked, counts[Thread.State.BLOCKED.ordinal()])
    .addGauge(ThreadsWaiting, counts[Thread.State.WAITING.ordinal()])
    .addGauge(ThreadsTimedWaiting, counts[Thread.State.TIMED_WAITING.ordinal()])
    .addGauge(ThreadsTerminated, counts[Thread.State.TERMINATED.ordinal()]);
}
 
源代码23 项目: hbase   文件: MetricsIOSourceImpl.java
@Override
public void getMetrics(MetricsCollector metricsCollector, boolean all) {
  MetricsRecordBuilder builder = metricsCollector.addRecord(metricsName);

  // The wrapper can still be null because this runs during init.
  if (wrapper != null) {
    builder.addCounter(Interns.info(CHECKSUM_FAILURES_KEY, CHECKSUM_FAILURES_DESC),
      wrapper.getChecksumFailures());
  }

  metricsRegistry.snapshot(builder, all);
}
 
源代码24 项目: hadoop-ozone   文件: TestSCMContainerMetrics.java
@Test
public void testSCMContainerMetrics() {
  SCMMXBean scmmxBean = mock(SCMMXBean.class);

  // Plain HashMap instead of double-brace initialization: the anonymous
  // subclass idiom creates an extra class and pins a reference to the
  // enclosing test instance.
  Map<String, Integer> stateInfo = new HashMap<>();
  stateInfo.put(HddsProtos.LifeCycleState.OPEN.toString(), 2);
  stateInfo.put(HddsProtos.LifeCycleState.CLOSING.toString(), 3);
  stateInfo.put(HddsProtos.LifeCycleState.QUASI_CLOSED.toString(), 4);
  stateInfo.put(HddsProtos.LifeCycleState.CLOSED.toString(), 5);
  stateInfo.put(HddsProtos.LifeCycleState.DELETING.toString(), 6);
  stateInfo.put(HddsProtos.LifeCycleState.DELETED.toString(), 7);

  when(scmmxBean.getContainerStateCount()).thenReturn(stateInfo);

  MetricsRecordBuilder mb = mock(MetricsRecordBuilder.class);
  when(mb.addGauge(any(MetricsInfo.class), anyInt())).thenReturn(mb);

  MetricsCollector metricsCollector = mock(MetricsCollector.class);
  when(metricsCollector.addRecord(anyString())).thenReturn(mb);

  SCMContainerMetrics containerMetrics = new SCMContainerMetrics(scmmxBean);
  containerMetrics.getMetrics(metricsCollector, true);

  // Exactly one gauge per lifecycle state, with the stubbed counts.
  verify(mb, times(1)).addGauge(Interns.info("OpenContainers",
      "Number of open containers"), 2);
  verify(mb, times(1)).addGauge(Interns.info("ClosingContainers",
      "Number of containers in closing state"), 3);
  verify(mb, times(1)).addGauge(Interns.info("QuasiClosedContainers",
      "Number of containers in quasi closed state"), 4);
  verify(mb, times(1)).addGauge(Interns.info("ClosedContainers",
      "Number of containers in closed state"), 5);
  verify(mb, times(1)).addGauge(Interns.info("DeletingContainers",
      "Number of containers in deleting state"), 6);
  verify(mb, times(1)).addGauge(Interns.info("DeletedContainers",
      "Number of containers in deleted state"), 7);
}
 
源代码25 项目: big-c   文件: MutableCounterInt.java
@Override
public void snapshot(MetricsRecordBuilder builder, boolean all) {
  // Emit only when forced ("all") or when the value changed since the
  // last snapshot; emitting consumes the changed flag.
  if (!all && !changed()) {
    return;
  }
  builder.addCounter(info(), value());
  clearChanged();
}
 
源代码26 项目: big-c   文件: TestRPC.java
@Test
public void testRpcMetrics() throws Exception {
  Configuration conf = new Configuration();
  final int interval = 1;
  // Enable quantile metrics with a 1-second rollover interval.
  conf.setBoolean(CommonConfigurationKeys.
      RPC_METRICS_QUANTILE_ENABLE, true);
  conf.set(CommonConfigurationKeys.
      RPC_METRICS_PERCENTILES_INTERVALS_KEY, "" + interval);

  final Server server = new RPC.Builder(conf)
      .setProtocol(TestProtocol.class).setInstance(new TestImpl())
      .setBindAddress(ADDRESS).setPort(0).setNumHandlers(5).setVerbose(true)
      .build();
  server.start();
  final TestProtocol client = RPC.getProxy(TestProtocol.class,
      TestProtocol.versionID, server.getListenerAddress(), conf);
  try {
    // Generate enough traffic to populate the quantile windows.
    for (int i = 0; i < 1000; i++) {
      client.ping();
      client.echo("" + i);
    }
    MetricsRecordBuilder rpcMetrics =
        getMetrics(server.getRpcMetrics().name());
    assertTrue("Expected non-zero rpc queue time",
        getLongCounter("RpcQueueTimeNumOps", rpcMetrics) > 0);
    assertTrue("Expected non-zero rpc processing time",
        getLongCounter("RpcProcessingTimeNumOps", rpcMetrics) > 0);
    MetricsAsserts.assertQuantileGauges("RpcQueueTime" + interval + "s",
        rpcMetrics);
    MetricsAsserts.assertQuantileGauges("RpcProcessingTime" + interval + "s",
        rpcMetrics);
  } finally {
    if (client != null) {
      RPC.stopProxy(client);
    }
    server.stop();
  }
}
 
源代码27 项目: kylin   文件: HadoopMetrics2ReporterTest.java
@Test
public void testGaugeReporting() {
    final AtomicLong value = new AtomicLong(0L);
    @SuppressWarnings("rawtypes")
    final Gauge gauge = new Gauge<Long>() {
        @Override
        public Long getValue() {
            return value.get();
        }
    };

    // Hand the gauge to the reporter directly instead of going through a
    // live Dropwizard registry.
    @SuppressWarnings("rawtypes")
    TreeMap<String, Gauge> gauges = new TreeMap<>();
    gauges.put("my_gauge", gauge);
    metrics2Reporter.setDropwizardGauges(gauges);

    // The reported value is whatever the gauge holds at collection time.
    value.set(5L);

    MetricsCollector collector = mock(MetricsCollector.class);
    MetricsRecordBuilder rb = mock(MetricsRecordBuilder.class);
    Mockito.when(collector.addRecord(recordName)).thenReturn(rb);

    metrics2Reporter.getMetrics(collector, true);

    // A value of 5 must be reported against the record builder.
    verify(rb).addGauge(Interns.info("my_gauge", ""), value.get());
    verifyRecordBuilderUnits(rb);

    // Collection must swap in a fresh map, not reuse the one we supplied.
    assertTrue("Should not be the same map instance after collection",
            gauges != metrics2Reporter.getDropwizardGauges());
}
 
源代码28 项目: phoenix   文件: GlobalMetricRegistriesAdapter.java
/**
 * Exposes a Dropwizard meter as five gauges: a raw count plus the
 * mean / 1m / 5m / 15m exponentially-weighted rates.
 */
private void addMeter(String name, Meter meter, MetricsRecordBuilder rb) {
    rb.addGauge(Interns.info(name + "_count", ""), meter.getCount());
    rb.addGauge(Interns.info(name + "_mean_rate", ""), meter.getMeanRate());
    rb.addGauge(Interns.info(name + "_1min_rate", ""), meter.getOneMinuteRate());
    rb.addGauge(Interns.info(name + "_5min_rate", ""), meter.getFiveMinuteRate());
    rb.addGauge(Interns.info(name + "_15min_rate", ""), meter.getFifteenMinuteRate());
}
 
源代码29 项目: hadoop   文件: TestUserGroupInformation.java
/**
 * Asserts that kerberos login metrics reflect the given numbers of
 * successful and failed logins.
 *
 * @param success expected LoginSuccess op count (checked only if &gt; 0)
 * @param failure expected LoginFailure op count (checked only if &gt; 0)
 */
public static void verifyLoginMetrics(long success, int failure)
    throws IOException {
  // Ensure metrics related to kerberos login is updated.
  MetricsRecordBuilder rb = getMetrics("UgiMetrics");
  if (success > 0) {
    assertCounter("LoginSuccessNumOps", success, rb);
    assertGaugeGt("LoginSuccessAvgTime", 0, rb);
  }
  if (failure > 0) {
    // Fixed metric name: was "LoginFailureNumPos", which is never emitted;
    // UgiMetrics publishes "LoginFailureNumOps" (mirroring the success
    // counter above), so the original assertion could not match.
    assertCounter("LoginFailureNumOps", failure, rb);
    assertGaugeGt("LoginFailureAvgTime", 0, rb);
  }
}
 
源代码30 项目: hadoop   文件: TestMetricsAnnotations.java
@Test public void testFields() {
  MyMetrics metrics = new MyMetrics();
  MetricsSource source = MetricsAnnotations.makeSource(metrics);

  // Bump every annotated field once so each appears in the snapshot.
  metrics.c1.incr();
  metrics.c2.incr();
  metrics.g1.incr();
  metrics.g2.incr();
  metrics.g3.incr();
  metrics.r1.add(1);
  metrics.s1.add(1);
  metrics.rs1.add("rs1", 1);

  MetricsRecordBuilder recordBuilder = getMetrics(source);

  // Counters and gauges derived from the simple fields.
  verify(recordBuilder).addCounter(info("C1", "C1"), 1);
  verify(recordBuilder).addCounter(info("Counter2", "Counter2 desc"), 1L);
  verify(recordBuilder).addGauge(info("G1", "G1"), 1);
  verify(recordBuilder).addGauge(info("G2", "G2"), 1);
  verify(recordBuilder).addGauge(info("G3", "g3 desc"), 1L);

  // Rate/stat fields expand into a NumOps counter plus an AvgTime gauge.
  verify(recordBuilder).addCounter(info("R1NumOps", "Number of ops for r1"), 1L);
  verify(recordBuilder).addGauge(info("R1AvgTime", "Average time for r1"), 1.0);
  verify(recordBuilder).addCounter(info("S1NumOps", "Number of ops for s1"), 1L);
  verify(recordBuilder).addGauge(info("S1AvgTime", "Average time for s1"), 1.0);
  verify(recordBuilder).addCounter(info("Rs1NumOps", "Number of ops for rs1"), 1L);
  verify(recordBuilder).addGauge(info("Rs1AvgTime", "Average time for rs1"), 1.0);
}