Usage examples for the class org.apache.hadoop.util.JvmPauseMonitor

The examples below show how the org.apache.hadoop.util.JvmPauseMonitor API is used in real projects; you can also follow the project links to browse the full source code on GitHub.
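
Before the examples, a word on what JvmPauseMonitor actually does: it runs a daemon thread that repeatedly sleeps for a short, fixed interval and compares the requested sleep time with the wall-clock time that actually elapsed. Any large difference means the whole JVM was stalled, typically by a garbage-collection pause. The following is a minimal self-contained sketch of that idea, not Hadoop's implementation; the interval and threshold values are illustrative assumptions.

import java.util.concurrent.TimeUnit;

/** Minimal sketch of the pause-detection idea behind JvmPauseMonitor. */
public class PauseDetectorSketch {
  private static final long SLEEP_MS = 500;    // assumed polling interval
  private static final long WARN_MS = 10_000;  // assumed warn threshold

  public static void main(String[] args) throws InterruptedException {
    Thread monitor = new Thread(() -> {
      while (!Thread.currentThread().isInterrupted()) {
        long start = System.nanoTime();
        try {
          TimeUnit.MILLISECONDS.sleep(SLEEP_MS);
        } catch (InterruptedException e) {
          return; // shut down cleanly when interrupted
        }
        long elapsed = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start);
        long extra = elapsed - SLEEP_MS; // time the JVM was unresponsive
        if (extra > WARN_MS) {
          System.err.println("Detected a JVM pause of ~" + extra + " ms");
        }
      }
    }, "pause-monitor-sketch");
    monitor.setDaemon(true);
    monitor.start();
    TimeUnit.SECONDS.sleep(30); // let the sketch watch for a while
    monitor.interrupt();        // then stop it
  }
}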

Example 1   Project: hadoop   File: TestJvmMetrics.java
@Test public void testPresence() {
  JvmPauseMonitor pauseMonitor = new JvmPauseMonitor(new Configuration());
  JvmMetrics jvmMetrics = new JvmMetrics("test", "test");
  jvmMetrics.setPauseMonitor(pauseMonitor);
  MetricsRecordBuilder rb = getMetrics(jvmMetrics);
  MetricsCollector mc = rb.parent();

  verify(mc).addRecord(JvmMetrics);
  verify(rb).tag(ProcessName, "test");
  verify(rb).tag(SessionId, "test");
  for (JvmMetricsInfo info : JvmMetricsInfo.values()) {
    if (info.name().startsWith("Mem"))
      verify(rb).addGauge(eq(info), anyFloat());
    else if (info.name().startsWith("Gc"))
      verify(rb).addCounter(eq(info), anyLong());
    else if (info.name().startsWith("Threads"))
      verify(rb).addGauge(eq(info), anyInt());
    else if (info.name().startsWith("Log"))
      verify(rb).addCounter(eq(info), anyLong());
  }
}
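
The test relies on Mockito: getMetrics(jvmMetrics) snapshots the source into a mocked MetricsRecordBuilder, and the verify calls assert that every Mem*, Gc*, Threads*, and Log* metric was emitted with the expected type. Outside of a test, the wiring looks roughly like the sketch below, against the Hadoop 2.x-era API used in this example; the initSingleton call reflects how the JvmMetrics source is usually obtained, but treat the exact entry point as an assumption.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.metrics2.source.JvmMetrics;
import org.apache.hadoop.util.JvmPauseMonitor;

public class PauseMonitorWiring {
  public static void main(String[] args) throws InterruptedException {
    // Create and start the monitor (2.x-style constructor, as above), then
    // hand it to the JvmMetrics source so pause statistics are included in
    // every metrics snapshot.
    Configuration conf = new Configuration();
    JvmPauseMonitor pauseMonitor = new JvmPauseMonitor(conf);
    pauseMonitor.start();
    JvmMetrics jvmMetrics = JvmMetrics.initSingleton("MyDaemon", "session-1");
    jvmMetrics.setPauseMonitor(pauseMonitor);

    Thread.sleep(60_000); // stand-in for the daemon's real work
    pauseMonitor.stop();  // end the monitor thread on shutdown
  }
}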
 
Example 2   Project: big-c   File: TestJvmMetrics.java
@Test public void testPresence() {
  JvmPauseMonitor pauseMonitor = new JvmPauseMonitor(new Configuration());
  JvmMetrics jvmMetrics = new JvmMetrics("test", "test");
  jvmMetrics.setPauseMonitor(pauseMonitor);
  MetricsRecordBuilder rb = getMetrics(jvmMetrics);
  MetricsCollector mc = rb.parent();

  verify(mc).addRecord(JvmMetrics);
  verify(rb).tag(ProcessName, "test");
  verify(rb).tag(SessionId, "test");
  for (JvmMetricsInfo info : JvmMetricsInfo.values()) {
    if (info.name().startsWith("Mem"))
      verify(rb).addGauge(eq(info), anyFloat());
    else if (info.name().startsWith("Gc"))
      verify(rb).addCounter(eq(info), anyLong());
    else if (info.name().startsWith("Threads"))
      verify(rb).addGauge(eq(info), anyInt());
    else if (info.name().startsWith("Log"))
      verify(rb).addCounter(eq(info), anyLong());
  }
}
 
Example 3   Project: hadoop-ozone   File: StorageContainerManager.java
/**
 * Start service.
 */
public void start() throws IOException {
  if (LOG.isInfoEnabled()) {
    LOG.info(buildRpcServerStartMessage(
        "StorageContainerLocationProtocol RPC server",
        getClientRpcAddress()));
  }

  ms = HddsServerUtil
      .initializeMetrics(configuration, "StorageContainerManager");

  commandWatcherLeaseManager.start();
  getClientProtocolServer().start();

  if (LOG.isInfoEnabled()) {
    LOG.info(buildRpcServerStartMessage("ScmBlockLocationProtocol RPC " +
        "server", getBlockProtocolServer().getBlockRpcAddress()));
  }
  getBlockProtocolServer().start();

  if (LOG.isInfoEnabled()) {
    LOG.info(buildRpcServerStartMessage("ScmDatanodeProtocl RPC " +
        "server", getDatanodeProtocolServer().getDatanodeRpcAddress()));
  }
  getDatanodeProtocolServer().start();
  if (getSecurityProtocolServer() != null) {
    getSecurityProtocolServer().start();
  }

  httpServer.start();
  scmBlockManager.start();

  // Start jvm monitor
  jvmPauseMonitor = new JvmPauseMonitor();
  jvmPauseMonitor.init(configuration);
  jvmPauseMonitor.start();

  setStartTime();
}
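
start() has no matching teardown in this excerpt. Below is a minimal sketch of what the reverse looks like, assuming services are stopped in roughly the opposite order from start(); this is not the actual StorageContainerManager.stop().

public void stop() {
  // Hedged teardown sketch (an assumption, not the real SCM code): the
  // pause monitor follows the Hadoop Service lifecycle, so stop() here
  // ends its daemon thread.
  if (jvmPauseMonitor != null) {
    jvmPauseMonitor.stop();
  }
  // ... stop the block manager, HTTP server, and the RPC servers here,
  // in the reverse of the order they were started above ...
}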
 
Example 4   Project: hadoop-ozone   File: OzoneManager.java
private void startJVMPauseMonitor() {
  // Start jvm monitor
  jvmPauseMonitor = new JvmPauseMonitor();
  jvmPauseMonitor.init(configuration);
  jvmPauseMonitor.start();
}
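
Both Ozone examples use the Service-style API: a no-arg constructor, then init(conf), then start(). That means the pause thresholds can be tuned through the configuration before init(), as in the hedged sketch below; the threshold key constants are quoted from memory of JvmPauseMonitor and should be treated as assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.JvmPauseMonitor;

public class TunedPauseMonitor {
  public static void main(String[] args) {
    // Lower the logging thresholds before init(); WARN_THRESHOLD_KEY and
    // INFO_THRESHOLD_KEY are assumed to be the public keys on JvmPauseMonitor
    // ("jvm.pause.warn-threshold.ms" / "jvm.pause.info-threshold.ms").
    Configuration conf = new Configuration();
    conf.setLong(JvmPauseMonitor.WARN_THRESHOLD_KEY, 5_000); // warn at 5 s
    conf.setLong(JvmPauseMonitor.INFO_THRESHOLD_KEY, 500);   // info at 0.5 s

    JvmPauseMonitor monitor = new JvmPauseMonitor();
    monitor.init(conf);  // Service lifecycle: configure first ...
    monitor.start();     // ... then start the monitoring thread
    Runtime.getRuntime().addShutdownHook(new Thread(monitor::stop));
  }
}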
 
Example 5   Project: hadoop   File: NameNode.java
/**
 * Initialize name-node.
 * 
 * @param conf the configuration
 */
protected void initialize(Configuration conf) throws IOException {
  if (conf.get(HADOOP_USER_GROUP_METRICS_PERCENTILES_INTERVALS) == null) {
    String intervals = conf.get(DFS_METRICS_PERCENTILES_INTERVALS_KEY);
    if (intervals != null) {
      conf.set(HADOOP_USER_GROUP_METRICS_PERCENTILES_INTERVALS,
        intervals);
    }
  }

  UserGroupInformation.setConfiguration(conf);
  loginAsNameNodeUser(conf);

  NameNode.initMetrics(conf, this.getRole());
  StartupProgressMetrics.register(startupProgress);

  if (NamenodeRole.NAMENODE == role) {
    startHttpServer(conf);
  }

  this.spanReceiverHost =
    SpanReceiverHost.get(conf, DFSConfigKeys.DFS_SERVER_HTRACE_PREFIX);

  loadNamesystem(conf);

  rpcServer = createRpcServer(conf);
  if (clientNamenodeAddress == null) {
    // This is expected for MiniDFSCluster. Set it now using 
    // the RPC server's bind address.
    clientNamenodeAddress = 
        NetUtils.getHostPortString(rpcServer.getRpcAddress());
    LOG.info("Clients are to use " + clientNamenodeAddress + " to access"
        + " this namenode/service.");
  }
  if (NamenodeRole.NAMENODE == role) {
    httpServer.setNameNodeAddress(getNameNodeAddress());
    httpServer.setFSImage(getFSImage());
  }
  
  pauseMonitor = new JvmPauseMonitor(conf);
  pauseMonitor.start();
  metrics.getJvmMetrics().setPauseMonitor(pauseMonitor);
  
  startCommonServices(conf);
}
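
Note the ordering in this 2.x-era excerpt: the Configuration goes straight into the constructor (there is no init() step), NameNode.initMetrics ran earlier in the method so metrics.getJvmMetrics() is already non-null, and the monitor is started before it is handed to the metrics source, so pause counters are live from the first snapshot. The matching pauseMonitor.stop() lives in the NameNode's shutdown path, which this excerpt omits.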
 
Example 6   Project: hadoop   File: DataNode.java
/**
 * This method starts the data node with the specified conf.
 * 
 * @param conf - the configuration
 *  if conf's CONFIG_PROPERTY_SIMULATED property is set
 *  then a simulated storage based data node is created.
 * 
 * @param dataDirs - only for a non-simulated storage data node
 * @throws IOException
 */
void startDataNode(Configuration conf, 
                   List<StorageLocation> dataDirs,
                   SecureResources resources
                   ) throws IOException {

  // settings global for all BPs in the Data Node
  this.secureResources = resources;
  synchronized (this) {
    this.dataDirs = dataDirs;
  }
  this.conf = conf;
  this.dnConf = new DNConf(conf);
  checkSecureConfig(dnConf, conf, resources);

  this.spanReceiverHost =
    SpanReceiverHost.get(conf, DFSConfigKeys.DFS_SERVER_HTRACE_PREFIX);

  if (dnConf.maxLockedMemory > 0) {
    if (!NativeIO.POSIX.getCacheManipulator().verifyCanMlock()) {
      throw new RuntimeException(String.format(
          "Cannot start datanode because the configured max locked memory" +
          " size (%s) is greater than zero and native code is not available.",
          DFS_DATANODE_MAX_LOCKED_MEMORY_KEY));
    }
    if (Path.WINDOWS) {
      NativeIO.Windows.extendWorkingSetSize(dnConf.maxLockedMemory);
    } else {
      long ulimit = NativeIO.POSIX.getCacheManipulator().getMemlockLimit();
      if (dnConf.maxLockedMemory > ulimit) {
        throw new RuntimeException(String.format(
          "Cannot start datanode because the configured max locked memory" +
          " size (%s) of %d bytes is more than the datanode's available" +
          " RLIMIT_MEMLOCK ulimit of %d bytes.",
          DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
          dnConf.maxLockedMemory,
          ulimit));
      }
    }
  }
  LOG.info("Starting DataNode with maxLockedMemory = " +
      dnConf.maxLockedMemory);

  storage = new DataStorage();
  
  // global DN settings
  registerMXBean();
  initDataXceiver(conf);
  startInfoServer(conf);
  pauseMonitor = new JvmPauseMonitor(conf);
  pauseMonitor.start();

  // BlockPoolTokenSecretManager is required to create ipc server.
  this.blockPoolTokenSecretManager = new BlockPoolTokenSecretManager();

  // Login is done by now. Set the DN user name.
  dnUserName = UserGroupInformation.getCurrentUser().getShortUserName();
  LOG.info("dnUserName = " + dnUserName);
  LOG.info("supergroup = " + supergroup);
  initIpcServer(conf);

  metrics = DataNodeMetrics.create(conf, getDisplayName());
  metrics.getJvmMetrics().setPauseMonitor(pauseMonitor);
  
  blockPoolManager = new BlockPoolManager(this);
  blockPoolManager.refreshNamenodes(conf);

  // Create the ReadaheadPool from the DataNode context so we can
  // exit without having to explicitly shutdown its thread pool.
  readaheadPool = ReadaheadPool.getInstance();
  saslClient = new SaslDataTransferClient(dnConf.conf, 
      dnConf.saslPropsResolver, dnConf.trustedChannelResolver);
  saslServer = new SaslDataTransferServer(dnConf, blockPoolTokenSecretManager);
}
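
The DataNode follows the same create-start-wire pattern as the NameNode, with one difference visible in the code order: the monitor is started early, right after the info server, so pauses during the rest of the (potentially slow) startup sequence are already being detected, while setPauseMonitor is only called later, once DataNodeMetrics.create has run.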
 
Example 7   Project: hadoop   File: JvmMetrics.java
public void setPauseMonitor(final JvmPauseMonitor pauseMonitor) {
  this.pauseMonitor = pauseMonitor;
}
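
This setter is the only coupling point between the two classes: whenever a metrics snapshot is taken, JvmMetrics polls the monitor's accumulated pause statistics and emits them as the Gc* counters verified in the tests above. A hedged sketch of that polling pattern follows; the getter and metric names are quoted from memory and should be treated as assumptions.

// Inside a metrics source's getMetrics(), roughly (hedged sketch):
if (pauseMonitor != null) {
  // Total extra time the JVM spent paused, as accumulated by the monitor.
  rb.addCounter(GcTotalExtraSleepTime,
      pauseMonitor.getTotalGcExtraSleepTime());
  // Counters for pauses that crossed the info/warn thresholds would be
  // emitted here in the same way.
}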
 
Example 8   Project: big-c   File: NameNode.java
/**
 * Initialize name-node.
 * 
 * @param conf the configuration
 */
protected void initialize(Configuration conf) throws IOException {
  if (conf.get(HADOOP_USER_GROUP_METRICS_PERCENTILES_INTERVALS) == null) {
    String intervals = conf.get(DFS_METRICS_PERCENTILES_INTERVALS_KEY);
    if (intervals != null) {
      conf.set(HADOOP_USER_GROUP_METRICS_PERCENTILES_INTERVALS,
        intervals);
    }
  }

  UserGroupInformation.setConfiguration(conf);
  loginAsNameNodeUser(conf);

  NameNode.initMetrics(conf, this.getRole());
  StartupProgressMetrics.register(startupProgress);

  if (NamenodeRole.NAMENODE == role) {
    startHttpServer(conf);
  }

  this.spanReceiverHost =
    SpanReceiverHost.get(conf, DFSConfigKeys.DFS_SERVER_HTRACE_PREFIX);

  loadNamesystem(conf);

  rpcServer = createRpcServer(conf);
  if (clientNamenodeAddress == null) {
    // This is expected for MiniDFSCluster. Set it now using 
    // the RPC server's bind address.
    clientNamenodeAddress = 
        NetUtils.getHostPortString(rpcServer.getRpcAddress());
    LOG.info("Clients are to use " + clientNamenodeAddress + " to access"
        + " this namenode/service.");
  }
  if (NamenodeRole.NAMENODE == role) {
    httpServer.setNameNodeAddress(getNameNodeAddress());
    httpServer.setFSImage(getFSImage());
  }
  
  pauseMonitor = new JvmPauseMonitor(conf);
  pauseMonitor.start();
  metrics.getJvmMetrics().setPauseMonitor(pauseMonitor);
  
  startCommonServices(conf);
}
 
Example 9   Project: big-c   File: DataNode.java
/**
 * This method starts the data node with the specified conf.
 * 
 * @param conf - the configuration
 *  if conf's CONFIG_PROPERTY_SIMULATED property is set
 *  then a simulated storage based data node is created.
 * 
 * @param dataDirs - only for a non-simulated storage data node
 * @throws IOException
 */
void startDataNode(Configuration conf, 
                   List<StorageLocation> dataDirs,
                   SecureResources resources
                   ) throws IOException {

  // settings global for all BPs in the Data Node
  this.secureResources = resources;
  synchronized (this) {
    this.dataDirs = dataDirs;
  }
  this.conf = conf;
  this.dnConf = new DNConf(conf);
  checkSecureConfig(dnConf, conf, resources);

  this.spanReceiverHost =
    SpanReceiverHost.get(conf, DFSConfigKeys.DFS_SERVER_HTRACE_PREFIX);

  if (dnConf.maxLockedMemory > 0) {
    if (!NativeIO.POSIX.getCacheManipulator().verifyCanMlock()) {
      throw new RuntimeException(String.format(
          "Cannot start datanode because the configured max locked memory" +
          " size (%s) is greater than zero and native code is not available.",
          DFS_DATANODE_MAX_LOCKED_MEMORY_KEY));
    }
    if (Path.WINDOWS) {
      NativeIO.Windows.extendWorkingSetSize(dnConf.maxLockedMemory);
    } else {
      long ulimit = NativeIO.POSIX.getCacheManipulator().getMemlockLimit();
      if (dnConf.maxLockedMemory > ulimit) {
        throw new RuntimeException(String.format(
          "Cannot start datanode because the configured max locked memory" +
          " size (%s) of %d bytes is more than the datanode's available" +
          " RLIMIT_MEMLOCK ulimit of %d bytes.",
          DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
          dnConf.maxLockedMemory,
          ulimit));
      }
    }
  }
  LOG.info("Starting DataNode with maxLockedMemory = " +
      dnConf.maxLockedMemory);

  storage = new DataStorage();
  
  // global DN settings
  registerMXBean();
  initDataXceiver(conf);
  startInfoServer(conf);
  pauseMonitor = new JvmPauseMonitor(conf);
  pauseMonitor.start();

  // BlockPoolTokenSecretManager is required to create ipc server.
  this.blockPoolTokenSecretManager = new BlockPoolTokenSecretManager();

  // Login is done by now. Set the DN user name.
  dnUserName = UserGroupInformation.getCurrentUser().getShortUserName();
  LOG.info("dnUserName = " + dnUserName);
  LOG.info("supergroup = " + supergroup);
  initIpcServer(conf);

  metrics = DataNodeMetrics.create(conf, getDisplayName());
  metrics.getJvmMetrics().setPauseMonitor(pauseMonitor);
  
  blockPoolManager = new BlockPoolManager(this);
  blockPoolManager.refreshNamenodes(conf);

  // Create the ReadaheadPool from the DataNode context so we can
  // exit without having to explicitly shutdown its thread pool.
  readaheadPool = ReadaheadPool.getInstance();
  saslClient = new SaslDataTransferClient(dnConf.conf, 
      dnConf.saslPropsResolver, dnConf.trustedChannelResolver);
  saslServer = new SaslDataTransferServer(dnConf, blockPoolTokenSecretManager);
}
 
Example 10   Project: big-c   File: JvmMetrics.java
public void setPauseMonitor(final JvmPauseMonitor pauseMonitor) {
  this.pauseMonitor = pauseMonitor;
}
 