org.apache.hadoop.yarn.api.records.NodeState Source Code Examples

Listed below are code examples of org.apache.hadoop.yarn.api.records.NodeState drawn from open-source projects; the full source files can be found in each project's GitHub repository.
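Before the examples, here is a minimal, self-contained sketch of the pattern most of them share: asking a YarnClient for the reports of nodes in a given NodeState and reading basic information from each report. NodeState is the ResourceManager's view of a node's lifecycle; in the Hadoop 2.x line used by these projects its values are NEW, RUNNING, UNHEALTHY, DECOMMISSIONED, LOST and REBOOTED. This sketch is not taken from any of the projects below; the class name NodeStateDemo is made up for illustration, and a reachable YARN cluster configuration on the classpath is assumed.

import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.records.NodeReport;
import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class NodeStateDemo {
  public static void main(String[] args) throws Exception {
    // Standard YarnClient lifecycle: create, init with a configuration, start.
    Configuration conf = new YarnConfiguration();
    YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(conf);
    yarnClient.start();
    try {
      // Ask the ResourceManager only for nodes currently in the RUNNING state;
      // getNodeReports is a varargs method, so several states may be passed.
      List<NodeReport> running = yarnClient.getNodeReports(NodeState.RUNNING);
      for (NodeReport report : running) {
        System.out.println(report.getNodeId() + " is " + report.getNodeState()
            + ", capability " + report.getCapability());
      }
    } finally {
      yarnClient.stop();
    }
  }
}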

Example 1  Project: big-c  File: TestResourceTrackerService.java
private void checkUnhealthyNMCount(MockRM rm, MockNM nm1, boolean health,
    int count) throws Exception {
  
  int waitCount = 0;
  while((rm.getRMContext().getRMNodes().get(nm1.getNodeId())
      .getState() != NodeState.UNHEALTHY) == health
      && waitCount++ < 20) {
    synchronized (this) {
      wait(100);
    }
  }
  Assert.assertFalse((rm.getRMContext().getRMNodes().get(nm1.getNodeId())
      .getState() != NodeState.UNHEALTHY) == health);
  Assert.assertEquals("Unhealthy metrics not incremented", count,
      ClusterMetrics.getMetrics().getUnhealthyNMs());
}
 
Example 2  File: AbstractYarnClusterDescriptor.java

private ClusterResourceDescription getCurrentFreeClusterResources(YarnClient yarnClient) throws YarnException, IOException {
	List<NodeReport> nodes = yarnClient.getNodeReports(NodeState.RUNNING);

	int totalFreeMemory = 0;
	int containerLimit = 0;
	int[] nodeManagersFree = new int[nodes.size()];

	for (int i = 0; i < nodes.size(); i++) {
		NodeReport rep = nodes.get(i);
		int free = rep.getCapability().getMemory() - (rep.getUsed() != null ? rep.getUsed().getMemory() : 0);
		nodeManagersFree[i] = free;
		totalFreeMemory += free;
		if (free > containerLimit) {
			containerLimit = free;
		}
	}
	return new ClusterResourceDescription(totalFreeMemory, containerLimit, nodeManagersFree);
}
 
Example 3  Project: flink  File: AbstractYarnClusterDescriptor.java
private ClusterResourceDescription getCurrentFreeClusterResources(YarnClient yarnClient) throws YarnException, IOException {
	List<NodeReport> nodes = yarnClient.getNodeReports(NodeState.RUNNING);

	int totalFreeMemory = 0;
	int containerLimit = 0;
	int[] nodeManagersFree = new int[nodes.size()];

	for (int i = 0; i < nodes.size(); i++) {
		NodeReport rep = nodes.get(i);
		int free = rep.getCapability().getMemory() - (rep.getUsed() != null ? rep.getUsed().getMemory() : 0);
		nodeManagersFree[i] = free;
		totalFreeMemory += free;
		if (free > containerLimit) {
			containerLimit = free;
		}
	}
	return new ClusterResourceDescription(totalFreeMemory, containerLimit, nodeManagersFree);
}
 
Example 4  Project: hadoop  File: RMNodeImpl.java
private void updateMetricsForRejoinedNode(NodeState previousNodeState) {
  ClusterMetrics metrics = ClusterMetrics.getMetrics();
  metrics.incrNumActiveNodes();

  switch (previousNodeState) {
  case LOST:
    metrics.decrNumLostNMs();
    break;
  case REBOOTED:
    metrics.decrNumRebootedNMs();
    break;
  case DECOMMISSIONED:
    metrics.decrDecommisionedNMs();
    break;
  case UNHEALTHY:
    metrics.decrNumUnhealthyNMs();
    break;
  default:
    LOG.debug("Unexpected previous node state");    
  }
}
 
Example 5  Project: hadoop  File: JobImpl.java
private void actOnUnusableNode(NodeId nodeId, NodeState nodeState) {
  // rerun previously successful map tasks
  List<TaskAttemptId> taskAttemptIdList = nodesToSucceededTaskAttempts.get(nodeId);
  if(taskAttemptIdList != null) {
    String mesg = "TaskAttempt killed because it ran on unusable node "
        + nodeId;
    for(TaskAttemptId id : taskAttemptIdList) {
      if(TaskType.MAP == id.getTaskId().getTaskType()) {
      // reschedule only map tasks because their outputs may be unusable
        LOG.info(mesg + ". AttemptId:" + id);
        eventHandler.handle(new TaskAttemptKillEvent(id, mesg));
      }
    }
  }
  // currently running task attempts on unusable nodes are handled in
  // RMContainerAllocator
}
 
Example 6  Project: big-c  File: MockNodes.java
public static List<RMNode> newNodes(int racks, int nodesPerRack,
                                      Resource perNode) {
  List<RMNode> list = Lists.newArrayList();
  for (int i = 0; i < racks; ++i) {
    for (int j = 0; j < nodesPerRack; ++j) {
      if (j == (nodesPerRack - 1)) {
        // One unhealthy node per rack.
        list.add(nodeInfo(i, perNode, NodeState.UNHEALTHY));
      }
      if (j == 0) {
        // One node with label
        list.add(nodeInfo(i, perNode, NodeState.RUNNING, ImmutableSet.of("x")));
      } else {
        list.add(newNodeInfo(i, perNode));
      }
    }
  }
  return list;
}
 
Example 7  Project: tez  File: TestAMNodeTracker.java
@Test(timeout=5000)
public void testHealthUpdateUnknownNode() {
  AppContext appContext = mock(AppContext.class);

  AMNodeTracker amNodeTracker = new AMNodeTracker(eventHandler, appContext);
  doReturn(amNodeTracker).when(appContext).getNodeTracker();
  amNodeTracker.init(new Configuration(false));
  amNodeTracker.start();

  NodeId nodeId = NodeId.newInstance("unknownhost", 2342);

  NodeReport nodeReport = generateNodeReport(nodeId, NodeState.UNHEALTHY);
  amNodeTracker.handle(new AMNodeEventStateChanged(nodeReport, 0));
  dispatcher.await();

  amNodeTracker.stop();
  // No exceptions - the status update was ignored. Not bothering to capture
  // the log message for verification.
}
 
Example 8  Project: big-c  File: JobImpl.java
private void actOnUnusableNode(NodeId nodeId, NodeState nodeState) {
  // rerun previously successful map tasks
  List<TaskAttemptId> taskAttemptIdList = nodesToSucceededTaskAttempts.get(nodeId);
  if(taskAttemptIdList != null) {
    String mesg = "TaskAttempt killed because it ran on unusable node "
        + nodeId;
    for(TaskAttemptId id : taskAttemptIdList) {
      if(TaskType.MAP == id.getTaskId().getTaskType()) {
      // reschedule only map tasks because their outputs may be unusable
        LOG.info(mesg + ". AttemptId:" + id);
        eventHandler.handle(new TaskAttemptKillEvent(id, mesg));
      }
    }
  }
  // currently running task attempts on unusable nodes are handled in
  // RMContainerAllocator
}
 
Example 9  Project: hadoop  File: TestRMWebServicesNodes.java
@Test
public void testNodesQueryNew() throws JSONException, Exception {
  WebResource r = resource();
  MockNM nm1 = rm.registerNode("h1:1234", 5120);
  MockNM nm2 = rm.registerNode("h2:1235", 5121);
  rm.sendNodeStarted(nm1);
  rm.NMwaitForState(nm1.getNodeId(), NodeState.RUNNING);
  rm.NMwaitForState(nm2.getNodeId(), NodeState.NEW);

  ClientResponse response = r.path("ws").path("v1").path("cluster")
      .path("nodes").queryParam("states", NodeState.NEW.toString())
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);

  assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
  JSONObject json = response.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements", 1, json.length());
  JSONObject nodes = json.getJSONObject("nodes");
  assertEquals("incorrect number of elements", 1, nodes.length());
  JSONArray nodeArray = nodes.getJSONArray("node");
  assertEquals("incorrect number of elements", 1, nodeArray.length());
  JSONObject info = nodeArray.getJSONObject(0);

  verifyNodeInfo(info, nm2);
}
 
Example 10  Project: hadoop  File: TestRMWebServicesNodes.java
@Test
public void testNodesQueryRunning() throws JSONException, Exception {
  WebResource r = resource();
  MockNM nm1 = rm.registerNode("h1:1234", 5120);
  MockNM nm2 = rm.registerNode("h2:1235", 5121);
  rm.sendNodeStarted(nm1);
  rm.NMwaitForState(nm1.getNodeId(), NodeState.RUNNING);
  rm.NMwaitForState(nm2.getNodeId(), NodeState.NEW);
  ClientResponse response = r.path("ws").path("v1").path("cluster")
      .path("nodes").queryParam("states", "running")
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
  JSONObject json = response.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements", 1, json.length());
  JSONObject nodes = json.getJSONObject("nodes");
  assertEquals("incorrect number of elements", 1, nodes.length());
  JSONArray nodeArray = nodes.getJSONArray("node");
  assertEquals("incorrect number of elements", 1, nodeArray.length());
}
 
Example 11  Project: big-c  File: RMNodeImpl.java
public void handle(RMNodeEvent event) {
  LOG.debug("Processing " + event.getNodeId() + " of type " + event.getType());
  try {
    writeLock.lock();
    NodeState oldState = getState();
    try {
       stateMachine.doTransition(event.getType(), event);
    } catch (InvalidStateTransitonException e) {
      LOG.error("Can't handle this event at current state", e);
      LOG.error("Invalid event " + event.getType() + 
          " on Node  " + this.nodeId);
    }
    if (oldState != getState()) {
      LOG.info(nodeId + " Node Transitioned from " + oldState + " to "
               + getState());
    }
  } finally {
    writeLock.unlock();
  }
}
 
Example 12  Project: hadoop  File: TestRMWebServicesNodes.java
@Test
public void testQueryAll() throws Exception {
  WebResource r = resource();
  MockNM nm1 = rm.registerNode("h1:1234", 5120);
  MockNM nm2 = rm.registerNode("h2:1235", 5121);
  MockNM nm3 = rm.registerNode("h3:1236", 5122);
  rm.sendNodeStarted(nm1);
  rm.sendNodeStarted(nm3);
  rm.NMwaitForState(nm1.getNodeId(), NodeState.RUNNING);
  rm.NMwaitForState(nm2.getNodeId(), NodeState.NEW);
  rm.sendNodeLost(nm3);

  ClientResponse response = r.path("ws").path("v1").path("cluster")
      .path("nodes")
      .queryParam("states", Joiner.on(',').join(EnumSet.allOf(NodeState.class)))
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);

  assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
  JSONObject json = response.getEntity(JSONObject.class);
  JSONObject nodes = json.getJSONObject("nodes");
  assertEquals("incorrect number of elements", 1, nodes.length());
  JSONArray nodeArray = nodes.getJSONArray("node");
  assertEquals("incorrect number of elements", 3, nodeArray.length());
}
 
Example 13  Project: hadoop  File: MockNodes.java
public static List<RMNode> newNodes(int racks, int nodesPerRack,
                                      Resource perNode) {
  List<RMNode> list = Lists.newArrayList();
  for (int i = 0; i < racks; ++i) {
    for (int j = 0; j < nodesPerRack; ++j) {
      if (j == (nodesPerRack - 1)) {
        // One unhealthy node per rack.
        list.add(nodeInfo(i, perNode, NodeState.UNHEALTHY));
      }
      if (j == 0) {
        // One node with label
        list.add(nodeInfo(i, perNode, NodeState.RUNNING, ImmutableSet.of("x")));
      } else {
        list.add(newNodeInfo(i, perNode));
      }
    }
  }
  return list;
}
 
Example 14  Project: hadoop  File: MockNodes.java
public MockRMNodeImpl(NodeId nodeId, String nodeAddr, String httpAddress,
    Resource perNode, String rackName, String healthReport,
    long lastHealthReportTime, int cmdPort, String hostName, NodeState state,
    Set<String> labels) {
  this.nodeId = nodeId;
  this.nodeAddr = nodeAddr;
  this.httpAddress = httpAddress;
  this.perNode = perNode;
  this.rackName = rackName;
  this.healthReport = healthReport;
  this.lastHealthReportTime = lastHealthReportTime;
  this.cmdPort = cmdPort;
  this.hostName = hostName;
  this.state = state;
  this.labels = labels;
}
 
Example 15  Project: big-c  File: TestRMNodeTransitions.java
@Test
public void testRunningDecommission() {
  RMNodeImpl node = getRunningNode();
  ClusterMetrics cm = ClusterMetrics.getMetrics();
  int initialActive = cm.getNumActiveNMs();
  int initialLost = cm.getNumLostNMs();
  int initialUnhealthy = cm.getUnhealthyNMs();
  int initialDecommissioned = cm.getNumDecommisionedNMs();
  int initialRebooted = cm.getNumRebootedNMs();
  node.handle(new RMNodeEvent(node.getNodeID(),
      RMNodeEventType.DECOMMISSION));
  Assert.assertEquals("Active Nodes", initialActive - 1, cm.getNumActiveNMs());
  Assert.assertEquals("Lost Nodes", initialLost, cm.getNumLostNMs());
  Assert.assertEquals("Unhealthy Nodes",
      initialUnhealthy, cm.getUnhealthyNMs());
  Assert.assertEquals("Decommissioned Nodes",
      initialDecommissioned + 1, cm.getNumDecommisionedNMs());
  Assert.assertEquals("Rebooted Nodes",
      initialRebooted, cm.getNumRebootedNMs());
  Assert.assertEquals(NodeState.DECOMMISSIONED, node.getState());
}
 
Example 16  Project: jumbune  File: ClusterProfilingHelper.java
/**
 * Gets the available vCores in the cluster.
 *
 * @param rmCommunicator the ResourceManager communicator
 * @return the available vCores in the cluster
 * @throws IOException Signals that an I/O exception has occurred.
 */
private int getAvailableVCoresInCluster(RMCommunicator rmCommunicator) throws IOException {
	List<NodeReport> nodeReports = null;
	try {
		nodeReports = rmCommunicator.getNodeReports();
	} catch (YarnException e) {
		LOGGER.error(JumbuneRuntimeException.throwYarnException(e.getStackTrace()));
	}
	Set<String> hostname = new HashSet<String>();
	
	int totalVCores = 0;
	int usedVCores = 0;
	for (NodeReport report : nodeReports) {
		if (!hostname.contains(report.getHttpAddress()) && report.getNodeState().equals(NodeState.RUNNING)) {
			hostname.add(report.getHttpAddress());
			totalVCores += report.getCapability().getVirtualCores();
			if (report.getUsed() != null) {
				usedVCores += report.getUsed().getVirtualCores();
			}
		}
	}
	int availableVCores = totalVCores - usedVCores;
	return availableVCores;
}
 
Example 17  Project: hadoop  File: TestRMNodeTransitions.java
@Test
public void testRunningExpire() {
  RMNodeImpl node = getRunningNode();
  ClusterMetrics cm = ClusterMetrics.getMetrics();
  int initialActive = cm.getNumActiveNMs();
  int initialLost = cm.getNumLostNMs();
  int initialUnhealthy = cm.getUnhealthyNMs();
  int initialDecommissioned = cm.getNumDecommisionedNMs();
  int initialRebooted = cm.getNumRebootedNMs();
  node.handle(new RMNodeEvent(node.getNodeID(), RMNodeEventType.EXPIRE));
  Assert.assertEquals("Active Nodes", initialActive - 1, cm.getNumActiveNMs());
  Assert.assertEquals("Lost Nodes", initialLost + 1, cm.getNumLostNMs());
  Assert.assertEquals("Unhealthy Nodes",
      initialUnhealthy, cm.getUnhealthyNMs());
  Assert.assertEquals("Decommissioned Nodes",
      initialDecommissioned, cm.getNumDecommisionedNMs());
  Assert.assertEquals("Rebooted Nodes",
      initialRebooted, cm.getNumRebootedNMs());
  Assert.assertEquals(NodeState.LOST, node.getState());
}
 
Example 18  Project: hadoop  File: TestRMNodeTransitions.java
@Test
public void testUnhealthyExpire() {
  RMNodeImpl node = getUnhealthyNode();
  ClusterMetrics cm = ClusterMetrics.getMetrics();
  int initialActive = cm.getNumActiveNMs();
  int initialLost = cm.getNumLostNMs();
  int initialUnhealthy = cm.getUnhealthyNMs();
  int initialDecommissioned = cm.getNumDecommisionedNMs();
  int initialRebooted = cm.getNumRebootedNMs();
  node.handle(new RMNodeEvent(node.getNodeID(), RMNodeEventType.EXPIRE));
  Assert.assertEquals("Active Nodes", initialActive, cm.getNumActiveNMs());
  Assert.assertEquals("Lost Nodes", initialLost + 1, cm.getNumLostNMs());
  Assert.assertEquals("Unhealthy Nodes",
      initialUnhealthy - 1, cm.getUnhealthyNMs());
  Assert.assertEquals("Decommissioned Nodes",
      initialDecommissioned, cm.getNumDecommisionedNMs());
  Assert.assertEquals("Rebooted Nodes",
      initialRebooted, cm.getNumRebootedNMs());
  Assert.assertEquals(NodeState.LOST, node.getState());
}
 
Example 19  Project: hadoop  File: TestRMNodeTransitions.java
@Test
public void testRunningRebooting() {
  RMNodeImpl node = getRunningNode();
  ClusterMetrics cm = ClusterMetrics.getMetrics();
  int initialActive = cm.getNumActiveNMs();
  int initialLost = cm.getNumLostNMs();
  int initialUnhealthy = cm.getUnhealthyNMs();
  int initialDecommissioned = cm.getNumDecommisionedNMs();
  int initialRebooted = cm.getNumRebootedNMs();
  node.handle(new RMNodeEvent(node.getNodeID(),
      RMNodeEventType.REBOOTING));
  Assert.assertEquals("Active Nodes", initialActive - 1, cm.getNumActiveNMs());
  Assert.assertEquals("Lost Nodes", initialLost, cm.getNumLostNMs());
  Assert.assertEquals("Unhealthy Nodes",
      initialUnhealthy, cm.getUnhealthyNMs());
  Assert.assertEquals("Decommissioned Nodes",
      initialDecommissioned, cm.getNumDecommisionedNMs());
  Assert.assertEquals("Rebooted Nodes",
      initialRebooted + 1, cm.getNumRebootedNMs());
  Assert.assertEquals(NodeState.REBOOTED, node.getState());
}
 
Example 20  Project: hadoop  File: TestRMNodeTransitions.java
@Test
public void testReconnect() {
  RMNodeImpl node = getRunningNode();
  ClusterMetrics cm = ClusterMetrics.getMetrics();
  int initialActive = cm.getNumActiveNMs();
  int initialLost = cm.getNumLostNMs();
  int initialUnhealthy = cm.getUnhealthyNMs();
  int initialDecommissioned = cm.getNumDecommisionedNMs();
  int initialRebooted = cm.getNumRebootedNMs();
  node.handle(new RMNodeReconnectEvent(node.getNodeID(), node, null, null));
  Assert.assertEquals("Active Nodes", initialActive, cm.getNumActiveNMs());
  Assert.assertEquals("Lost Nodes", initialLost, cm.getNumLostNMs());
  Assert.assertEquals("Unhealthy Nodes",
      initialUnhealthy, cm.getUnhealthyNMs());
  Assert.assertEquals("Decommissioned Nodes",
      initialDecommissioned, cm.getNumDecommisionedNMs());
  Assert.assertEquals("Rebooted Nodes",
      initialRebooted, cm.getNumRebootedNMs());
  Assert.assertEquals(NodeState.RUNNING, node.getState());
  Assert.assertNotNull(nodesListManagerEvent);
  Assert.assertEquals(NodesListManagerEventType.NODE_USABLE,
      nodesListManagerEvent.getType());
}
 
Example 21  Project: jumbune  File: ClusterProfilingHelper.java
/**
 * Gets the total memory available in the cluster.
 *
 * @param rmCommunicator the ResourceManager communicator
 * @return the total memory available in the cluster
 * @throws YarnException the yarn exception
 * @throws IOException Signals that an I/O exception has occurred.
 */
private int getTotalMemoryAvailableInCluster(RMCommunicator rmCommunicator) throws YarnException, IOException {
	List<NodeReport> reports = rmCommunicator.getNodeReports();
	int availableMemory = 0;

	Set<String> hostname = new HashSet<String>();

	for (NodeReport nodeReport : reports) {
		if (!hostname.contains(nodeReport.getHttpAddress()) && nodeReport.getNodeState().equals(NodeState.RUNNING)) {
			hostname.add(nodeReport.getHttpAddress());
			availableMemory += nodeReport.getCapability().getMemory()
					- (nodeReport.getUsed() == null ? 0 : nodeReport.getUsed().getMemory());
		}
	}
	return availableMemory;
}
 
Example 22  Project: hadoop  File: TestRMNodeTransitions.java
@Test
public void testResourceUpdateOnNewNode() {
  RMNodeImpl node = getNewNode(Resource.newInstance(4096, 4, 4));
  Resource oldCapacity = node.getTotalCapability();
  assertEquals("Memory resource is not match.", oldCapacity.getMemory(), 4096);
  assertEquals("CPU resource is not match.", oldCapacity.getVirtualCores(), 4);
  node.handle(new RMNodeResourceUpdateEvent(node.getNodeID(),
      ResourceOption.newInstance(Resource.newInstance(2048, 2, 2),
          RMNode.OVER_COMMIT_TIMEOUT_MILLIS_DEFAULT)));
  Resource newCapacity = node.getTotalCapability();
  assertEquals("Memory resource is not match.", newCapacity.getMemory(), 2048);
  assertEquals("CPU resource is not match.", newCapacity.getVirtualCores(), 2);
  assertEquals("GPU resource is not match.", newCapacity.getGpuCores(), 2);
  
  Assert.assertEquals(NodeState.NEW, node.getState());
}
 
Example 23  Project: big-c  File: TestRMNodeTransitions.java
@Test
public void testResourceUpdateOnRunningNode() {
  RMNodeImpl node = getRunningNode();
  Resource oldCapacity = node.getTotalCapability();
  assertEquals("Memory resource is not match.", oldCapacity.getMemory(), 4096);
  assertEquals("CPU resource is not match.", oldCapacity.getVirtualCores(), 4);
  node.handle(new RMNodeResourceUpdateEvent(node.getNodeID(),
      ResourceOption.newInstance(Resource.newInstance(2048, 2), 
          RMNode.OVER_COMMIT_TIMEOUT_MILLIS_DEFAULT)));
  Resource newCapacity = node.getTotalCapability();
  assertEquals("Memory resource is not match.", newCapacity.getMemory(), 2048);
  assertEquals("CPU resource is not match.", newCapacity.getVirtualCores(), 2);
  
  Assert.assertEquals(NodeState.RUNNING, node.getState());
  Assert.assertNotNull(nodesListManagerEvent);
  Assert.assertEquals(NodesListManagerEventType.NODE_USABLE,
      nodesListManagerEvent.getType());
}
 
Example 24  Project: hadoop  File: NodeCLI.java
/**
 * Lists the nodes matching the given node states
 * 
 * @param nodeStates
 * @throws YarnException
 * @throws IOException
 */
private void listClusterNodes(Set<NodeState> nodeStates) 
          throws YarnException, IOException {
  PrintWriter writer = new PrintWriter(
      new OutputStreamWriter(sysout, Charset.forName("UTF-8")));
  List<NodeReport> nodesReport = client.getNodeReports(
                                     nodeStates.toArray(new NodeState[0]));
  writer.println("Total Nodes:" + nodesReport.size());
  writer.printf(NODES_PATTERN, "Node-Id", "Node-State", "Node-Http-Address",
      "Number-of-Running-Containers");
  for (NodeReport nodeReport : nodesReport) {
    writer.printf(NODES_PATTERN, nodeReport.getNodeId(), nodeReport
        .getNodeState(), nodeReport.getHttpAddress(), nodeReport
        .getNumContainers());
  }
  writer.flush();
}
 
Example 25  Project: hadoop  File: TestYarnCLI.java
private List<NodeReport> getNodeReports(int noOfNodes, NodeState state,
    boolean emptyNodeLabel) {
  List<NodeReport> nodeReports = new ArrayList<NodeReport>();

  for (int i = 0; i < noOfNodes; i++) {
    Set<String> nodeLabels = null;
    if (!emptyNodeLabel) {
      // node labels is not ordered, but when we output it, it should be
      // ordered
      nodeLabels = ImmutableSet.of("c", "b", "a", "x", "z", "y");
    }
    NodeReport nodeReport = NodeReport.newInstance(NodeId
      .newInstance("host" + i, 0), state, "host" + 1 + ":8888",
        "rack1", Records.newRecord(Resource.class), Records
            .newRecord(Resource.class), 0, "", 0, nodeLabels);
    nodeReports.add(nodeReport);
  }
  return nodeReports;
}
 
Example 26  Project: big-c  File: JobImpl.java
@Override
public void transition(JobImpl job, JobEvent event) {
  JobUpdatedNodesEvent updateEvent = (JobUpdatedNodesEvent) event;
  for(NodeReport nr: updateEvent.getUpdatedNodes()) {
    NodeState nodeState = nr.getNodeState();
    if(nodeState.isUnusable()) {
      // act on the updates
      job.actOnUnusableNode(nr.getNodeId(), nodeState);
    }
  }
}
 
Example 27  File: AbstractYarnClusterDescriptor.java

@Override
public String getClusterDescription() {

	try {
		ByteArrayOutputStream baos = new ByteArrayOutputStream();
		PrintStream ps = new PrintStream(baos);

		YarnClusterMetrics metrics = yarnClient.getYarnClusterMetrics();

		ps.append("NodeManagers in the ClusterClient " + metrics.getNumNodeManagers());
		List<NodeReport> nodes = yarnClient.getNodeReports(NodeState.RUNNING);
		final String format = "|%-16s |%-16s %n";
		ps.printf("|Property         |Value          %n");
		ps.println("+---------------------------------------+");
		int totalMemory = 0;
		int totalCores = 0;
		for (NodeReport rep : nodes) {
			final Resource res = rep.getCapability();
			totalMemory += res.getMemory();
			totalCores += res.getVirtualCores();
			ps.format(format, "NodeID", rep.getNodeId());
			ps.format(format, "Memory", res.getMemory() + " MB");
			ps.format(format, "vCores", res.getVirtualCores());
			ps.format(format, "HealthReport", rep.getHealthReport());
			ps.format(format, "Containers", rep.getNumContainers());
			ps.println("+---------------------------------------+");
		}
		ps.println("Summary: totalMemory " + totalMemory + " totalCores " + totalCores);
		List<QueueInfo> qInfo = yarnClient.getAllQueues();
		for (QueueInfo q : qInfo) {
			ps.println("Queue: " + q.getQueueName() + ", Current Capacity: " + q.getCurrentCapacity() + " Max Capacity: " +
				q.getMaximumCapacity() + " Applications: " + q.getApplications().size());
		}
		return baos.toString();
	} catch (Exception e) {
		throw new RuntimeException("Couldn't get cluster description", e);
	}
}
 
Example 28  Project: flink  File: AbstractYarnClusterDescriptor.java
@Override
public String getClusterDescription() {

	try {
		ByteArrayOutputStream baos = new ByteArrayOutputStream();
		PrintStream ps = new PrintStream(baos);

		YarnClusterMetrics metrics = yarnClient.getYarnClusterMetrics();

		ps.append("NodeManagers in the ClusterClient " + metrics.getNumNodeManagers());
		List<NodeReport> nodes = yarnClient.getNodeReports(NodeState.RUNNING);
		final String format = "|%-16s |%-16s %n";
		ps.printf("|Property         |Value          %n");
		ps.println("+---------------------------------------+");
		int totalMemory = 0;
		int totalCores = 0;
		for (NodeReport rep : nodes) {
			final Resource res = rep.getCapability();
			totalMemory += res.getMemory();
			totalCores += res.getVirtualCores();
			ps.format(format, "NodeID", rep.getNodeId());
			ps.format(format, "Memory", res.getMemory() + " MB");
			ps.format(format, "vCores", res.getVirtualCores());
			ps.format(format, "HealthReport", rep.getHealthReport());
			ps.format(format, "Containers", rep.getNumContainers());
			ps.println("+---------------------------------------+");
		}
		ps.println("Summary: totalMemory " + totalMemory + " totalCores " + totalCores);
		List<QueueInfo> qInfo = yarnClient.getAllQueues();
		for (QueueInfo q : qInfo) {
			ps.println("Queue: " + q.getQueueName() + ", Current Capacity: " + q.getCurrentCapacity() + " Max Capacity: " +
				q.getMaximumCapacity() + " Applications: " + q.getApplications().size());
		}
		return baos.toString();
	} catch (Exception e) {
		throw new RuntimeException("Couldn't get cluster description", e);
	}
}
 
Example 29  Project: hadoop  File: NodeInfo.java
public FakeRMNodeImpl(NodeId nodeId, String nodeAddr, String httpAddress,
    Resource perNode, String rackName, String healthReport,
    int cmdPort, String hostName, NodeState state) {
  this.nodeId = nodeId;
  this.nodeAddr = nodeAddr;
  this.httpAddress = httpAddress;
  this.perNode = perNode;
  this.rackName = rackName;
  this.healthReport = healthReport;
  this.cmdPort = cmdPort;
  this.hostName = hostName;
  this.state = state;
  toCleanUpApplications = new ArrayList<ApplicationId>();
  toCleanUpContainers = new ArrayList<ContainerId>();
}
 
Example 30  Project: big-c  File: TestAMRMClient.java
@BeforeClass
public static void setup() throws Exception {
  // start minicluster
  conf = new YarnConfiguration();
  conf.setLong(
    YarnConfiguration.RM_AMRM_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS,
    rolling_interval_sec);
  conf.setLong(YarnConfiguration.RM_AM_EXPIRY_INTERVAL_MS, am_expire_ms);
  conf.setInt(YarnConfiguration.RM_NM_HEARTBEAT_INTERVAL_MS, 100);
  conf.setLong(YarnConfiguration.NM_LOG_RETAIN_SECONDS, 1);
  yarnCluster = new MiniYARNCluster(TestAMRMClient.class.getName(), nodeCount, 1, 1);
  yarnCluster.init(conf);
  yarnCluster.start();

  // start rm client
  yarnClient = YarnClient.createYarnClient();
  yarnClient.init(conf);
  yarnClient.start();

  // get node info
  nodeReports = yarnClient.getNodeReports(NodeState.RUNNING);
  
  priority = Priority.newInstance(1);
  priority2 = Priority.newInstance(2);
  capability = Resource.newInstance(1024, 1);

  node = nodeReports.get(0).getNodeId().getHost();
  rack = nodeReports.get(0).getRackName();
  nodes = new String[]{ node };
  racks = new String[]{ rack };
}