Source Code Examples for the org.apache.hadoop.mapreduce.TaskReport Class

The examples below show how to use the org.apache.hadoop.mapreduce.TaskReport API. Each snippet names the project and file it was taken from; the full source can be viewed on GitHub.
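
A minimal sketch of the pattern most of these examples share: fetch the TaskReport array for one task type from a Job and inspect each report's status, progress, and (for failed tasks) diagnostics. This sketch is illustrative only and is not taken from any of the projects below; it assumes you already hold a submitted Job instance.

import org.apache.hadoop.mapred.TIPStatus;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.TaskReport;
import org.apache.hadoop.mapreduce.TaskType;

public class TaskReportSketch {
  // Print status, progress, and failure diagnostics for every map task of a job.
  public static void printMapTaskReports(Job job) throws Exception {
    // One TaskReport per task of the requested type (MAP, REDUCE, JOB_SETUP, JOB_CLEANUP).
    TaskReport[] reports = job.getTaskReports(TaskType.MAP);
    for (TaskReport report : reports) {
      TIPStatus status = report.getCurrentStatus();
      System.out.println(report.getTaskID() + " " + status + " " + report.getProgress());
      if (status == TIPStatus.FAILED) {
        // Diagnostics carry the error messages reported by the failed attempts.
        for (String diagnostic : report.getDiagnostics()) {
          System.out.println("  " + diagnostic);
        }
      }
    }
  }
}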

Example 1  Project: hadoop  File: CLI.java
/**
 * Display the information about a job's tasks, of a particular type and
 * in a particular state
 * 
 * @param job the job
 * @param type the type of the task (map/reduce/setup/cleanup)
 * @param state the state of the task 
 * (pending/running/completed/failed/killed)
 */
protected void displayTasks(Job job, String type, String state) 
throws IOException, InterruptedException {
  TaskReport[] reports = job.getTaskReports(TaskType.valueOf(
      org.apache.hadoop.util.StringUtils.toUpperCase(type)));
  for (TaskReport report : reports) {
    TIPStatus status = report.getCurrentStatus();
    if ((state.equalsIgnoreCase("pending") && status == TIPStatus.PENDING) ||
        (state.equalsIgnoreCase("running") && status == TIPStatus.RUNNING) ||
        (state.equalsIgnoreCase("completed") && status == TIPStatus.COMPLETE) ||
        (state.equalsIgnoreCase("failed") && status == TIPStatus.FAILED) ||
        (state.equalsIgnoreCase("killed") && status == TIPStatus.KILLED)) {
      printTaskAttempts(report);
    }
  }
}
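
In the hadoop project this method backs the "mapred job -list-attempt-ids <job-id> <task-type> <task-state>" CLI option: it filters the task reports by the requested state and hands each match to printTaskAttempts (shown as Example 9 below), which prints the matching attempt IDs.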
 
Example 2  Project: big-c  File: CLI.java
/**
 * Display the information about a job's tasks, of a particular type and
 * in a particular state
 * 
 * @param job the job
 * @param type the type of the task (map/reduce/setup/cleanup)
 * @param state the state of the task 
 * (pending/running/completed/failed/killed)
 */
protected void displayTasks(Job job, String type, String state) 
throws IOException, InterruptedException {
  TaskReport[] reports = job.getTaskReports(TaskType.valueOf(
      org.apache.hadoop.util.StringUtils.toUpperCase(type)));
  for (TaskReport report : reports) {
    TIPStatus status = report.getCurrentStatus();
    if ((state.equalsIgnoreCase("pending") && status == TIPStatus.PENDING) ||
        (state.equalsIgnoreCase("running") && status == TIPStatus.RUNNING) ||
        (state.equalsIgnoreCase("completed") && status == TIPStatus.COMPLETE) ||
        (state.equalsIgnoreCase("failed") && status == TIPStatus.FAILED) ||
        (state.equalsIgnoreCase("killed") && status == TIPStatus.KILLED)) {
      printTaskAttempts(report);
    }
  }
}
 
Example 3  Project: sequenceiq-samples  File: MRJobStatus.java
public JobStatus printJobStatus(YARNRunner yarnRunner, JobID jobID) throws IOException, InterruptedException {
	JobStatus jobStatus;
	jobStatus = yarnRunner.getJobStatus(jobID);
	
	// print overall job M/R progresses
	LOGGER.info("\nJob " + jobStatus.getJobName() + "in queue (" + jobStatus.getQueue() + ")" + " progress M/R: " + jobStatus.getMapProgress() + "/" + jobStatus.getReduceProgress());
	LOGGER.info("Tracking URL : " + jobStatus.getTrackingUrl());
	LOGGER.info("Reserved memory : " + jobStatus.getReservedMem() + ", used memory : "+ jobStatus.getUsedMem() + " and used slots : "+ jobStatus.getNumUsedSlots());
	
	// list map & reduce tasks statuses and progress		
	TaskReport[] reports = yarnRunner.getTaskReports(jobID, TaskType.MAP);
	for (int i = 0; i < reports.length; i++) {
		LOGGER.info("MAP: Status " + reports[i].getCurrentStatus() + " with task ID " + reports[i].getTaskID() + ", and progress " + reports[i].getProgress()); 
	}
	reports = yarnRunner.getTaskReports(jobID, TaskType.REDUCE);
	for (int i = 0; i < reports.length; i++) {
		LOGGER.info("REDUCE: " + reports[i].getCurrentStatus() + " with task ID " + reports[i].getTaskID() + ", and progress " + reports[i].getProgress()); 
	}
	return jobStatus;
}
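
The method above takes a single snapshot of the task reports. To follow progress until the job finishes, the same calls can be wrapped in a polling loop; the sketch below is not part of the sequenceiq-samples project, uses the higher-level Job API seen in the other examples (same imports), and the one-second interval is an arbitrary choice.

// Hypothetical helper: poll map-task progress once per second until the job completes.
static void watchMapProgress(Job job) throws IOException, InterruptedException {
  while (!job.isComplete()) {
    for (TaskReport report : job.getTaskReports(TaskType.MAP)) {
      System.out.println(report.getTaskID() + " " + report.getCurrentStatus()
          + " progress " + report.getProgress());
    }
    Thread.sleep(1000L);
  }
}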
 
Example 4  Project: incubator-retired-blur  File: IndexerJobDriver.java
PartitionedInputResult(Path partitionedInputData, Counters counters, int shards, TaskReport[] taskReports) {
  _partitionedInputData = partitionedInputData;
  _counters = counters;
  _rowIdsFromNewData = new long[shards];
  _rowIdsToUpdateFromNewData = new long[shards];
  _rowIdsFromIndex = new long[shards];
  for (TaskReport tr : taskReports) {
    int id = tr.getTaskID().getId();
    Counters taskCounters = tr.getTaskCounters();
    Counter total = taskCounters.findCounter(BlurIndexCounter.ROW_IDS_FROM_NEW_DATA);
    _rowIdsFromNewData[id] = total.getValue();
    Counter update = taskCounters.findCounter(BlurIndexCounter.ROW_IDS_TO_UPDATE_FROM_NEW_DATA);
    _rowIdsToUpdateFromNewData[id] = update.getValue();
    Counter index = taskCounters.findCounter(BlurIndexCounter.ROW_IDS_FROM_INDEX);
    _rowIdsFromIndex[id] = index.getValue();
  }
}
 
Example 5  Project: hadoop  File: JobClientUnitTest.java
@Test
public void testMapTaskReportsWithNullJob() throws Exception {
  TestJobClient client = new TestJobClient(new JobConf());
  Cluster mockCluster = mock(Cluster.class);
  client.setCluster(mockCluster);
  JobID id = new JobID("test",0);
  
  when(mockCluster.getJob(id)).thenReturn(null);
  
  TaskReport[] result = client.getMapTaskReports(id);
  assertEquals(0, result.length);
  
  verify(mockCluster).getJob(id);
}
 
Example 6  Project: hadoop  File: JobClientUnitTest.java
@Test
public void testReduceTaskReportsWithNullJob() throws Exception {
  TestJobClient client = new TestJobClient(new JobConf());
  Cluster mockCluster = mock(Cluster.class);
  client.setCluster(mockCluster);
  JobID id = new JobID("test",0);
  
  when(mockCluster.getJob(id)).thenReturn(null);
  
  TaskReport[] result = client.getReduceTaskReports(id);
  assertEquals(0, result.length);
  
  verify(mockCluster).getJob(id);
}
 
Example 7  Project: hadoop  File: JobClientUnitTest.java
@Test
public void testSetupTaskReportsWithNullJob() throws Exception {
  TestJobClient client = new TestJobClient(new JobConf());
  Cluster mockCluster = mock(Cluster.class);
  client.setCluster(mockCluster);
  JobID id = new JobID("test",0);
  
  when(mockCluster.getJob(id)).thenReturn(null);
  
  TaskReport[] result = client.getSetupTaskReports(id);
  assertEquals(0, result.length);
  
  verify(mockCluster).getJob(id);
}
 
Example 8  Project: hadoop  File: JobClientUnitTest.java
@Test
public void testCleanupTaskReportsWithNullJob() throws Exception {
  TestJobClient client = new TestJobClient(new JobConf());
  Cluster mockCluster = mock(Cluster.class);
  client.setCluster(mockCluster);
  JobID id = new JobID("test",0);
  
  when(mockCluster.getJob(id)).thenReturn(null);
  
  TaskReport[] result = client.getCleanupTaskReports(id);
  assertEquals(0, result.length);
  
  verify(mockCluster).getJob(id);
}
 
Example 9  Project: hadoop  File: CLI.java
private void printTaskAttempts(TaskReport report) {
  if (report.getCurrentStatus() == TIPStatus.COMPLETE) {
    System.out.println(report.getSuccessfulTaskAttemptId());
  } else if (report.getCurrentStatus() == TIPStatus.RUNNING) {
    for (TaskAttemptID t : 
      report.getRunningTaskAttemptIds()) {
      System.out.println(t);
    }
  }
}
 
Example 10  Project: big-c  File: JobClientUnitTest.java
@Test
public void testMapTaskReportsWithNullJob() throws Exception {
  TestJobClient client = new TestJobClient(new JobConf());
  Cluster mockCluster = mock(Cluster.class);
  client.setCluster(mockCluster);
  JobID id = new JobID("test",0);
  
  when(mockCluster.getJob(id)).thenReturn(null);
  
  TaskReport[] result = client.getMapTaskReports(id);
  assertEquals(0, result.length);
  
  verify(mockCluster).getJob(id);
}
 
Example 11  Project: big-c  File: JobClientUnitTest.java
@Test
public void testReduceTaskReportsWithNullJob() throws Exception {
  TestJobClient client = new TestJobClient(new JobConf());
  Cluster mockCluster = mock(Cluster.class);
  client.setCluster(mockCluster);
  JobID id = new JobID("test",0);
  
  when(mockCluster.getJob(id)).thenReturn(null);
  
  TaskReport[] result = client.getReduceTaskReports(id);
  assertEquals(0, result.length);
  
  verify(mockCluster).getJob(id);
}
 
Example 12  Project: big-c  File: JobClientUnitTest.java
@Test
public void testSetupTaskReportsWithNullJob() throws Exception {
  TestJobClient client = new TestJobClient(new JobConf());
  Cluster mockCluster = mock(Cluster.class);
  client.setCluster(mockCluster);
  JobID id = new JobID("test",0);
  
  when(mockCluster.getJob(id)).thenReturn(null);
  
  TaskReport[] result = client.getSetupTaskReports(id);
  assertEquals(0, result.length);
  
  verify(mockCluster).getJob(id);
}
 
Example 13  Project: big-c  File: JobClientUnitTest.java
@Test
public void testCleanupTaskReportsWithNullJob() throws Exception {
  TestJobClient client = new TestJobClient(new JobConf());
  Cluster mockCluster = mock(Cluster.class);
  client.setCluster(mockCluster);
  JobID id = new JobID("test",0);
  
  when(mockCluster.getJob(id)).thenReturn(null);
  
  TaskReport[] result = client.getCleanupTaskReports(id);
  assertEquals(0, result.length);
  
  verify(mockCluster).getJob(id);
}
 
Example 14  Project: big-c  File: CLI.java
private void printTaskAttempts(TaskReport report) {
  if (report.getCurrentStatus() == TIPStatus.COMPLETE) {
    System.out.println(report.getSuccessfulTaskAttemptId());
  } else if (report.getCurrentStatus() == TIPStatus.RUNNING) {
    for (TaskAttemptID t : 
      report.getRunningTaskAttemptIds()) {
      System.out.println(t);
    }
  }
}
 
Example 15  Project: indexr  File: UpdateColumnJob.java
public boolean doRun(Config upcolConfig) throws Exception {
    JobConf jobConf = new JobConf(getConf(), UpdateColumnJob.class);
    jobConf.setKeepFailedTaskFiles(false);
    jobConf.setNumReduceTasks(0);
    String jobName = String.format("indexr-upcol-%s-%s-%s",
            upcolConfig.table,
            LocalDateTime.now().format(timeFormatter),
            RandomStringUtils.randomAlphabetic(5));
    jobConf.setJobName(jobName);
    jobConf.set(CONFKEY, JsonUtil.toJson(upcolConfig));
    Path workDir = new Path(jobConf.getWorkingDirectory(), jobName);
    jobConf.setWorkingDirectory(workDir);

    Job job = Job.getInstance(jobConf);
    job.setInputFormatClass(SegmentInputFormat.class);
    job.setMapperClass(UpColSegmentMapper.class);
    job.setJarByClass(UpdateColumnJob.class);
    job.setMapOutputKeyClass(Text.class);
    job.setMapOutputValueClass(Text.class);
    job.setMapSpeculativeExecution(false);
    job.setOutputFormatClass(UpColSegmentOutputFormat.class);

    job.submit();
    boolean ok = job.waitForCompletion(true);
    if (!ok) {
        TaskReport[] reports = job.getTaskReports(TaskType.MAP);
        if (reports != null) {
            for (TaskReport report : reports) {
                log.error("Error in task [%s] : %s", report.getTaskId(), Arrays.toString(report.getDiagnostics()));
            }
        }
    }
    return ok;
}
 
Example 16  Project: hadoop  File: YARNRunner.java
@Override
public TaskReport[] getTaskReports(JobID jobID, TaskType taskType)
throws IOException, InterruptedException {
  return clientCache.getClient(jobID)
      .getTaskReports(jobID, taskType);
}
 
Example 17  Project: hadoop  File: JobClientUnitTest.java
@Test
public void testShowJob() throws Exception {
  TestJobClient client = new TestJobClient(new JobConf());

  long startTime = System.currentTimeMillis();

  JobID jobID = new JobID(String.valueOf(startTime), 12345);

  JobStatus mockJobStatus = mock(JobStatus.class);
  when(mockJobStatus.getJobID()).thenReturn(jobID);
  when(mockJobStatus.getJobName()).thenReturn(jobID.toString());
  when(mockJobStatus.getState()).thenReturn(JobStatus.State.RUNNING);
  when(mockJobStatus.getStartTime()).thenReturn(startTime);
  when(mockJobStatus.getUsername()).thenReturn("mockuser");
  when(mockJobStatus.getQueue()).thenReturn("mockqueue");
  when(mockJobStatus.getPriority()).thenReturn(JobPriority.NORMAL);
  when(mockJobStatus.getNumUsedSlots()).thenReturn(1);
  when(mockJobStatus.getNumReservedSlots()).thenReturn(1);
  when(mockJobStatus.getUsedMem()).thenReturn(1024);
  when(mockJobStatus.getReservedMem()).thenReturn(512);
  when(mockJobStatus.getNeededMem()).thenReturn(2048);
  when(mockJobStatus.getSchedulingInfo()).thenReturn("NA");

  Job mockJob = mock(Job.class);
  when(mockJob.getTaskReports(isA(TaskType.class))).thenReturn(
    new TaskReport[5]);

  Cluster mockCluster = mock(Cluster.class);
  when(mockCluster.getJob(jobID)).thenReturn(mockJob);

  client.setCluster(mockCluster);
  
  ByteArrayOutputStream out = new ByteArrayOutputStream();
  client.displayJobList(new JobStatus[] {mockJobStatus}, new PrintWriter(out));
  String commandLineOutput = out.toString();
  System.out.println(commandLineOutput);
  Assert.assertTrue(commandLineOutput.contains("Total jobs:1"));

  verify(mockJobStatus, atLeastOnce()).getJobID();
  verify(mockJobStatus).getState();
  verify(mockJobStatus).getStartTime();
  verify(mockJobStatus).getUsername();
  verify(mockJobStatus).getQueue();
  verify(mockJobStatus).getPriority();
  verify(mockJobStatus).getNumUsedSlots();
  verify(mockJobStatus).getNumReservedSlots();
  verify(mockJobStatus).getUsedMem();
  verify(mockJobStatus).getReservedMem();
  verify(mockJobStatus).getNeededMem();
  verify(mockJobStatus).getSchedulingInfo();

  // This call should not go to each AM.
  verify(mockCluster, never()).getJob(jobID);
  verify(mockJob, never()).getTaskReports(isA(TaskType.class));
}
 
Example 18  Project: hadoop  File: TestCLI.java
private TaskReport[] getTaskReports(JobID jobId, TaskType type) {
  return new TaskReport[] { new TaskReport(), new TaskReport() };
}
 
Example 19  Project: big-c  File: YARNRunner.java
@Override
public TaskReport[] getTaskReports(JobID jobID, TaskType taskType)
throws IOException, InterruptedException {
  return clientCache.getClient(jobID)
      .getTaskReports(jobID, taskType);
}
 
Example 20  Project: big-c  File: JobClientUnitTest.java
@Test
public void testShowJob() throws Exception {
  TestJobClient client = new TestJobClient(new JobConf());

  long startTime = System.currentTimeMillis();

  JobID jobID = new JobID(String.valueOf(startTime), 12345);

  JobStatus mockJobStatus = mock(JobStatus.class);
  when(mockJobStatus.getJobID()).thenReturn(jobID);
  when(mockJobStatus.getJobName()).thenReturn(jobID.toString());
  when(mockJobStatus.getState()).thenReturn(JobStatus.State.RUNNING);
  when(mockJobStatus.getStartTime()).thenReturn(startTime);
  when(mockJobStatus.getUsername()).thenReturn("mockuser");
  when(mockJobStatus.getQueue()).thenReturn("mockqueue");
  when(mockJobStatus.getPriority()).thenReturn(JobPriority.NORMAL);
  when(mockJobStatus.getNumUsedSlots()).thenReturn(1);
  when(mockJobStatus.getNumReservedSlots()).thenReturn(1);
  when(mockJobStatus.getUsedMem()).thenReturn(1024);
  when(mockJobStatus.getReservedMem()).thenReturn(512);
  when(mockJobStatus.getNeededMem()).thenReturn(2048);
  when(mockJobStatus.getSchedulingInfo()).thenReturn("NA");

  Job mockJob = mock(Job.class);
  when(mockJob.getTaskReports(isA(TaskType.class))).thenReturn(
    new TaskReport[5]);

  Cluster mockCluster = mock(Cluster.class);
  when(mockCluster.getJob(jobID)).thenReturn(mockJob);

  client.setCluster(mockCluster);
  
  ByteArrayOutputStream out = new ByteArrayOutputStream();
  client.displayJobList(new JobStatus[] {mockJobStatus}, new PrintWriter(out));
  String commandLineOutput = out.toString();
  System.out.println(commandLineOutput);
  Assert.assertTrue(commandLineOutput.contains("Total jobs:1"));

  verify(mockJobStatus, atLeastOnce()).getJobID();
  verify(mockJobStatus).getState();
  verify(mockJobStatus).getStartTime();
  verify(mockJobStatus).getUsername();
  verify(mockJobStatus).getQueue();
  verify(mockJobStatus).getPriority();
  verify(mockJobStatus).getNumUsedSlots();
  verify(mockJobStatus).getNumReservedSlots();
  verify(mockJobStatus).getUsedMem();
  verify(mockJobStatus).getReservedMem();
  verify(mockJobStatus).getNeededMem();
  verify(mockJobStatus).getSchedulingInfo();

  // This call should not go to each AM.
  verify(mockCluster, never()).getJob(jobID);
  verify(mockJob, never()).getTaskReports(isA(TaskType.class));
}
 
Example 21  Project: big-c  File: TestCLI.java
private TaskReport[] getTaskReports(JobID jobId, TaskType type) {
  return new TaskReport[] { new TaskReport(), new TaskReport() };
}
 
Example 22  Project: ignite  File: HadoopClientProtocol.java
/** {@inheritDoc} */
@Override public TaskReport[] getTaskReports(JobID jobid, TaskType type) throws IOException, InterruptedException {
    return new TaskReport[0];
}
 
Example 23  Project: RDFS  File: MockSimulatorJobTracker.java
@Override
public TaskReport[] getTaskReports(JobID jobid, TaskType type)
    throws IOException, InterruptedException {
  throw new UnsupportedOperationException();
}
 
Example 24  Project: incubator-tez  File: ClientServiceDelegate.java
public TaskReport[] getTaskReports(
    JobID oldJobID, TaskType taskType)
     throws IOException{
  // TEZ-146: need to return real task reports
  return new TaskReport[0];
}
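
Note that several ClientProtocol implementations in this list are stubs rather than real data sources: the ignite client (Example 22) and the Tez delegates (Examples 24 and 26) return an empty TaskReport array, and the RDFS mock job tracker (Example 23) throws UnsupportedOperationException. Code that consumes task reports should therefore tolerate empty or missing results.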
 
Example 25  Project: incubator-tez  File: YARNRunner.java
@Override
public TaskReport[] getTaskReports(JobID jobID, TaskType taskType)
throws IOException, InterruptedException {
  return clientCache.getClient(jobID)
      .getTaskReports(jobID, taskType);
}
 
Example 26  Project: tez  File: ClientServiceDelegate.java
public TaskReport[] getTaskReports(
    JobID oldJobID, TaskType taskType)
     throws IOException{
  // TEZ-146: need to return real task reports
  return new TaskReport[0];
}
 
Example 27  Project: tez  File: YARNRunner.java
@Override
public TaskReport[] getTaskReports(JobID jobID, TaskType taskType)
throws IOException, InterruptedException {
  return clientCache.getClient(jobID)
      .getTaskReports(jobID, taskType);
}
 
Example 28  Project: hadoop  File: ClientProtocol.java
/**
 * Grab a bunch of info on the tasks that make up the job
 */
public TaskReport[] getTaskReports(JobID jobid, TaskType type)
throws IOException, InterruptedException;
 
Example 29  Project: big-c  File: ClientProtocol.java
/**
 * Grab a bunch of info on the tasks that make up the job
 */
public TaskReport[] getTaskReports(JobID jobid, TaskType type)
throws IOException, InterruptedException;
 