Source code examples for the class org.apache.hadoop.mapred.TaskReport

The examples below show how the org.apache.hadoop.mapred.TaskReport API class is used in practice. Each snippet is taken from an open-source project on GitHub.
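
Before the project examples, here is a minimal, self-contained sketch of the basic pattern they all build on: asking a JobClient for the TaskReport array of a job and reading progress and state off each report. The configuration and the job ID string below are hypothetical placeholders, not values from the projects that follow.

import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.JobID;
import org.apache.hadoop.mapred.TaskReport;

public class TaskReportDemo {
    public static void main(String[] args) throws Exception {
        // Connect to the cluster described by the (default) JobConf.
        JobClient client = new JobClient(new JobConf());
        // Hypothetical job ID; substitute a real one from your cluster.
        JobID jobId = JobID.forName("job_1400000000000_0001");
        for (TaskReport report : client.getMapTaskReports(jobId)) {
            System.out.println(report.getTaskID()
                + "\tprogress=" + report.getProgress()
                + "\tstate=" + report.getState());
        }
    }
}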

Example 1  Project: hadoop   File: TestStreamingStatus.java
void validateTaskStatus(StreamJob job, TaskType type) throws IOException {
  // Map Task has 2 phases: map, sort
  // Reduce Task has 3 phases: copy, sort, reduce
  String finalPhaseInTask;
  TaskReport[] reports;
  if (type == TaskType.MAP) {
    reports = job.jc_.getMapTaskReports(job.jobId_);
    finalPhaseInTask = "sort";
  } else { // reduce task
    reports = job.jc_.getReduceTaskReports(job.jobId_);
    finalPhaseInTask = "reduce";
  }
  assertEquals(1, reports.length);
  assertEquals(expectedStatus + " > " + finalPhaseInTask,
      reports[0].getState());
}
 
Example 2  Project: big-c   File: TestStreamingStatus.java
void validateTaskStatus(StreamJob job, TaskType type) throws IOException {
  // Map Task has 2 phases: map, sort
  // Reduce Task has 3 phases: copy, sort, reduce
  String finalPhaseInTask;
  TaskReport[] reports;
  if (type == TaskType.MAP) {
    reports = job.jc_.getMapTaskReports(job.jobId_);
    finalPhaseInTask = "sort";
  } else { // reduce task
    reports = job.jc_.getReduceTaskReports(job.jobId_);
    finalPhaseInTask = "reduce";
  }
  assertEquals(1, reports.length);
  assertEquals(expectedStatus + " > " + finalPhaseInTask,
      reports[0].getState());
}
 
Example 3  Project: spork   File: MRJobStats.java
private TaskStat getTaskStat(Iterator<TaskReport> tasks) {
    int size = 0;
    long max = 0;
    long min = Long.MAX_VALUE;
    long median = 0;
    long total = 0;
    List<Long> durations = new ArrayList<Long>();

    while (tasks.hasNext()) {
        TaskReport rpt = tasks.next();
        long duration = rpt.getFinishTime() - rpt.getStartTime();
        durations.add(duration);
        max = (duration > max) ? duration : max;
        min = (duration < min) ? duration : min;
        total += duration;
        size++;
    }
    // assumes at least one task report; with an empty iterator this
    // would divide by zero (and min would stay at Long.MAX_VALUE)
    long avg = total / size;

    median = calculateMedianValue(durations);

    return new TaskStat(size, max, min, avg, median);
}
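
The calculateMedianValue helper invoked above is not part of this snippet. Purely as an illustration (an assumed implementation, not spork's actual one, which may differ in how it treats even-sized lists), it could look like this:

private long calculateMedianValue(List<Long> durations) {
    // Sort a copy so the caller's list is left untouched, then take the
    // middle element (the upper of the two middles for even sizes).
    List<Long> sorted = new ArrayList<Long>(durations);
    Collections.sort(sorted);
    return sorted.get(sorted.size() / 2);
}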
 
Example 4  Project: spork   File: TestMRJobStats.java
@BeforeClass
public static void oneTimeSetup() throws Exception {

    // setting up TaskReport for map tasks
    for (int i = 0; i < mapTaskReports.length; i++) {
        mapTaskReports[i] = Mockito.mock(TaskReport.class);
        Mockito.when(mapTaskReports[i].getStartTime()).thenReturn(MAP_START_FINISH_TIME_DATA[i][0] * ONE_THOUSAND);
        Mockito.when(mapTaskReports[i].getFinishTime()).thenReturn(MAP_START_FINISH_TIME_DATA[i][1] * ONE_THOUSAND);
    }

    // setting up TaskReport for reduce tasks
    for (int i = 0; i < reduceTaskReports.length; i++) {
        reduceTaskReports[i] = Mockito.mock(TaskReport.class);
        Mockito.when(reduceTaskReports[i].getStartTime()).thenReturn(REDUCE_START_FINISH_TIME_DATA[i][0] * ONE_THOUSAND);
        Mockito.when(reduceTaskReports[i].getFinishTime()).thenReturn(REDUCE_START_FINISH_TIME_DATA[i][1] * ONE_THOUSAND);
    }

    StringBuilder sb = new StringBuilder();
    sb.append(jobID.toString()).append("\t");
    sb.append(mapTaskReports.length).append("\t");
    sb.append(reduceTaskReports.length).append("\t");

    sb.append("500\t100\t300\t300\t500\t100\t240\t200");
    ASSERT_STRING = sb.toString();
}
 
Example 5  Project: spork   File: HadoopShims.java
public static Iterator<TaskReport> getTaskReports(Job job, TaskType type) throws IOException {
    if (job.getJobConf().getBoolean(PigConfiguration.PIG_NO_TASK_REPORT, false)) {
        LOG.info("TaskReports are disabled for job: " + job.getAssignedJobID());
        return null;
    }
    Cluster cluster = new Cluster(job.getJobConf());
    try {
        org.apache.hadoop.mapreduce.Job mrJob = cluster.getJob(job.getAssignedJobID());
        if (mrJob == null) { // In local mode, mrJob will be null
            mrJob = job.getJob();
        }
        org.apache.hadoop.mapreduce.TaskReport[] reports = mrJob.getTaskReports(type);
        return DowngradeHelper.downgradeTaskReports(reports);
    } catch (InterruptedException ir) {
        throw new IOException(ir);
    }
}
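
Note that this shim returns null when task reports are disabled via PIG_NO_TASK_REPORT, so callers must guard before iterating. A hypothetical caller sketch (job is assumed to be in scope; computeTimeSpent is the helper shown in example 6 below):

// getTaskReports returns null when PIG_NO_TASK_REPORT is set,
// so check before handing the iterator to downstream helpers.
Iterator<TaskReport> mapReports = HadoopShims.getTaskReports(job, TaskType.MAP);
long mapTimeSpent = (mapReports == null) ? 0L : computeTimeSpent(mapReports);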
 
Example 6  Project: spork   File: Launcher.java
protected long computeTimeSpent(Iterator<TaskReport> taskReports) {
    long timeSpent = 0;
    while (taskReports.hasNext()) {
        TaskReport r = taskReports.next();
        timeSpent += (r.getFinishTime() - r.getStartTime());
    }
    return timeSpent;
}
 
Example 7  Project: spork   File: TestMRJobStats.java
@Test
public void testOneTaskReport() throws Exception {
    // setting up one map task report
    TaskReport[] mapTaskReports = new TaskReport[1];
    mapTaskReports[0] = Mockito.mock(TaskReport.class);
    Mockito.when(mapTaskReports[0].getStartTime()).thenReturn(300L * ONE_THOUSAND);
    Mockito.when(mapTaskReports[0].getFinishTime()).thenReturn(400L * ONE_THOUSAND);

    // setting up one reduce task report
    TaskReport[] reduceTaskReports = new TaskReport[1];
    reduceTaskReports[0] = Mockito.mock(TaskReport.class);
    Mockito.when(reduceTaskReports[0].getStartTime()).thenReturn(500L * ONE_THOUSAND);
    Mockito.when(reduceTaskReports[0].getFinishTime()).thenReturn(700L * ONE_THOUSAND);

    PigStats.JobGraph jobGraph = new PigStats.JobGraph();
    MRJobStats jobStats = createJobStats("JobStatsTest", jobGraph);
    getJobStatsMethod("setId", JobID.class).invoke(jobStats, jobID);
    jobStats.setSuccessful(true);

    getJobStatsMethod("addMapReduceStatistics", Iterator.class, Iterator.class)
        .invoke(jobStats, Arrays.asList(mapTaskReports).iterator(), Arrays.asList(reduceTaskReports).iterator());
    String msg = (String)getJobStatsMethod("getDisplayString")
        .invoke(jobStats);
    System.out.println(JobStats.SUCCESS_HEADER);
    System.out.println(msg);

    StringBuilder sb = new StringBuilder();
    sb.append(jobID.toString()).append("\t");
    sb.append(mapTaskReports.length).append("\t");
    sb.append(reduceTaskReports.length).append("\t");
    sb.append("100\t100\t100\t100\t200\t200\t200\t200");

    System.out.println("assert msg: " + sb.toString());
    assertTrue(msg.startsWith(sb.toString()));

}
 
Example 8  Project: spork   File: HadoopShims.java
public static boolean isJobFailed(TaskReport report) {
    float successfulProgress = 1.0f;
    // if the progress reported is not 1.0f then the map or reduce
    // job failed
    // this comparison is in place for the backward compatibility
    // for Hadoop 0.20
    return report.getProgress() != successfulProgress;
}
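
The exact float comparison above is safe only because a task that finished successfully reports a progress of exactly 1.0f; as the comment says, this variant exists for backward compatibility with Hadoop 0.20, which predates the TIPStatus-based check shown in example 12.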
 
Example 9  Project: spork   File: HadoopShims.java
public static Iterator<TaskReport> getTaskReports(Job job, TaskType type) throws IOException {
    if (job.getJobConf().getBoolean(PigConfiguration.PIG_NO_TASK_REPORT, false)) {
        LOG.info("TaskReports are disabled for job: " + job.getAssignedJobID());
        return null;
    }
    JobClient jobClient = job.getJobClient();
    TaskReport[] reports = null;
    if (type == TaskType.MAP) {
        reports = jobClient.getMapTaskReports(job.getAssignedJobID());
    } else {
        reports = jobClient.getReduceTaskReports(job.getAssignedJobID());
    }
    return reports == null ? null : Arrays.asList(reports).iterator();
}
 
Example 10  Project: hadoop   File: TestJobMonitorAndPrint.java
@Test
public void testJobMonitorAndPrint() throws Exception {
  JobStatus jobStatus_1 = new JobStatus(new JobID("job_000", 1), 1f, 0.1f,
      0.1f, 0f, State.RUNNING, JobPriority.HIGH, "tmp-user", "tmp-jobname",
      "tmp-queue", "tmp-jobfile", "tmp-url", true);
  JobStatus jobStatus_2 = new JobStatus(new JobID("job_000", 1), 1f, 1f,
      1f, 1f, State.SUCCEEDED, JobPriority.HIGH, "tmp-user", "tmp-jobname",
      "tmp-queue", "tmp-jobfile", "tmp-url", true);

  doAnswer(
      new Answer<TaskCompletionEvent[]>() {
        @Override
        public TaskCompletionEvent[] answer(InvocationOnMock invocation)
            throws Throwable {
          return new TaskCompletionEvent[0];
        }
      }
      ).when(job).getTaskCompletionEvents(anyInt(), anyInt());

  doReturn(new TaskReport[5]).when(job).getTaskReports(isA(TaskType.class));
  when(clientProtocol.getJobStatus(any(JobID.class))).thenReturn(jobStatus_1, jobStatus_2);
  // setup the logger to capture all logs
  Layout layout =
      Logger.getRootLogger().getAppender("stdout").getLayout();
  ByteArrayOutputStream os = new ByteArrayOutputStream();
  WriterAppender appender = new WriterAppender(layout, os);
  appender.setThreshold(Level.ALL);
  Logger qlogger = Logger.getLogger(Job.class);
  qlogger.addAppender(appender);

  job.monitorAndPrintJob();

  qlogger.removeAppender(appender);
  LineNumberReader r = new LineNumberReader(new StringReader(os.toString()));
  String line;
  boolean foundHundred = false;
  boolean foundComplete = false;
  boolean foundUber = false;
  String uberModeMatch = "uber mode : true";
  String progressMatch = "map 100% reduce 100%";
  String completionMatch = "completed successfully";
  while ((line = r.readLine()) != null) {
    if (line.contains(uberModeMatch)) {
      foundUber = true;
    }
    foundHundred = line.contains(progressMatch);
    if (foundHundred)
      break;
  }
  line = r.readLine();
  foundComplete = line.contains(completionMatch);
  assertTrue(foundUber);
  assertTrue(foundHundred);
  assertTrue(foundComplete);

  System.out.println("The output of job.toString() is : \n" + job.toString());
  assertTrue(job.toString().contains("Number of maps: 5\n"));
  assertTrue(job.toString().contains("Number of reduces: 5\n"));
}
 
Example 11  Project: big-c   File: TestJobMonitorAndPrint.java
@Test
public void testJobMonitorAndPrint() throws Exception {
  JobStatus jobStatus_1 = new JobStatus(new JobID("job_000", 1), 1f, 0.1f,
      0.1f, 0f, State.RUNNING, JobPriority.HIGH, "tmp-user", "tmp-jobname",
      "tmp-queue", "tmp-jobfile", "tmp-url", true);
  JobStatus jobStatus_2 = new JobStatus(new JobID("job_000", 1), 1f, 1f,
      1f, 1f, State.SUCCEEDED, JobPriority.HIGH, "tmp-user", "tmp-jobname",
      "tmp-queue", "tmp-jobfile", "tmp-url", true);

  doAnswer(
      new Answer<TaskCompletionEvent[]>() {
        @Override
        public TaskCompletionEvent[] answer(InvocationOnMock invocation)
            throws Throwable {
          return new TaskCompletionEvent[0];
        }
      }
      ).when(job).getTaskCompletionEvents(anyInt(), anyInt());

  doReturn(new TaskReport[5]).when(job).getTaskReports(isA(TaskType.class));
  when(clientProtocol.getJobStatus(any(JobID.class))).thenReturn(jobStatus_1, jobStatus_2);
  // setup the logger to capture all logs
  Layout layout =
      Logger.getRootLogger().getAppender("stdout").getLayout();
  ByteArrayOutputStream os = new ByteArrayOutputStream();
  WriterAppender appender = new WriterAppender(layout, os);
  appender.setThreshold(Level.ALL);
  Logger qlogger = Logger.getLogger(Job.class);
  qlogger.addAppender(appender);

  job.monitorAndPrintJob();

  qlogger.removeAppender(appender);
  LineNumberReader r = new LineNumberReader(new StringReader(os.toString()));
  String line;
  boolean foundHundred = false;
  boolean foundComplete = false;
  boolean foundUber = false;
  String uberModeMatch = "uber mode : true";
  String progressMatch = "map 100% reduce 100%";
  String completionMatch = "completed successfully";
  while ((line = r.readLine()) != null) {
    if (line.contains(uberModeMatch)) {
      foundUber = true;
    }
    foundHundred = line.contains(progressMatch);
    if (foundHundred)
      break;
  }
  line = r.readLine();
  foundComplete = line.contains(completionMatch);
  assertTrue(foundUber);
  assertTrue(foundHundred);
  assertTrue(foundComplete);

  System.out.println("The output of job.toString() is : \n" + job.toString());
  assertTrue(job.toString().contains("Number of maps: 5\n"));
  assertTrue(job.toString().contains("Number of reduces: 5\n"));
}
 
Example 12  Project: spork   File: HadoopShims.java
public static boolean isJobFailed(TaskReport report) {
    return report.getCurrentStatus() == TIPStatus.FAILED;
}
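
This is the newer-shim counterpart of example 8: where TIPStatus is available, checking getCurrentStatus() against TIPStatus.FAILED is more direct than inferring failure from an incomplete progress value.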
 