Code examples for the class org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskAttemptInfo

The examples below show how the org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskAttemptInfo API is used in practice; you can also follow the project links to view the full source code on GitHub.
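
Before the individual examples, here is a minimal sketch of how TaskAttemptInfo instances are usually obtained: a JobHistoryParser is constructed over a job-history (.jhist) file, parse() returns a JobInfo, and each task's attempts are read from TaskInfo.getAllTaskAttempts(). The history-file path below is a placeholder and is not taken from any of the projects listed here.

import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.JobInfo;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskAttemptInfo;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskInfo;

public class TaskAttemptInfoDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    // Placeholder path to a .jhist file written by the MapReduce framework.
    Path historyFile = new Path("/tmp/job_1234567890123_0001.jhist");

    JobHistoryParser parser = new JobHistoryParser(fs, historyFile);
    JobInfo jobInfo = parser.parse();

    // Walk every task and every attempt recorded in the history file.
    for (TaskInfo taskInfo : jobInfo.getAllTasks().values()) {
      for (Map.Entry<TaskAttemptID, TaskAttemptInfo> entry
          : taskInfo.getAllTaskAttempts().entrySet()) {
        TaskAttemptInfo attempt = entry.getValue();
        System.out.println(entry.getKey()
            + " status=" + attempt.getTaskStatus()
            + " host=" + attempt.getHostname()
            + " runtime=" + (attempt.getFinishTime() - attempt.getStartTime()) + " ms");
      }
    }
  }
}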

Example 1 - Project: hadoop, File: TestCompletedTask.java
/**
 * test some methods of CompletedTaskAttempt
 */
@Test (timeout=5000)
public void testCompletedTaskAttempt() {

  TaskAttemptInfo attemptInfo = mock(TaskAttemptInfo.class);
  when(attemptInfo.getRackname()).thenReturn("Rackname");
  when(attemptInfo.getShuffleFinishTime()).thenReturn(11L);
  when(attemptInfo.getSortFinishTime()).thenReturn(12L);
  when(attemptInfo.getShufflePort()).thenReturn(10);

  JobID jobId = new JobID("12345", 0);
  TaskID taskId = new TaskID(jobId, TaskType.REDUCE, 0);
  TaskAttemptID taskAttemptId = new TaskAttemptID(taskId, 0);
  when(attemptInfo.getAttemptId()).thenReturn(taskAttemptId);

  CompletedTaskAttempt taskAttempt = new CompletedTaskAttempt(null, attemptInfo);
  assertEquals("Rackname", taskAttempt.getNodeRackName());
  assertEquals(Phase.CLEANUP, taskAttempt.getPhase());
  assertTrue(taskAttempt.isFinished());
  assertEquals(11L, taskAttempt.getShuffleFinishTime());
  assertEquals(12L, taskAttempt.getSortFinishTime());
  assertEquals(10, taskAttempt.getShufflePort());
}
 
Example 2 - Project: big-c, File: TestCompletedTask.java
/**
 * test some methods of CompletedTaskAttempt
 */
@Test (timeout=5000)
public void testCompletedTaskAttempt() {

  TaskAttemptInfo attemptInfo = mock(TaskAttemptInfo.class);
  when(attemptInfo.getRackname()).thenReturn("Rackname");
  when(attemptInfo.getShuffleFinishTime()).thenReturn(11L);
  when(attemptInfo.getSortFinishTime()).thenReturn(12L);
  when(attemptInfo.getShufflePort()).thenReturn(10);

  JobID jobId = new JobID("12345", 0);
  TaskID taskId = new TaskID(jobId, TaskType.REDUCE, 0);
  TaskAttemptID taskAttemptId = new TaskAttemptID(taskId, 0);
  when(attemptInfo.getAttemptId()).thenReturn(taskAttemptId);

  CompletedTaskAttempt taskAttempt = new CompletedTaskAttempt(null, attemptInfo);
  assertEquals("Rackname", taskAttempt.getNodeRackName());
  assertEquals(Phase.CLEANUP, taskAttempt.getPhase());
  assertTrue(taskAttempt.isFinished());
  assertEquals(11L, taskAttempt.getShuffleFinishTime());
  assertEquals(12L, taskAttempt.getSortFinishTime());
  assertEquals(10, taskAttempt.getShufflePort());
}
 
Example 3 - Project: jumbune, File: YarnJobStatsUtility.java
/**
 * This method is responsible for populating the setup phase details.
 * @return PhaseDetails containing the details of the setup phase.
 */
private PhaseDetails prepareSetupDetails(JobInfo jobInfo,Map<TaskAttemptID, TaskAttemptInfo> tasks){
	PhaseDetails phaseDetails = new PhaseDetails();
	List<TaskOutputDetails> taskOutputDetails = new ArrayList<TaskOutputDetails>();
	TaskOutputDetails tod;
	tod = new TaskOutputDetails();
	tod.setTaskType("SETUP");
	tod.setTaskID("Setup");
	for (Map.Entry<TaskAttemptID, TaskAttemptInfo> task : tasks
			.entrySet()) {
		TaskAttemptInfo taskAttemptInfo = (TaskAttemptInfo) (task.getValue());
		tod.setLocation(taskAttemptInfo.getHostname());
	}
	long startPoint = jobInfo.getSubmitTime();
	tod.setStartPoint(0);
	long endPoint = (jobInfo.getLaunchTime()-startPoint) / CONVERSION_FACTOR_MILLISECS_TO_SECS;
	tod.setEndPoint(endPoint);
	tod.setDataFlowRate(0);
	taskOutputDetails.add(tod);
	phaseDetails.setTaskOutputDetails(taskOutputDetails);
	phaseDetails.setAvgDataFlowRate(0);
	return phaseDetails;
}
 
Example 4 - Project: jumbune, File: YarnJobStatsUtility.java
/**
 * This method is responsible for populating the cleanup phase details.
 * @return PhaseDetails containing the details of the cleanup phase.
 */
private PhaseDetails prepareCleanupDetails(JobInfo jobInfo, Map<TaskAttemptID, TaskAttemptInfo> tasks){
	PhaseDetails phaseDetails = new PhaseDetails();
	List<TaskOutputDetails> cleanupTaskOuptputDetails = new ArrayList<TaskOutputDetails>();
	TaskOutputDetails taskOutputDetails = new TaskOutputDetails();
	taskOutputDetails.setTaskType("CLEANUP");
	taskOutputDetails.setTaskID("Cleanup");
	for (Map.Entry<TaskAttemptID, TaskAttemptInfo> task : tasks
			.entrySet()) {
		TaskAttemptInfo taskAttemptInfo = (TaskAttemptInfo) (task.getValue());
		taskOutputDetails.setLocation(taskAttemptInfo.getHostname());
	}
	long startPoint = getMaxReduceTime(tasks,jobInfo.getSubmitTime());
	taskOutputDetails.setStartPoint(startPoint);
	LOGGER.debug("Clean up start time" + taskOutputDetails.getStartPoint());
	long endPoint = (jobInfo.getFinishTime() - jobInfo.getSubmitTime())/CONVERSION_FACTOR_MILLISECS_TO_SECS;
	taskOutputDetails.setEndPoint(endPoint);
	LOGGER.debug("Clean up end time" + taskOutputDetails.getEndPoint());
	taskOutputDetails.setDataFlowRate(0);
	cleanupTaskOuptputDetails.add(taskOutputDetails);
	phaseDetails.setTaskOutputDetails(cleanupTaskOuptputDetails);
	phaseDetails.setAvgDataFlowRate(0);
	return phaseDetails;
}
 
Example 5 - Project: hadoop, File: TaskAttemptRecoverEvent.java
public TaskAttemptRecoverEvent(TaskAttemptId id, TaskAttemptInfo taInfo,
    OutputCommitter committer, boolean recoverOutput) {
  super(id, TaskAttemptEventType.TA_RECOVER);
  this.taInfo = taInfo;
  this.committer = committer;
  this.recoverAttemptOutput = recoverOutput;
}
 
Example 6 - Project: hadoop, File: TestRecovery.java
private TaskAttemptInfo getMockTaskAttemptInfo(TaskAttemptID tai,
    TaskAttemptState tas) {

  ContainerId ci = mock(ContainerId.class);
  Counters counters = mock(Counters.class);
  TaskType tt = TaskType.MAP;

  long finishTime = System.currentTimeMillis();

  TaskAttemptInfo mockTAinfo = mock(TaskAttemptInfo.class);

  when(mockTAinfo.getAttemptId()).thenReturn(tai);
  when(mockTAinfo.getContainerId()).thenReturn(ci);
  when(mockTAinfo.getCounters()).thenReturn(counters);
  when(mockTAinfo.getError()).thenReturn("");
  when(mockTAinfo.getFinishTime()).thenReturn(finishTime);
  when(mockTAinfo.getHostname()).thenReturn("localhost");
  when(mockTAinfo.getHttpPort()).thenReturn(23);
  when(mockTAinfo.getMapFinishTime()).thenReturn(finishTime - 1000L);
  when(mockTAinfo.getPort()).thenReturn(24);
  when(mockTAinfo.getRackname()).thenReturn("defaultRack");
  when(mockTAinfo.getShuffleFinishTime()).thenReturn(finishTime - 2000L);
  when(mockTAinfo.getShufflePort()).thenReturn(25);
  when(mockTAinfo.getSortFinishTime()).thenReturn(finishTime - 3000L);
  when(mockTAinfo.getStartTime()).thenReturn(finishTime -10000);
  when(mockTAinfo.getState()).thenReturn("task in progress");
  when(mockTAinfo.getTaskStatus()).thenReturn(tas.toString());
  when(mockTAinfo.getTaskType()).thenReturn(tt);
  when(mockTAinfo.getTrackerName()).thenReturn("TrackerName");
  return mockTAinfo;
}
 
Example 7 - Project: hadoop, File: CompletedTask.java
private void loadAllTaskAttempts() {
  if (taskAttemptsLoaded.get()) {
    return;
  }
  taskAttemptsLock.lock();
  try {
    if (taskAttemptsLoaded.get()) {
      return;
    }

    for (TaskAttemptInfo attemptHistory : taskInfo.getAllTaskAttempts()
        .values()) {
      CompletedTaskAttempt attempt =
          new CompletedTaskAttempt(taskId, attemptHistory);
      reportDiagnostics.addAll(attempt.getDiagnostics());
      attempts.put(attempt.getID(), attempt);
      if (successfulAttempt == null
          && attemptHistory.getTaskStatus() != null
          && attemptHistory.getTaskStatus().equals(
              TaskState.SUCCEEDED.toString())) {
        successfulAttempt =
            TypeConverter.toYarn(attemptHistory.getAttemptId());
      }
    }
    taskAttemptsLoaded.set(true);
  } finally {
    taskAttemptsLock.unlock();
  }
}
 
Example 8 - Project: hadoop, File: CompletedTaskAttempt.java
CompletedTaskAttempt(TaskId taskId, TaskAttemptInfo attemptInfo) {
  this.attemptInfo = attemptInfo;
  this.attemptId = TypeConverter.toYarn(attemptInfo.getAttemptId());
  if (attemptInfo.getTaskStatus() != null) {
    this.state = TaskAttemptState.valueOf(attemptInfo.getTaskStatus());
  } else {
    this.state = TaskAttemptState.KILLED;
    localDiagMessage = "Attmpt state missing from History : marked as KILLED";
    diagnostics.add(localDiagMessage);
  }
  if (attemptInfo.getError() != null) {
    diagnostics.add(attemptInfo.getError());
  }
}
 
Example 9 - Project: big-c, File: TaskAttemptRecoverEvent.java
public TaskAttemptRecoverEvent(TaskAttemptId id, TaskAttemptInfo taInfo,
    OutputCommitter committer, boolean recoverOutput) {
  super(id, TaskAttemptEventType.TA_RECOVER);
  this.taInfo = taInfo;
  this.committer = committer;
  this.recoverAttemptOutput = recoverOutput;
}
 
Example 10 - Project: big-c, File: TestRecovery.java
private TaskAttemptInfo getMockTaskAttemptInfo(TaskAttemptID tai,
    TaskAttemptState tas) {

  ContainerId ci = mock(ContainerId.class);
  Counters counters = mock(Counters.class);
  TaskType tt = TaskType.MAP;

  long finishTime = System.currentTimeMillis();

  TaskAttemptInfo mockTAinfo = mock(TaskAttemptInfo.class);

  when(mockTAinfo.getAttemptId()).thenReturn(tai);
  when(mockTAinfo.getContainerId()).thenReturn(ci);
  when(mockTAinfo.getCounters()).thenReturn(counters);
  when(mockTAinfo.getError()).thenReturn("");
  when(mockTAinfo.getFinishTime()).thenReturn(finishTime);
  when(mockTAinfo.getHostname()).thenReturn("localhost");
  when(mockTAinfo.getHttpPort()).thenReturn(23);
  when(mockTAinfo.getMapFinishTime()).thenReturn(finishTime - 1000L);
  when(mockTAinfo.getPort()).thenReturn(24);
  when(mockTAinfo.getRackname()).thenReturn("defaultRack");
  when(mockTAinfo.getShuffleFinishTime()).thenReturn(finishTime - 2000L);
  when(mockTAinfo.getShufflePort()).thenReturn(25);
  when(mockTAinfo.getSortFinishTime()).thenReturn(finishTime - 3000L);
  when(mockTAinfo.getStartTime()).thenReturn(finishTime -10000);
  when(mockTAinfo.getState()).thenReturn("task in progress");
  when(mockTAinfo.getTaskStatus()).thenReturn(tas.toString());
  when(mockTAinfo.getTaskType()).thenReturn(tt);
  when(mockTAinfo.getTrackerName()).thenReturn("TrackerName");
  return mockTAinfo;
}
 
Example 11 - Project: big-c, File: CompletedTask.java
private void loadAllTaskAttempts() {
  if (taskAttemptsLoaded.get()) {
    return;
  }
  taskAttemptsLock.lock();
  try {
    if (taskAttemptsLoaded.get()) {
      return;
    }

    for (TaskAttemptInfo attemptHistory : taskInfo.getAllTaskAttempts()
        .values()) {
      CompletedTaskAttempt attempt =
          new CompletedTaskAttempt(taskId, attemptHistory);
      reportDiagnostics.addAll(attempt.getDiagnostics());
      attempts.put(attempt.getID(), attempt);
      if (successfulAttempt == null
          && attemptHistory.getTaskStatus() != null
          && attemptHistory.getTaskStatus().equals(
              TaskState.SUCCEEDED.toString())) {
        successfulAttempt =
            TypeConverter.toYarn(attemptHistory.getAttemptId());
      }
    }
    taskAttemptsLoaded.set(true);
  } finally {
    taskAttemptsLock.unlock();
  }
}
 
Example 12 - Project: big-c, File: CompletedTaskAttempt.java
CompletedTaskAttempt(TaskId taskId, TaskAttemptInfo attemptInfo) {
  this.attemptInfo = attemptInfo;
  this.attemptId = TypeConverter.toYarn(attemptInfo.getAttemptId());
  if (attemptInfo.getTaskStatus() != null) {
    this.state = TaskAttemptState.valueOf(attemptInfo.getTaskStatus());
  } else {
    this.state = TaskAttemptState.KILLED;
    localDiagMessage = "Attmpt state missing from History : marked as KILLED";
    diagnostics.add(localDiagMessage);
  }
  if (attemptInfo.getError() != null) {
    diagnostics.add(attemptInfo.getError());
  }
}
 
Example 13 - Project: jumbune, File: YarnJobStatsUtility.java
/**
 * @return the maximum time taken by reduce attempts, relative to the referenced zero time.
 */
@SuppressWarnings("deprecation")
private long getMaxReduceTime(Map<TaskAttemptID, TaskAttemptInfo> tasks, long referencedZeroTime) {
	long maximumReduceTime = 0L;
	for (Map.Entry<TaskAttemptID, TaskAttemptInfo> task : tasks.entrySet()) {
		if (!task.getKey().isMap()) {
			TaskAttemptInfo taskAttemptInfo = (TaskAttemptInfo) (task.getValue());
			long reduceEnd = (taskAttemptInfo.getFinishTime() - referencedZeroTime) / CONVERSION_FACTOR_MILLISECS_TO_SECS;
			maximumReduceTime = reduceEnd > maximumReduceTime ? reduceEnd : maximumReduceTime;
		}
	}
	return maximumReduceTime;
}
 
Example 14 - Project: jumbune, File: YarnJobStatsUtility.java
/**
 * Adds details for a map phase.
 *
 * @param task the map task attempt entry
 * @param referencedZeroTime the start time
 * @return the phase details
 */
private TaskOutputDetails addMapPhaseDetails(Entry<TaskAttemptID, TaskAttemptInfo> task, long referencedZeroTime) {

	TaskAttemptInfo taskAttemptInfo = (TaskAttemptInfo) (task.getValue());
	TaskOutputDetails taskOutputDetails = new TaskOutputDetails();
	if (taskAttemptInfo.getTaskStatus().equalsIgnoreCase("SUCCEEDED")) {
		taskOutputDetails.setTaskStatus(taskAttemptInfo.getTaskStatus());
		taskOutputDetails.setTaskType(taskAttemptInfo.getTaskType().toString());
		taskOutputDetails.setTaskID(taskAttemptInfo.getAttemptId().getTaskID().toString());
		long startPoint = (taskAttemptInfo.getStartTime() - referencedZeroTime) / CONVERSION_FACTOR_MILLISECS_TO_SECS;
		taskOutputDetails.setStartPoint(startPoint);
		long endPoint = (taskAttemptInfo.getFinishTime() - referencedZeroTime) / CONVERSION_FACTOR_MILLISECS_TO_SECS;
		taskOutputDetails.setEndPoint(endPoint);
		taskOutputDetails.setTimeTaken(endPoint - startPoint);
		taskOutputDetails.setLocation(taskAttemptInfo.getHostname());
		Counters counters = taskAttemptInfo.getCounters();
		CounterGroup fileSystemCounters = counters.getGroup("org.apache.hadoop.mapreduce.FileSystemCounter");
		Counter inputBytes = fileSystemCounters.findCounter("HDFS_BYTES_READ");
		long dataFlowRate = inputBytes.getValue() / (endPoint - startPoint);
		taskOutputDetails.setDataFlowRate(dataFlowRate);
		CounterGroup mapReduceTaskCounters = counters.getGroup("org.apache.hadoop.mapreduce.TaskCounter");
		Counter mapOutputRecords = mapReduceTaskCounters.findCounter("MAP_OUTPUT_RECORDS");
		Counter physicalMemoryBytes = mapReduceTaskCounters.findCounter("PHYSICAL_MEMORY_BYTES");
		ResourceUsageMetrics rum = new ResourceUsageMetrics();
		rum.setPhysicalMemoryUsage(physicalMemoryBytes.getValue());
		taskOutputDetails.setResourceUsageMetrics(rum);
		taskOutputDetails.setOutputRecords(mapOutputRecords.getValue());
		Counter mapOutputBytes = mapReduceTaskCounters.findCounter("MAP_OUTPUT_BYTES");
		taskOutputDetails.setOutputBytes(mapOutputBytes.getValue());
	}
	return taskOutputDetails;
}
 
Example 15 - Project: hadoop, File: TaskAttemptRecoverEvent.java
public TaskAttemptInfo getTaskAttemptInfo() {
  return taInfo;
}
 
Example 16 - Project: hadoop, File: TaskImpl.java
@Override
public int compare(TaskAttemptInfo a, TaskAttemptInfo b) {
  long diff = a.getFinishTime() - b.getFinishTime();
  return diff == 0 ? 0 : (diff < 0 ? -1 : 1);
}
 
Example 17 - Project: hadoop, File: MRAppMaster.java
private void parsePreviousJobHistory() throws IOException {
  FSDataInputStream in = getPreviousJobHistoryStream(getConfig(),
      appAttemptID);
  JobHistoryParser parser = new JobHistoryParser(in);
  JobInfo jobInfo = parser.parse();
  Exception parseException = parser.getParseException();
  if (parseException != null) {
    LOG.info("Got an error parsing job-history file" +
        ", ignoring incomplete events.", parseException);
  }
  Map<org.apache.hadoop.mapreduce.TaskID, TaskInfo> taskInfos = jobInfo
      .getAllTasks();
  for (TaskInfo taskInfo : taskInfos.values()) {
    if (TaskState.SUCCEEDED.toString().equals(taskInfo.getTaskStatus())) {
      Iterator<Entry<TaskAttemptID, TaskAttemptInfo>> taskAttemptIterator =
          taskInfo.getAllTaskAttempts().entrySet().iterator();
      while (taskAttemptIterator.hasNext()) {
        Map.Entry<TaskAttemptID, TaskAttemptInfo> currentEntry = taskAttemptIterator.next();
        if (!jobInfo.getAllCompletedTaskAttempts().containsKey(currentEntry.getKey())) {
          taskAttemptIterator.remove();
        }
      }
      completedTasksFromPreviousRun
          .put(TypeConverter.toYarn(taskInfo.getTaskId()), taskInfo);
      LOG.info("Read from history task "
          + TypeConverter.toYarn(taskInfo.getTaskId()));
    }
  }
  LOG.info("Read completed tasks from history "
      + completedTasksFromPreviousRun.size());
  recoveredJobStartTime = jobInfo.getLaunchTime();

  // recover AMInfos
  List<JobHistoryParser.AMInfo> jhAmInfoList = jobInfo.getAMInfos();
  if (jhAmInfoList != null) {
    for (JobHistoryParser.AMInfo jhAmInfo : jhAmInfoList) {
      AMInfo amInfo = MRBuilderUtils.newAMInfo(jhAmInfo.getAppAttemptId(),
          jhAmInfo.getStartTime(), jhAmInfo.getContainerId(),
          jhAmInfo.getNodeManagerHost(), jhAmInfo.getNodeManagerPort(),
          jhAmInfo.getNodeManagerHttpPort());
      amInfos.add(amInfo);
    }
  }
}
 
Example 18 - Project: hadoop, File: TestRecovery.java
@Test
public void testRecoverySuccessAttempt() {
  LOG.info("--- START: testRecoverySuccessAttempt ---");

  long clusterTimestamp = System.currentTimeMillis();
  EventHandler mockEventHandler = mock(EventHandler.class);
  MapTaskImpl recoverMapTask = getMockMapTask(clusterTimestamp,
      mockEventHandler);

  TaskId taskId = recoverMapTask.getID();
  JobID jobID = new JobID(Long.toString(clusterTimestamp), 1);
  TaskID taskID = new TaskID(jobID,
      org.apache.hadoop.mapreduce.TaskType.MAP, taskId.getId());

  //Mock up the TaskAttempts
  Map<TaskAttemptID, TaskAttemptInfo> mockTaskAttempts =
      new HashMap<TaskAttemptID, TaskAttemptInfo>();

  TaskAttemptID taId1 = new TaskAttemptID(taskID, 2);
  TaskAttemptInfo mockTAinfo1 = getMockTaskAttemptInfo(taId1,
      TaskAttemptState.SUCCEEDED);
  mockTaskAttempts.put(taId1, mockTAinfo1);

  TaskAttemptID taId2 = new TaskAttemptID(taskID, 1);
  TaskAttemptInfo mockTAinfo2 = getMockTaskAttemptInfo(taId2,
      TaskAttemptState.FAILED);
  mockTaskAttempts.put(taId2, mockTAinfo2);

  OutputCommitter mockCommitter = mock (OutputCommitter.class);
  TaskInfo mockTaskInfo = mock(TaskInfo.class);
  when(mockTaskInfo.getTaskStatus()).thenReturn("SUCCEEDED");
  when(mockTaskInfo.getTaskId()).thenReturn(taskID);
  when(mockTaskInfo.getAllTaskAttempts()).thenReturn(mockTaskAttempts);

  recoverMapTask.handle(
      new TaskRecoverEvent(taskId, mockTaskInfo,mockCommitter, true));

  ArgumentCaptor<Event> arg = ArgumentCaptor.forClass(Event.class);
  verify(mockEventHandler,atLeast(1)).handle(
      (org.apache.hadoop.yarn.event.Event) arg.capture());

  Map<TaskAttemptID, TaskAttemptState> finalAttemptStates =
      new HashMap<TaskAttemptID, TaskAttemptState>();
  finalAttemptStates.put(taId1, TaskAttemptState.SUCCEEDED);
  finalAttemptStates.put(taId2, TaskAttemptState.FAILED);

  List<EventType> jobHistoryEvents = new ArrayList<EventType>();
  jobHistoryEvents.add(EventType.TASK_STARTED);
  jobHistoryEvents.add(EventType.MAP_ATTEMPT_STARTED);
  jobHistoryEvents.add(EventType.MAP_ATTEMPT_FINISHED);
  jobHistoryEvents.add(EventType.MAP_ATTEMPT_STARTED);
  jobHistoryEvents.add(EventType.MAP_ATTEMPT_FAILED);
  jobHistoryEvents.add(EventType.TASK_FINISHED);
  recoveryChecker(recoverMapTask, TaskState.SUCCEEDED, finalAttemptStates,
      arg, jobHistoryEvents, 2L, 1L);
}
 
Example 19 - Project: hadoop, File: TestRecovery.java
@Test
public void testRecoveryAllFailAttempts() {
  LOG.info("--- START: testRecoveryAllFailAttempts ---");

  long clusterTimestamp = System.currentTimeMillis();
  EventHandler mockEventHandler = mock(EventHandler.class);
  MapTaskImpl recoverMapTask = getMockMapTask(clusterTimestamp,
      mockEventHandler);

  TaskId taskId = recoverMapTask.getID();
  JobID jobID = new JobID(Long.toString(clusterTimestamp), 1);
  TaskID taskID = new TaskID(jobID,
      org.apache.hadoop.mapreduce.TaskType.MAP, taskId.getId());

  //Mock up the TaskAttempts
  Map<TaskAttemptID, TaskAttemptInfo> mockTaskAttempts =
      new HashMap<TaskAttemptID, TaskAttemptInfo>();

  TaskAttemptID taId1 = new TaskAttemptID(taskID, 2);
  TaskAttemptInfo mockTAinfo1 = getMockTaskAttemptInfo(taId1,
      TaskAttemptState.FAILED);
  mockTaskAttempts.put(taId1, mockTAinfo1);

  TaskAttemptID taId2 = new TaskAttemptID(taskID, 1);
  TaskAttemptInfo mockTAinfo2 = getMockTaskAttemptInfo(taId2,
      TaskAttemptState.FAILED);
  mockTaskAttempts.put(taId2, mockTAinfo2);

  OutputCommitter mockCommitter = mock (OutputCommitter.class);

  TaskInfo mockTaskInfo = mock(TaskInfo.class);
  when(mockTaskInfo.getTaskStatus()).thenReturn("FAILED");
  when(mockTaskInfo.getTaskId()).thenReturn(taskID);
  when(mockTaskInfo.getAllTaskAttempts()).thenReturn(mockTaskAttempts);

  recoverMapTask.handle(
      new TaskRecoverEvent(taskId, mockTaskInfo, mockCommitter, true));

  ArgumentCaptor<Event> arg = ArgumentCaptor.forClass(Event.class);
  verify(mockEventHandler,atLeast(1)).handle(
      (org.apache.hadoop.yarn.event.Event) arg.capture());

  Map<TaskAttemptID, TaskAttemptState> finalAttemptStates =
      new HashMap<TaskAttemptID, TaskAttemptState>();
  finalAttemptStates.put(taId1, TaskAttemptState.FAILED);
  finalAttemptStates.put(taId2, TaskAttemptState.FAILED);

  List<EventType> jobHistoryEvents = new ArrayList<EventType>();
  jobHistoryEvents.add(EventType.TASK_STARTED);
  jobHistoryEvents.add(EventType.MAP_ATTEMPT_STARTED);
  jobHistoryEvents.add(EventType.MAP_ATTEMPT_FAILED);
  jobHistoryEvents.add(EventType.MAP_ATTEMPT_STARTED);
  jobHistoryEvents.add(EventType.MAP_ATTEMPT_FAILED);
  jobHistoryEvents.add(EventType.TASK_FAILED);
  recoveryChecker(recoverMapTask, TaskState.FAILED, finalAttemptStates,
      arg, jobHistoryEvents, 2L, 2L);
}
 
Example 20 - Project: hadoop, File: TestRecovery.java
@Test
public void testRecoveryTaskSuccessAllAttemptsFail() {
  LOG.info("--- START:  testRecoveryTaskSuccessAllAttemptsFail ---");

  long clusterTimestamp = System.currentTimeMillis();
  EventHandler mockEventHandler = mock(EventHandler.class);
  MapTaskImpl recoverMapTask = getMockMapTask(clusterTimestamp,
      mockEventHandler);

  TaskId taskId = recoverMapTask.getID();
  JobID jobID = new JobID(Long.toString(clusterTimestamp), 1);
  TaskID taskID = new TaskID(jobID,
      org.apache.hadoop.mapreduce.TaskType.MAP, taskId.getId());

  //Mock up the TaskAttempts
  Map<TaskAttemptID, TaskAttemptInfo> mockTaskAttempts =
      new HashMap<TaskAttemptID, TaskAttemptInfo>();

  TaskAttemptID taId1 = new TaskAttemptID(taskID, 2);
  TaskAttemptInfo mockTAinfo1 = getMockTaskAttemptInfo(taId1,
      TaskAttemptState.FAILED);
  mockTaskAttempts.put(taId1, mockTAinfo1);

  TaskAttemptID taId2 = new TaskAttemptID(taskID, 1);
  TaskAttemptInfo mockTAinfo2 = getMockTaskAttemptInfo(taId2,
      TaskAttemptState.FAILED);
  mockTaskAttempts.put(taId2, mockTAinfo2);

  OutputCommitter mockCommitter = mock (OutputCommitter.class);
  TaskInfo mockTaskInfo = mock(TaskInfo.class);
  when(mockTaskInfo.getTaskStatus()).thenReturn("SUCCEEDED");
  when(mockTaskInfo.getTaskId()).thenReturn(taskID);
  when(mockTaskInfo.getAllTaskAttempts()).thenReturn(mockTaskAttempts);

  recoverMapTask.handle(
      new TaskRecoverEvent(taskId, mockTaskInfo, mockCommitter, true));

  ArgumentCaptor<Event> arg = ArgumentCaptor.forClass(Event.class);
  verify(mockEventHandler,atLeast(1)).handle(
      (org.apache.hadoop.yarn.event.Event) arg.capture());

  Map<TaskAttemptID, TaskAttemptState> finalAttemptStates =
      new HashMap<TaskAttemptID, TaskAttemptState>();
  finalAttemptStates.put(taId1, TaskAttemptState.FAILED);
  finalAttemptStates.put(taId2, TaskAttemptState.FAILED);
  // check for one new attempt launched since successful attempt not found
  TaskAttemptID taId3 = new TaskAttemptID(taskID, 2000);
  finalAttemptStates.put(taId3, TaskAttemptState.NEW);

  List<EventType> jobHistoryEvents = new ArrayList<EventType>();
  jobHistoryEvents.add(EventType.TASK_STARTED);
  jobHistoryEvents.add(EventType.MAP_ATTEMPT_STARTED);
  jobHistoryEvents.add(EventType.MAP_ATTEMPT_FAILED);
  jobHistoryEvents.add(EventType.MAP_ATTEMPT_STARTED);
  jobHistoryEvents.add(EventType.MAP_ATTEMPT_FAILED);
  recoveryChecker(recoverMapTask, TaskState.RUNNING, finalAttemptStates,
      arg, jobHistoryEvents, 2L, 2L);
}
 
Example 21 - Project: hadoop, File: TestRecovery.java
@Test
public void testRecoveryTaskSuccessAllAttemptsSucceed() {
  LOG.info("--- START:  testRecoveryTaskSuccessAllAttemptsFail ---");

  long clusterTimestamp = System.currentTimeMillis();
  EventHandler mockEventHandler = mock(EventHandler.class);
  MapTaskImpl recoverMapTask = getMockMapTask(clusterTimestamp,
      mockEventHandler);

  TaskId taskId = recoverMapTask.getID();
  JobID jobID = new JobID(Long.toString(clusterTimestamp), 1);
  TaskID taskID = new TaskID(jobID,
      org.apache.hadoop.mapreduce.TaskType.MAP, taskId.getId());

  //Mock up the TaskAttempts
  Map<TaskAttemptID, TaskAttemptInfo> mockTaskAttempts =
      new HashMap<TaskAttemptID, TaskAttemptInfo>();

  TaskAttemptID taId1 = new TaskAttemptID(taskID, 2);
  TaskAttemptInfo mockTAinfo1 = getMockTaskAttemptInfo(taId1,
      TaskAttemptState.SUCCEEDED);
  mockTaskAttempts.put(taId1, mockTAinfo1);

  TaskAttemptID taId2 = new TaskAttemptID(taskID, 1);
  TaskAttemptInfo mockTAinfo2 = getMockTaskAttemptInfo(taId2,
      TaskAttemptState.SUCCEEDED);
  mockTaskAttempts.put(taId2, mockTAinfo2);

  OutputCommitter mockCommitter = mock (OutputCommitter.class);
  TaskInfo mockTaskInfo = mock(TaskInfo.class);
  when(mockTaskInfo.getTaskStatus()).thenReturn("SUCCEEDED");
  when(mockTaskInfo.getTaskId()).thenReturn(taskID);
  when(mockTaskInfo.getAllTaskAttempts()).thenReturn(mockTaskAttempts);

  recoverMapTask.handle(
      new TaskRecoverEvent(taskId, mockTaskInfo, mockCommitter, true));

  ArgumentCaptor<Event> arg = ArgumentCaptor.forClass(Event.class);
  verify(mockEventHandler,atLeast(1)).handle(
      (org.apache.hadoop.yarn.event.Event) arg.capture());

  Map<TaskAttemptID, TaskAttemptState> finalAttemptStates =
      new HashMap<TaskAttemptID, TaskAttemptState>();
  finalAttemptStates.put(taId1, TaskAttemptState.SUCCEEDED);
  finalAttemptStates.put(taId2, TaskAttemptState.SUCCEEDED);

  List<EventType> jobHistoryEvents = new ArrayList<EventType>();
  jobHistoryEvents.add(EventType.TASK_STARTED);
  jobHistoryEvents.add(EventType.MAP_ATTEMPT_STARTED);
  jobHistoryEvents.add(EventType.MAP_ATTEMPT_FINISHED);
  jobHistoryEvents.add(EventType.MAP_ATTEMPT_STARTED);
  jobHistoryEvents.add(EventType.MAP_ATTEMPT_FINISHED);
  jobHistoryEvents.add(EventType.TASK_FINISHED);
  recoveryChecker(recoverMapTask, TaskState.SUCCEEDED, finalAttemptStates,
      arg, jobHistoryEvents, 2L, 0L);
}
 
Example 22 - Project: hadoop, File: TestRecovery.java
@Test
public void testRecoveryAllAttemptsKilled() {
  LOG.info("--- START:  testRecoveryAllAttemptsKilled ---");

  long clusterTimestamp = System.currentTimeMillis();
  EventHandler mockEventHandler = mock(EventHandler.class);
  MapTaskImpl recoverMapTask = getMockMapTask(clusterTimestamp,
      mockEventHandler);

  TaskId taskId = recoverMapTask.getID();
  JobID jobID = new JobID(Long.toString(clusterTimestamp), 1);
  TaskID taskID = new TaskID(jobID,
      org.apache.hadoop.mapreduce.TaskType.MAP, taskId.getId());

  //Mock up the TaskAttempts
  Map<TaskAttemptID, TaskAttemptInfo> mockTaskAttempts =
      new HashMap<TaskAttemptID, TaskAttemptInfo>();
  TaskAttemptID taId1 = new TaskAttemptID(taskID, 2);
  TaskAttemptInfo mockTAinfo1 = getMockTaskAttemptInfo(taId1,
      TaskAttemptState.KILLED);
  mockTaskAttempts.put(taId1, mockTAinfo1);

  TaskAttemptID taId2 = new TaskAttemptID(taskID, 1);
  TaskAttemptInfo mockTAinfo2 = getMockTaskAttemptInfo(taId2,
      TaskAttemptState.KILLED);
  mockTaskAttempts.put(taId2, mockTAinfo2);

  OutputCommitter mockCommitter = mock (OutputCommitter.class);
  TaskInfo mockTaskInfo = mock(TaskInfo.class);
  when(mockTaskInfo.getTaskStatus()).thenReturn("KILLED");
  when(mockTaskInfo.getTaskId()).thenReturn(taskID);
  when(mockTaskInfo.getAllTaskAttempts()).thenReturn(mockTaskAttempts);

  recoverMapTask.handle(
      new TaskRecoverEvent(taskId, mockTaskInfo, mockCommitter, true));

  ArgumentCaptor<Event> arg = ArgumentCaptor.forClass(Event.class);
  verify(mockEventHandler,atLeast(1)).handle(
      (org.apache.hadoop.yarn.event.Event) arg.capture());

  Map<TaskAttemptID, TaskAttemptState> finalAttemptStates =
      new HashMap<TaskAttemptID, TaskAttemptState>();
  finalAttemptStates.put(taId1, TaskAttemptState.KILLED);
  finalAttemptStates.put(taId2, TaskAttemptState.KILLED);

  List<EventType> jobHistoryEvents = new ArrayList<EventType>();
  jobHistoryEvents.add(EventType.TASK_STARTED);
  jobHistoryEvents.add(EventType.MAP_ATTEMPT_STARTED);
  jobHistoryEvents.add(EventType.MAP_ATTEMPT_KILLED);
  jobHistoryEvents.add(EventType.MAP_ATTEMPT_STARTED);
  jobHistoryEvents.add(EventType.MAP_ATTEMPT_KILLED);
  jobHistoryEvents.add(EventType.TASK_FAILED);
  recoveryChecker(recoverMapTask, TaskState.KILLED, finalAttemptStates,
      arg, jobHistoryEvents, 2L, 0L);
}
 
Example 23 - Project: big-c, File: TaskAttemptRecoverEvent.java
public TaskAttemptInfo getTaskAttemptInfo() {
  return taInfo;
}
 
Example 24 - Project: big-c, File: TaskImpl.java
@Override
public int compare(TaskAttemptInfo a, TaskAttemptInfo b) {
  long diff = a.getFinishTime() - b.getFinishTime();
  return diff == 0 ? 0 : (diff < 0 ? -1 : 1);
}
 
Example 25 - Project: big-c, File: MRAppMaster.java
private void parsePreviousJobHistory() throws IOException {
  FSDataInputStream in = getPreviousJobHistoryStream(getConfig(),
      appAttemptID);
  JobHistoryParser parser = new JobHistoryParser(in);
  JobInfo jobInfo = parser.parse();
  Exception parseException = parser.getParseException();
  if (parseException != null) {
    LOG.info("Got an error parsing job-history file" +
        ", ignoring incomplete events.", parseException);
  }
  Map<org.apache.hadoop.mapreduce.TaskID, TaskInfo> taskInfos = jobInfo
      .getAllTasks();
  for (TaskInfo taskInfo : taskInfos.values()) {
    if (TaskState.SUCCEEDED.toString().equals(taskInfo.getTaskStatus())) {
      Iterator<Entry<TaskAttemptID, TaskAttemptInfo>> taskAttemptIterator =
          taskInfo.getAllTaskAttempts().entrySet().iterator();
      while (taskAttemptIterator.hasNext()) {
        Map.Entry<TaskAttemptID, TaskAttemptInfo> currentEntry = taskAttemptIterator.next();
        if (!jobInfo.getAllCompletedTaskAttempts().containsKey(currentEntry.getKey())) {
          taskAttemptIterator.remove();
        }
      }
      completedTasksFromPreviousRun
          .put(TypeConverter.toYarn(taskInfo.getTaskId()), taskInfo);
      LOG.info("Read from history task "
          + TypeConverter.toYarn(taskInfo.getTaskId()));
    }
  }
  LOG.info("Read completed tasks from history "
      + completedTasksFromPreviousRun.size());
  recoveredJobStartTime = jobInfo.getLaunchTime();

  // recover AMInfos
  List<JobHistoryParser.AMInfo> jhAmInfoList = jobInfo.getAMInfos();
  if (jhAmInfoList != null) {
    for (JobHistoryParser.AMInfo jhAmInfo : jhAmInfoList) {
      AMInfo amInfo = MRBuilderUtils.newAMInfo(jhAmInfo.getAppAttemptId(),
          jhAmInfo.getStartTime(), jhAmInfo.getContainerId(),
          jhAmInfo.getNodeManagerHost(), jhAmInfo.getNodeManagerPort(),
          jhAmInfo.getNodeManagerHttpPort());
      amInfos.add(amInfo);
    }
  }
}
 
Example 26 - Project: big-c, File: TestRecovery.java
@Test
public void testRecoverySuccessAttempt() {
  LOG.info("--- START: testRecoverySuccessAttempt ---");

  long clusterTimestamp = System.currentTimeMillis();
  EventHandler mockEventHandler = mock(EventHandler.class);
  MapTaskImpl recoverMapTask = getMockMapTask(clusterTimestamp,
      mockEventHandler);

  TaskId taskId = recoverMapTask.getID();
  JobID jobID = new JobID(Long.toString(clusterTimestamp), 1);
  TaskID taskID = new TaskID(jobID,
      org.apache.hadoop.mapreduce.TaskType.MAP, taskId.getId());

  //Mock up the TaskAttempts
  Map<TaskAttemptID, TaskAttemptInfo> mockTaskAttempts =
      new HashMap<TaskAttemptID, TaskAttemptInfo>();

  TaskAttemptID taId1 = new TaskAttemptID(taskID, 2);
  TaskAttemptInfo mockTAinfo1 = getMockTaskAttemptInfo(taId1,
      TaskAttemptState.SUCCEEDED);
  mockTaskAttempts.put(taId1, mockTAinfo1);

  TaskAttemptID taId2 = new TaskAttemptID(taskID, 1);
  TaskAttemptInfo mockTAinfo2 = getMockTaskAttemptInfo(taId2,
      TaskAttemptState.FAILED);
  mockTaskAttempts.put(taId2, mockTAinfo2);

  OutputCommitter mockCommitter = mock (OutputCommitter.class);
  TaskInfo mockTaskInfo = mock(TaskInfo.class);
  when(mockTaskInfo.getTaskStatus()).thenReturn("SUCCEEDED");
  when(mockTaskInfo.getTaskId()).thenReturn(taskID);
  when(mockTaskInfo.getAllTaskAttempts()).thenReturn(mockTaskAttempts);

  recoverMapTask.handle(
      new TaskRecoverEvent(taskId, mockTaskInfo,mockCommitter, true));

  ArgumentCaptor<Event> arg = ArgumentCaptor.forClass(Event.class);
  verify(mockEventHandler,atLeast(1)).handle(
      (org.apache.hadoop.yarn.event.Event) arg.capture());

  Map<TaskAttemptID, TaskAttemptState> finalAttemptStates =
      new HashMap<TaskAttemptID, TaskAttemptState>();
  finalAttemptStates.put(taId1, TaskAttemptState.SUCCEEDED);
  finalAttemptStates.put(taId2, TaskAttemptState.FAILED);

  List<EventType> jobHistoryEvents = new ArrayList<EventType>();
  jobHistoryEvents.add(EventType.TASK_STARTED);
  jobHistoryEvents.add(EventType.MAP_ATTEMPT_STARTED);
  jobHistoryEvents.add(EventType.MAP_ATTEMPT_FINISHED);
  jobHistoryEvents.add(EventType.MAP_ATTEMPT_STARTED);
  jobHistoryEvents.add(EventType.MAP_ATTEMPT_FAILED);
  jobHistoryEvents.add(EventType.TASK_FINISHED);
  recoveryChecker(recoverMapTask, TaskState.SUCCEEDED, finalAttemptStates,
      arg, jobHistoryEvents, 2L, 1L);
}
 
Example 27 - Project: big-c, File: TestRecovery.java
@Test
public void testRecoveryAllFailAttempts() {
  LOG.info("--- START: testRecoveryAllFailAttempts ---");

  long clusterTimestamp = System.currentTimeMillis();
  EventHandler mockEventHandler = mock(EventHandler.class);
  MapTaskImpl recoverMapTask = getMockMapTask(clusterTimestamp,
      mockEventHandler);

  TaskId taskId = recoverMapTask.getID();
  JobID jobID = new JobID(Long.toString(clusterTimestamp), 1);
  TaskID taskID = new TaskID(jobID,
      org.apache.hadoop.mapreduce.TaskType.MAP, taskId.getId());

  //Mock up the TaskAttempts
  Map<TaskAttemptID, TaskAttemptInfo> mockTaskAttempts =
      new HashMap<TaskAttemptID, TaskAttemptInfo>();

  TaskAttemptID taId1 = new TaskAttemptID(taskID, 2);
  TaskAttemptInfo mockTAinfo1 = getMockTaskAttemptInfo(taId1,
      TaskAttemptState.FAILED);
  mockTaskAttempts.put(taId1, mockTAinfo1);

  TaskAttemptID taId2 = new TaskAttemptID(taskID, 1);
  TaskAttemptInfo mockTAinfo2 = getMockTaskAttemptInfo(taId2,
      TaskAttemptState.FAILED);
  mockTaskAttempts.put(taId2, mockTAinfo2);

  OutputCommitter mockCommitter = mock (OutputCommitter.class);

  TaskInfo mockTaskInfo = mock(TaskInfo.class);
  when(mockTaskInfo.getTaskStatus()).thenReturn("FAILED");
  when(mockTaskInfo.getTaskId()).thenReturn(taskID);
  when(mockTaskInfo.getAllTaskAttempts()).thenReturn(mockTaskAttempts);

  recoverMapTask.handle(
      new TaskRecoverEvent(taskId, mockTaskInfo, mockCommitter, true));

  ArgumentCaptor<Event> arg = ArgumentCaptor.forClass(Event.class);
  verify(mockEventHandler,atLeast(1)).handle(
      (org.apache.hadoop.yarn.event.Event) arg.capture());

  Map<TaskAttemptID, TaskAttemptState> finalAttemptStates =
      new HashMap<TaskAttemptID, TaskAttemptState>();
  finalAttemptStates.put(taId1, TaskAttemptState.FAILED);
  finalAttemptStates.put(taId2, TaskAttemptState.FAILED);

  List<EventType> jobHistoryEvents = new ArrayList<EventType>();
  jobHistoryEvents.add(EventType.TASK_STARTED);
  jobHistoryEvents.add(EventType.MAP_ATTEMPT_STARTED);
  jobHistoryEvents.add(EventType.MAP_ATTEMPT_FAILED);
  jobHistoryEvents.add(EventType.MAP_ATTEMPT_STARTED);
  jobHistoryEvents.add(EventType.MAP_ATTEMPT_FAILED);
  jobHistoryEvents.add(EventType.TASK_FAILED);
  recoveryChecker(recoverMapTask, TaskState.FAILED, finalAttemptStates,
      arg, jobHistoryEvents, 2L, 2L);
}
 
Example 28 - Project: big-c, File: TestRecovery.java
@Test
public void testRecoveryTaskSuccessAllAttemptsFail() {
  LOG.info("--- START:  testRecoveryTaskSuccessAllAttemptsFail ---");

  long clusterTimestamp = System.currentTimeMillis();
  EventHandler mockEventHandler = mock(EventHandler.class);
  MapTaskImpl recoverMapTask = getMockMapTask(clusterTimestamp,
      mockEventHandler);

  TaskId taskId = recoverMapTask.getID();
  JobID jobID = new JobID(Long.toString(clusterTimestamp), 1);
  TaskID taskID = new TaskID(jobID,
      org.apache.hadoop.mapreduce.TaskType.MAP, taskId.getId());

  //Mock up the TaskAttempts
  Map<TaskAttemptID, TaskAttemptInfo> mockTaskAttempts =
      new HashMap<TaskAttemptID, TaskAttemptInfo>();

  TaskAttemptID taId1 = new TaskAttemptID(taskID, 2);
  TaskAttemptInfo mockTAinfo1 = getMockTaskAttemptInfo(taId1,
      TaskAttemptState.FAILED);
  mockTaskAttempts.put(taId1, mockTAinfo1);

  TaskAttemptID taId2 = new TaskAttemptID(taskID, 1);
  TaskAttemptInfo mockTAinfo2 = getMockTaskAttemptInfo(taId2,
      TaskAttemptState.FAILED);
  mockTaskAttempts.put(taId2, mockTAinfo2);

  OutputCommitter mockCommitter = mock (OutputCommitter.class);
  TaskInfo mockTaskInfo = mock(TaskInfo.class);
  when(mockTaskInfo.getTaskStatus()).thenReturn("SUCCEEDED");
  when(mockTaskInfo.getTaskId()).thenReturn(taskID);
  when(mockTaskInfo.getAllTaskAttempts()).thenReturn(mockTaskAttempts);

  recoverMapTask.handle(
      new TaskRecoverEvent(taskId, mockTaskInfo, mockCommitter, true));

  ArgumentCaptor<Event> arg = ArgumentCaptor.forClass(Event.class);
  verify(mockEventHandler,atLeast(1)).handle(
      (org.apache.hadoop.yarn.event.Event) arg.capture());

  Map<TaskAttemptID, TaskAttemptState> finalAttemptStates =
      new HashMap<TaskAttemptID, TaskAttemptState>();
  finalAttemptStates.put(taId1, TaskAttemptState.FAILED);
  finalAttemptStates.put(taId2, TaskAttemptState.FAILED);
  // check for one new attempt launched since successful attempt not found
  TaskAttemptID taId3 = new TaskAttemptID(taskID, 2000);
  finalAttemptStates.put(taId3, TaskAttemptState.NEW);

  List<EventType> jobHistoryEvents = new ArrayList<EventType>();
  jobHistoryEvents.add(EventType.TASK_STARTED);
  jobHistoryEvents.add(EventType.MAP_ATTEMPT_STARTED);
  jobHistoryEvents.add(EventType.MAP_ATTEMPT_FAILED);
  jobHistoryEvents.add(EventType.MAP_ATTEMPT_STARTED);
  jobHistoryEvents.add(EventType.MAP_ATTEMPT_FAILED);
  recoveryChecker(recoverMapTask, TaskState.RUNNING, finalAttemptStates,
      arg, jobHistoryEvents, 2L, 2L);
}
 
Example 29 - Project: big-c, File: TestRecovery.java
@Test
public void testRecoveryTaskSuccessAllAttemptsSucceed() {
  LOG.info("--- START:  testRecoveryTaskSuccessAllAttemptsFail ---");

  long clusterTimestamp = System.currentTimeMillis();
  EventHandler mockEventHandler = mock(EventHandler.class);
  MapTaskImpl recoverMapTask = getMockMapTask(clusterTimestamp,
      mockEventHandler);

  TaskId taskId = recoverMapTask.getID();
  JobID jobID = new JobID(Long.toString(clusterTimestamp), 1);
  TaskID taskID = new TaskID(jobID,
      org.apache.hadoop.mapreduce.TaskType.MAP, taskId.getId());

  //Mock up the TaskAttempts
  Map<TaskAttemptID, TaskAttemptInfo> mockTaskAttempts =
      new HashMap<TaskAttemptID, TaskAttemptInfo>();

  TaskAttemptID taId1 = new TaskAttemptID(taskID, 2);
  TaskAttemptInfo mockTAinfo1 = getMockTaskAttemptInfo(taId1,
      TaskAttemptState.SUCCEEDED);
  mockTaskAttempts.put(taId1, mockTAinfo1);

  TaskAttemptID taId2 = new TaskAttemptID(taskID, 1);
  TaskAttemptInfo mockTAinfo2 = getMockTaskAttemptInfo(taId2,
      TaskAttemptState.SUCCEEDED);
  mockTaskAttempts.put(taId2, mockTAinfo2);

  OutputCommitter mockCommitter = mock (OutputCommitter.class);
  TaskInfo mockTaskInfo = mock(TaskInfo.class);
  when(mockTaskInfo.getTaskStatus()).thenReturn("SUCCEEDED");
  when(mockTaskInfo.getTaskId()).thenReturn(taskID);
  when(mockTaskInfo.getAllTaskAttempts()).thenReturn(mockTaskAttempts);

  recoverMapTask.handle(
      new TaskRecoverEvent(taskId, mockTaskInfo, mockCommitter, true));

  ArgumentCaptor<Event> arg = ArgumentCaptor.forClass(Event.class);
  verify(mockEventHandler,atLeast(1)).handle(
      (org.apache.hadoop.yarn.event.Event) arg.capture());

  Map<TaskAttemptID, TaskAttemptState> finalAttemptStates =
      new HashMap<TaskAttemptID, TaskAttemptState>();
  finalAttemptStates.put(taId1, TaskAttemptState.SUCCEEDED);
  finalAttemptStates.put(taId2, TaskAttemptState.SUCCEEDED);

  List<EventType> jobHistoryEvents = new ArrayList<EventType>();
  jobHistoryEvents.add(EventType.TASK_STARTED);
  jobHistoryEvents.add(EventType.MAP_ATTEMPT_STARTED);
  jobHistoryEvents.add(EventType.MAP_ATTEMPT_FINISHED);
  jobHistoryEvents.add(EventType.MAP_ATTEMPT_STARTED);
  jobHistoryEvents.add(EventType.MAP_ATTEMPT_FINISHED);
  jobHistoryEvents.add(EventType.TASK_FINISHED);
  recoveryChecker(recoverMapTask, TaskState.SUCCEEDED, finalAttemptStates,
      arg, jobHistoryEvents, 2L, 0L);
}
 
Example 30 - Project: big-c, File: TestRecovery.java
@Test
public void testRecoveryAllAttemptsKilled() {
  LOG.info("--- START:  testRecoveryAllAttemptsKilled ---");

  long clusterTimestamp = System.currentTimeMillis();
  EventHandler mockEventHandler = mock(EventHandler.class);
  MapTaskImpl recoverMapTask = getMockMapTask(clusterTimestamp,
      mockEventHandler);

  TaskId taskId = recoverMapTask.getID();
  JobID jobID = new JobID(Long.toString(clusterTimestamp), 1);
  TaskID taskID = new TaskID(jobID,
      org.apache.hadoop.mapreduce.TaskType.MAP, taskId.getId());

  //Mock up the TaskAttempts
  Map<TaskAttemptID, TaskAttemptInfo> mockTaskAttempts =
      new HashMap<TaskAttemptID, TaskAttemptInfo>();
  TaskAttemptID taId1 = new TaskAttemptID(taskID, 2);
  TaskAttemptInfo mockTAinfo1 = getMockTaskAttemptInfo(taId1,
      TaskAttemptState.KILLED);
  mockTaskAttempts.put(taId1, mockTAinfo1);

  TaskAttemptID taId2 = new TaskAttemptID(taskID, 1);
  TaskAttemptInfo mockTAinfo2 = getMockTaskAttemptInfo(taId2,
      TaskAttemptState.KILLED);
  mockTaskAttempts.put(taId2, mockTAinfo2);

  OutputCommitter mockCommitter = mock (OutputCommitter.class);
  TaskInfo mockTaskInfo = mock(TaskInfo.class);
  when(mockTaskInfo.getTaskStatus()).thenReturn("KILLED");
  when(mockTaskInfo.getTaskId()).thenReturn(taskID);
  when(mockTaskInfo.getAllTaskAttempts()).thenReturn(mockTaskAttempts);

  recoverMapTask.handle(
      new TaskRecoverEvent(taskId, mockTaskInfo, mockCommitter, true));

  ArgumentCaptor<Event> arg = ArgumentCaptor.forClass(Event.class);
  verify(mockEventHandler,atLeast(1)).handle(
      (org.apache.hadoop.yarn.event.Event) arg.capture());

  Map<TaskAttemptID, TaskAttemptState> finalAttemptStates =
      new HashMap<TaskAttemptID, TaskAttemptState>();
  finalAttemptStates.put(taId1, TaskAttemptState.KILLED);
  finalAttemptStates.put(taId2, TaskAttemptState.KILLED);

  List<EventType> jobHistoryEvents = new ArrayList<EventType>();
  jobHistoryEvents.add(EventType.TASK_STARTED);
  jobHistoryEvents.add(EventType.MAP_ATTEMPT_STARTED);
  jobHistoryEvents.add(EventType.MAP_ATTEMPT_KILLED);
  jobHistoryEvents.add(EventType.MAP_ATTEMPT_STARTED);
  jobHistoryEvents.add(EventType.MAP_ATTEMPT_KILLED);
  jobHistoryEvents.add(EventType.TASK_FAILED);
  recoveryChecker(recoverMapTask, TaskState.KILLED, finalAttemptStates,
      arg, jobHistoryEvents, 2L, 0L);
}
 