The examples below show how to use the org.apache.hadoop.mapreduce.v2.api.records.TaskType API class, or you can follow the links through to GitHub to view the source code.
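Before the excerpts, a minimal sketch of the basic pattern (not taken from any project below; the helper name and values are purely illustrative): code branches on the enum's two constants, TaskType.MAP and TaskType.REDUCE.

import org.apache.hadoop.mapreduce.v2.api.records.TaskType;

public class TaskTypeDemo {
  // Hypothetical helper: choose a per-type setting by branching on the enum.
  static int slotsFor(TaskType type, int mapSlots, int reduceSlots) {
    return type == TaskType.MAP ? mapSlots : reduceSlots;
  }

  public static void main(String[] args) {
    System.out.println(slotsFor(TaskType.MAP, 2, 1));    // prints 2
    System.out.println(slotsFor(TaskType.REDUCE, 2, 1)); // prints 1
  }
}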
@SuppressWarnings("unchecked")
@Override
public void transition(TaskAttemptImpl taskAttempt, TaskAttemptEvent event) {
// too many fetch failures can only happen for map tasks
Preconditions
.checkArgument(taskAttempt.getID().getTaskId().getTaskType() == TaskType.MAP);
// add to diagnostics
taskAttempt.addDiagnosticInfo("Too many fetch failures. Failing the attempt");
if (taskAttempt.getLaunchTime() != 0) {
taskAttempt.eventHandler
.handle(createJobCounterUpdateEventTAFailed(taskAttempt, true));
TaskAttemptUnsuccessfulCompletionEvent tauce =
createTaskAttemptUnsuccessfulCompletionEvent(taskAttempt,
TaskAttemptStateInternal.FAILED);
taskAttempt.eventHandler.handle(new JobHistoryEvent(
taskAttempt.attemptId.getTaskId().getJobId(), tauce));
} else {
LOG.debug("Not generating HistoryFinish event since start event not " +
"generated for taskAttempt: " + taskAttempt.getID());
}
taskAttempt.eventHandler.handle(new TaskTAttemptEvent(
taskAttempt.attemptId, TaskEventType.T_ATTEMPT_FAILED));
}
public ReduceTaskAttemptInfo(TaskAttempt ta, TaskType type) {
super(ta, type, false);
this.shuffleFinishTime = ta.getShuffleFinishTime();
this.mergeFinishTime = ta.getSortFinishTime();
this.elapsedShuffleTime = Times.elapsed(this.startTime,
this.shuffleFinishTime, false);
if (this.elapsedShuffleTime == -1) {
this.elapsedShuffleTime = 0;
}
this.elapsedMergeTime = Times.elapsed(this.shuffleFinishTime,
this.mergeFinishTime, false);
if (this.elapsedMergeTime == -1) {
this.elapsedMergeTime = 0;
}
this.elapsedReduceTime = Times.elapsed(this.mergeFinishTime,
this.finishTime, false);
if (this.elapsedReduceTime == -1) {
this.elapsedReduceTime = 0;
}
}
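A note on the -1 checks above: Times.elapsed(start, finish, false) reports -1 when one of the timestamps is unset, i.e. the phase never ran, and the constructor normalizes that to an elapsed time of zero.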
@GET
@Path("/jobs/{jobid}/tasks")
@Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
public TasksInfo getJobTasks(@Context HttpServletRequest hsr,
@PathParam("jobid") String jid, @QueryParam("type") String type) {
init();
Job job = getJobFromJobIdString(jid, appCtx);
checkAccess(job, hsr);
TasksInfo allTasks = new TasksInfo();
for (Task task : job.getTasks().values()) {
TaskType ttype = null;
if (type != null && !type.isEmpty()) {
try {
ttype = MRApps.taskType(type);
} catch (YarnRuntimeException e) {
throw new BadRequestException("tasktype must be either m or r");
}
}
if (ttype != null && task.getType() != ttype) {
continue;
}
allTasks.add(new TaskInfo(task));
}
return allTasks;
}
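The endpoint above delegates query-string parsing to MRApps.taskType(String). A short sketch of that contract, assuming (as the BadRequestException message suggests) that "m" and "r" are the only accepted symbols:

import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.util.MRApps;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;

public class TaskTypeSymbols {
  public static void main(String[] args) {
    System.out.println(MRApps.taskType("m")); // MAP
    System.out.println(MRApps.taskType("r")); // REDUCE
    try {
      MRApps.taskType("x"); // any other symbol
    } catch (YarnRuntimeException e) {
      System.out.println("rejected; the endpoint turns this into HTTP 400");
    }
  }
}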
@GET
@Path("/jobs/{jobid}/tasks/{taskid}/attempts")
@Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
public TaskAttemptsInfo getJobTaskAttempts(@Context HttpServletRequest hsr,
@PathParam("jobid") String jid, @PathParam("taskid") String tid) {
init();
TaskAttemptsInfo attempts = new TaskAttemptsInfo();
Job job = getJobFromJobIdString(jid, appCtx);
checkAccess(job, hsr);
Task task = getTaskFromTaskIdString(tid, job);
for (TaskAttempt ta : task.getAttempts().values()) {
if (ta != null) {
if (task.getType() == TaskType.REDUCE) {
attempts.add(new ReduceTaskAttemptInfo(ta, task.getType()));
} else {
attempts.add(new TaskAttemptInfo(ta, task.getType(), true));
}
}
}
return attempts;
}
@GET
@Path("/mapreduce/jobs/{jobid}/tasks/{taskid}/attempts/{attemptid}")
@Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
public TaskAttemptInfo getJobTaskAttemptId(@Context HttpServletRequest hsr,
@PathParam("jobid") String jid, @PathParam("taskid") String tid,
@PathParam("attemptid") String attId) {
init();
Job job = AMWebServices.getJobFromJobIdString(jid, ctx);
checkAccess(job, hsr);
Task task = AMWebServices.getTaskFromTaskIdString(tid, job);
TaskAttempt ta = AMWebServices.getTaskAttemptFromTaskAttemptString(attId,
task);
if (task.getType() == TaskType.REDUCE) {
return new ReduceTaskAttemptInfo(ta, task.getType());
} else {
return new TaskAttemptInfo(ta, task.getType(), false);
}
}
@Test
public void testKillDuringTaskAttemptCommit() {
mockTask = createMockTask(TaskType.REDUCE);
TaskId taskId = getNewTaskID();
scheduleTaskAttempt(taskId);
launchTaskAttempt(getLastAttempt().getAttemptId());
updateLastAttemptState(TaskAttemptState.COMMIT_PENDING);
commitTaskAttempt(getLastAttempt().getAttemptId());
TaskAttemptId commitAttempt = getLastAttempt().getAttemptId();
updateLastAttemptState(TaskAttemptState.KILLED);
killRunningTaskAttempt(commitAttempt);
assertFalse(mockTask.canCommit(commitAttempt));
}
private void computeProgress() {
this.readLock.lock();
try {
float mapProgress = 0f;
float reduceProgress = 0f;
for (Task task : this.tasks.values()) {
if (task.getType() == TaskType.MAP) {
mapProgress += (task.isFinished() ? 1f : task.getProgress());
} else {
reduceProgress += (task.isFinished() ? 1f : task.getProgress());
}
}
if (this.numMapTasks != 0) {
mapProgress = mapProgress / this.numMapTasks;
}
if (this.numReduceTasks != 0) {
reduceProgress = reduceProgress / this.numReduceTasks;
}
this.mapProgress = mapProgress;
this.reduceProgress = reduceProgress;
} finally {
this.readLock.unlock();
}
}
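A worked example of the averaging above: with four map tasks of which two are finished and one is half done, mapProgress accumulates 1 + 1 + 0.5 + 0 = 2.5 and is then divided by numMapTasks = 4, giving 0.625.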
@Override
public GetTaskReportsResponse getTaskReports(
GetTaskReportsRequest request) throws IOException {
JobId jobId = request.getJobId();
TaskType taskType = request.getTaskType();
GetTaskReportsResponse response =
recordFactory.newRecordInstance(GetTaskReportsResponse.class);
Job job = verifyAndGetJob(jobId, JobACL.VIEW_JOB, true);
Collection<Task> tasks = job.getTasks(taskType).values();
LOG.info("Getting task report for " + taskType + " " + jobId
+ ". Report-size will be " + tasks.size());
// Take lock to allow only one call, otherwise heap will blow up because
// of counters in the report when there are multiple callers.
synchronized (getTaskReportsLock) {
for (Task task : tasks) {
response.addTaskReport(task.getReport());
}
}
return response;
}
boolean remove(TaskAttemptId tId) {
ContainerId containerId = null;
if (tId.getTaskId().getTaskType().equals(TaskType.MAP)) {
containerId = maps.remove(tId).getId();
} else {
containerId = reduces.remove(tId).getId();
if (containerId != null) {
boolean preempted = preemptionWaitingReduces.remove(tId);
if (preempted) {
LOG.info("Reduce preemption successful " + tId);
}
}
}
if (containerId != null) {
containerToAttemptMap.remove(containerId);
return true;
}
return false;
}
private float getReduceProgress() {
Job job = myAppContext.getJob(myAttemptID.getTaskId().getJobId());
float runtime = getCodeRuntime();
Collection<Task> allMapTasks = job.getTasks(TaskType.MAP).values();
int numberMaps = allMapTasks.size();
int numberDoneMaps = 0;
for (Task mapTask : allMapTasks) {
if (mapTask.isFinished()) {
++numberDoneMaps;
}
}
if (numberMaps == numberDoneMaps) {
shuffleCompletedTime = Math.min(shuffleCompletedTime, clock.getTime());
return Math.min
((float) (clock.getTime() - shuffleCompletedTime)
/ (runtime * 2000.0F) + 0.5F,
1.0F);
} else {
return ((float) numberDoneMaps) / numberMaps * 0.5F;
}
}
private AtomicInteger containerNeed(TaskId taskID) {
JobId jobID = taskID.getJobId();
TaskType taskType = taskID.getTaskType();
ConcurrentMap<JobId, AtomicInteger> relevantMap
= taskType == TaskType.MAP ? mapContainerNeeds : reduceContainerNeeds;
AtomicInteger result = relevantMap.get(jobID);
if (result == null) {
relevantMap.putIfAbsent(jobID, new AtomicInteger(0));
result = relevantMap.get(jobID);
}
return result;
}
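The get/putIfAbsent/get sequence above is the pre-Java-8 idiom for installing a counter at most once under concurrent access. On Java 8+ the same guarantee is usually expressed with computeIfAbsent; a sketch, with a hypothetical String key to stay self-contained:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicInteger;

public class NeedCounter {
  private final ConcurrentMap<String, AtomicInteger> needs =
      new ConcurrentHashMap<>();

  // Installs at most one AtomicInteger per key, even with concurrent callers.
  AtomicInteger containerNeed(String jobId) {
    return needs.computeIfAbsent(jobId, k -> new AtomicInteger(0));
  }
}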
public void verifyAMTaskAttemptXML(Element element, TaskAttempt att,
TaskType ttype) {
verifyTaskAttemptGeneric(att, ttype,
WebServicesTestUtils.getXmlString(element, "id"),
WebServicesTestUtils.getXmlString(element, "state"),
WebServicesTestUtils.getXmlString(element, "type"),
WebServicesTestUtils.getXmlString(element, "rack"),
WebServicesTestUtils.getXmlString(element, "nodeHttpAddress"),
WebServicesTestUtils.getXmlString(element, "diagnostics"),
WebServicesTestUtils.getXmlString(element, "assignedContainerId"),
WebServicesTestUtils.getXmlLong(element, "startTime"),
WebServicesTestUtils.getXmlLong(element, "finishTime"),
WebServicesTestUtils.getXmlLong(element, "elapsedTime"),
WebServicesTestUtils.getXmlFloat(element, "progress"));
if (ttype == TaskType.REDUCE) {
verifyReduceTaskAttemptGeneric(att,
WebServicesTestUtils.getXmlLong(element, "shuffleFinishTime"),
WebServicesTestUtils.getXmlLong(element, "mergeFinishTime"),
WebServicesTestUtils.getXmlLong(element, "elapsedShuffleTime"),
WebServicesTestUtils.getXmlLong(element, "elapsedMergeTime"),
WebServicesTestUtils.getXmlLong(element, "elapsedReduceTime"));
}
}
private static void completeJobTasks(JobImpl job) {
// complete the map tasks and the reduce tasks so we start committing
int numMaps = job.getTotalMaps();
for (int i = 0; i < numMaps; ++i) {
job.handle(new JobTaskEvent(
MRBuilderUtils.newTaskId(job.getID(), 1, TaskType.MAP),
TaskState.SUCCEEDED));
Assert.assertEquals(JobState.RUNNING, job.getState());
}
int numReduces = job.getTotalReduces();
for (int i = 0; i < numReduces; ++i) {
job.handle(new JobTaskEvent(
MRBuilderUtils.newTaskId(job.getID(), 1, TaskType.REDUCE),
TaskState.SUCCEEDED));
Assert.assertEquals(JobState.RUNNING, job.getState());
}
}
@Test (timeout=100000)
public void testCompletedJob() throws Exception {
HistoryFileInfo info = mock(HistoryFileInfo.class);
when(info.getConfFile()).thenReturn(fullConfPath);
//Re-initialize to verify the delayed load.
completedJob =
new CompletedJob(conf, jobId, fullHistoryPath, loadTasks, "user",
info, jobAclsManager);
//Verify tasks loaded based on loadTask parameter.
assertEquals(loadTasks, completedJob.tasksLoaded.get());
assertEquals(1, completedJob.getAMInfos().size());
assertEquals(10, completedJob.getCompletedMaps());
assertEquals(1, completedJob.getCompletedReduces());
assertEquals(12, completedJob.getTasks().size());
//Verify tasks loaded at this point.
assertEquals(true, completedJob.tasksLoaded.get());
assertEquals(10, completedJob.getTasks(TaskType.MAP).size());
assertEquals(2, completedJob.getTasks(TaskType.REDUCE).size());
assertEquals("user", completedJob.getUserName());
assertEquals(JobState.SUCCEEDED, completedJob.getState());
JobReport jobReport = completedJob.getReport();
assertEquals("user", jobReport.getUser());
assertEquals(JobState.SUCCEEDED, jobReport.getJobState());
}
/**
 * Given the taskId details (JobId, suffix id and task type), returns the TaskReport.
 * @param jobId the JobId instance
 * @param id the suffix id as an int
 * @param taskType the task type
 * @return the TaskReport
 * @throws IOException
 */
public TaskReport getTaskReport(JobId jobId, int id, TaskType taskType) throws IOException {
TaskId taskId = YarnCommunicatorUtil.getTaskId(jobId, id, taskType);
GetTaskReportRequestProto proto = GetTaskReportRequestProto.getDefaultInstance();
GetTaskReportRequest getTaskReportRequest = new GetTaskReportRequestPBImpl(proto);
getTaskReportRequest.setTaskId(taskId);
GetTaskReportResponse taskReportResponse = proxy.getTaskReport(getTaskReportRequest);
return taskReportResponse.getTaskReport();
}
@Override
public TaskStateInternal transition(TaskImpl task, TaskEvent event) {
TaskAttemptId attemptId = null;
if (event instanceof TaskTAttemptEvent) {
TaskTAttemptEvent castEvent = (TaskTAttemptEvent) event;
attemptId = castEvent.getTaskAttemptID();
if (task.getInternalState() == TaskStateInternal.SUCCEEDED &&
!attemptId.equals(task.successfulAttempt)) {
// don't allow a different task attempt to override a previous
// succeeded state
task.finishedAttempts.add(castEvent.getTaskAttemptID());
task.inProgressAttempts.remove(castEvent.getTaskAttemptID());
return TaskStateInternal.SUCCEEDED;
}
}
// a successful REDUCE task should not be overridden
// TODO: consider moving it to MapTaskImpl
if (!TaskType.MAP.equals(task.getType())) {
LOG.error("Unexpected event for REDUCE task " + event.getType());
task.internalError(event.getType());
}
// successful attempt is now killed. reschedule
// tell the job about the rescheduling
unSucceed(task);
task.handleTaskAttemptCompletion(attemptId,
TaskAttemptCompletionEventStatus.KILLED);
task.eventHandler.handle(new JobMapTaskRescheduledEvent(task.taskId));
// typically we are here because this map task was run on a bad node and
// we want to reschedule it on a different node.
// Depending on whether there are previous failed attempts or not this
// can SCHEDULE or RESCHEDULE the container allocate request. If this
// SCHEDULE's then the dataLocal hosts of this taskAttempt will be used
// from the map splitInfo. So the bad node might be sent as a location
// to the RM. But the RM would ignore that just like it would ignore
// currently pending container requests affinitized to bad nodes.
task.addAndScheduleAttempt(Avataar.VIRGIN);
return TaskStateInternal.SCHEDULED;
}
void incr(Task task) {
TaskType type = task.getType();
boolean finished = task.isFinished();
if (type == TaskType.MAP) {
if (finished) {
++completedMaps;
}
++maps;
} else if (type == TaskType.REDUCE) {
if (finished) {
++completedReduces;
}
++reduces;
}
}
@Test
public void testTaskProgress() {
LOG.info("--- START: testTaskProgress ---");
mockTask = createMockTask(TaskType.MAP);
// launch task
TaskId taskId = getNewTaskID();
scheduleTaskAttempt(taskId);
float progress = 0f;
assert(mockTask.getProgress() == progress);
launchTaskAttempt(getLastAttempt().getAttemptId());
// update attempt1
progress = 50f;
updateLastAttemptProgress(progress);
assert(mockTask.getProgress() == progress);
progress = 100f;
updateLastAttemptProgress(progress);
assert(mockTask.getProgress() == progress);
progress = 0f;
// mark first attempt as killed
updateLastAttemptState(TaskAttemptState.KILLED);
assert(mockTask.getProgress() == progress);
// kill first attempt
// should trigger a new attempt
// as no successful attempts
killRunningTaskAttempt(getLastAttempt().getAttemptId());
assert(taskAttempts.size() == 2);
assert(mockTask.getProgress() == 0f);
launchTaskAttempt(getLastAttempt().getAttemptId());
progress = 50f;
updateLastAttemptProgress(progress);
assert(mockTask.getProgress() == progress);
}
private TaskAttemptImpl createMapTaskAttemptImplForTest(
EventHandler eventHandler, TaskSplitMetaInfo taskSplitMetaInfo, Clock clock) {
ApplicationId appId = ApplicationId.newInstance(1, 1);
JobId jobId = MRBuilderUtils.newJobId(appId, 1);
TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
TaskAttemptListener taListener = mock(TaskAttemptListener.class);
Path jobFile = mock(Path.class);
JobConf jobConf = new JobConf();
TaskAttemptImpl taImpl =
new MapTaskAttemptImpl(taskId, 1, eventHandler, jobFile, 1,
taskSplitMetaInfo, jobConf, taListener, null,
null, clock, null);
return taImpl;
}
public static TaskAttemptId makeTaskAttemptId(long ts, int appId, int taskId,
TaskType taskType, int id) {
ApplicationId aID = ApplicationId.newInstance(ts, appId);
JobId jID = MRBuilderUtils.newJobId(aID, id);
TaskId tID = MRBuilderUtils.newTaskId(jID, taskId, taskType);
return MRBuilderUtils.newTaskAttemptId(tID, id);
}
private int getGpuRequired(Configuration conf, TaskType taskType) {
int gcores = 0;
if (taskType == TaskType.MAP) {
gcores =
conf.getInt(MRJobConfig.MAP_GPU_CORES,
MRJobConfig.DEFAULT_MAP_GPU_CORES);
} else if (taskType == TaskType.REDUCE) {
gcores =
conf.getInt(MRJobConfig.REDUCE_GPU_CORES,
MRJobConfig.DEFAULT_REDUCE_GPU_CORES);
}
return gcores;
}
@Test
public void testKillSuccessfulTask() {
LOG.info("--- START: testKillSuccesfulTask ---");
mockTask = createMockTask(TaskType.MAP);
TaskId taskId = getNewTaskID();
scheduleTaskAttempt(taskId);
launchTaskAttempt(getLastAttempt().getAttemptId());
commitTaskAttempt(getLastAttempt().getAttemptId());
mockTask.handle(new TaskTAttemptEvent(getLastAttempt().getAttemptId(),
TaskEventType.T_ATTEMPT_SUCCEEDED));
assertTaskSucceededState();
mockTask.handle(new TaskEvent(taskId, TaskEventType.T_KILL));
assertTaskSucceededState();
}
@Test (timeout = 120000)
public void testTaskIDtoString() {
TaskId tid = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(TaskId.class);
tid.setJobId(RecordFactoryProvider.getRecordFactory(null).newRecordInstance(JobId.class));
tid.getJobId().setAppId(ApplicationId.newInstance(0, 0));
tid.setTaskType(TaskType.MAP);
TaskType type = tid.getTaskType();
System.err.println(type);
type = TaskType.REDUCE;
System.err.println(type);
System.err.println(tid.getTaskType());
assertEquals("task_0_0000_m_000000", MRApps.toString(tid));
tid.setTaskType(TaskType.REDUCE);
assertEquals("task_0_0000_r_000000", MRApps.toString(tid));
}
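The test pins down the string layout task_<timestamp>_<jobSeq>_<m|r>_<taskSeq>, with the task type encoded as a single letter. Assuming MRApps.toTaskID parses that format back (a sketch, not part of the test):

import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.util.MRApps;

public class TaskIdRoundTrip {
  public static void main(String[] args) {
    TaskId parsed = MRApps.toTaskID("task_0_0000_m_000000");
    System.out.println(parsed.getTaskType()); // expected: MAP
  }
}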
@SuppressWarnings("unchecked")
@Override
public void handle(ContainerAllocatorEvent event) {
if (event.getType() == ContainerAllocator.EventType.CONTAINER_REQ) {
LOG.info("Processing the event " + event.toString());
// Assign the same container ID as the AM
ContainerId cID =
ContainerId.newContainerId(getContext().getApplicationAttemptId(),
this.containerId.getContainerId());
Container container = recordFactory.newRecordInstance(Container.class);
container.setId(cID);
NodeId nodeId = NodeId.newInstance(this.nmHost, this.nmPort);
container.setNodeId(nodeId);
container.setContainerToken(null);
container.setNodeHttpAddress(this.nmHost + ":" + this.nmHttpPort);
// send the container-assigned event to task attempt
if (event.getAttemptID().getTaskId().getTaskType() == TaskType.MAP) {
JobCounterUpdateEvent jce =
new JobCounterUpdateEvent(event.getAttemptID().getTaskId()
.getJobId());
// TODO Setting OTHER_LOCAL_MAP for now.
jce.addCounterUpdate(JobCounter.OTHER_LOCAL_MAPS, 1);
eventHandler.handle(jce);
}
eventHandler.handle(new TaskAttemptContainerAssignedEvent(
event.getAttemptID(), container, applicationACLs));
}
}
@Override
public Map<TaskId, Task> getTasks(TaskType taskType) {
loadAllTasks();
if (TaskType.MAP.equals(taskType)) {
return mapTasks;
} else {//we have only two types of tasks
return reduceTasks;
}
}
@Override
public long thresholdRuntime(TaskId taskID) {
JobId jobID = taskID.getJobId();
Job job = context.getJob(jobID);
TaskType type = taskID.getTaskType();
DataStatistics statistics
= dataStatisticsForTask(taskID);
int completedTasksOfType
= type == TaskType.MAP
? job.getCompletedMaps() : job.getCompletedReduces();
int totalTasksOfType
= type == TaskType.MAP
? job.getTotalMaps() : job.getTotalReduces();
if (completedTasksOfType < MINIMUM_COMPLETE_NUMBER_TO_SPECULATE
|| (((float)completedTasksOfType) / totalTasksOfType)
< MINIMUM_COMPLETE_PROPORTION_TO_SPECULATE ) {
return Long.MAX_VALUE;
}
long result = statistics == null
? Long.MAX_VALUE
: (long)statistics.outlier(slowTaskRelativeTresholds.get(job));
return result;
}
boolean remove(TaskAttemptId tId) {
ContainerRequest req = null;
if (tId.getTaskId().getTaskType().equals(TaskType.MAP)) {
req = maps.remove(tId);
} else {
req = reduces.remove(tId);
}
if (req == null) {
return false;
} else {
decContainerReq(req);
return true;
}
}
public MockTaskAttemptImpl(TaskId taskId, int id, EventHandler eventHandler,
TaskAttemptListener taskAttemptListener, Path jobFile, int partition,
JobConf conf, Token<JobTokenIdentifier> jobToken,
Credentials credentials, Clock clock,
AppContext appContext, TaskType taskType) {
super(taskId, id, eventHandler, taskAttemptListener, jobFile, partition, conf,
dataLocations, jobToken, credentials, clock, appContext);
this.taskType = taskType;
}
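Note that dataLocations in the super(...) call is not a constructor parameter; for this excerpt to compile it must resolve to a field of the enclosing test class in the original source.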