Source code examples for the class org.apache.hadoop.mapreduce.v2.api.records.TaskId

Listed below are example snippets showing how the org.apache.hadoop.mapreduce.v2.api.records.TaskId API class is used in practice; you can also follow the links to GitHub to view the full source.
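
For orientation, here is a minimal sketch (not taken from any of the projects listed below) that builds a TaskId through MRBuilderUtils and renders it with MRApps.toString; the resulting string format is the one asserted in Example 18 (TestMRApps.testTaskIDtoString).

// Minimal sketch: construct a TaskId and print its canonical string form.
// Only classes that also appear in the examples below are used here.
public static void taskIdDemo() {
  ApplicationId appId = ApplicationId.newInstance(0, 0);               // (clusterTimestamp, id)
  JobId jobId = MRBuilderUtils.newJobId(appId, 0);
  TaskId mapTaskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.MAP);
  System.out.println(MRApps.toString(mapTaskId));                      // prints: task_0_0000_m_000000
}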

Example 1  Project: jumbune   File: MRCommunicator.java
/**
 * Extracts all Map or Reduce task reports for a given Job Id.
 * @param taskType the TaskType, either MAP or REDUCE
 * @param jobId the Job Id for which all Task Reports are to be extracted
 * @return a Map<TaskId, TaskReport> of task reports keyed by TaskId
 * @throws IOException
 */
public Map<TaskId, TaskReport> getTaskTypeWiseTaskReports(TaskType taskType, JobId jobId) throws IOException{
	Map<TaskId, TaskReport> reports = new HashMap<TaskId, TaskReport>();
	TaskReport report;

	// Walk the task ids of the requested type, collecting a TaskReport for each until none remain
	boolean rme = false;
	int id = 0;
	do{
		try{
			report = getTaskReport(jobId, id, taskType);
			TaskId taskId = MRBuilderUtils.newTaskId(jobId, id, taskType);
			reports.put(taskId, report);
			id++;
		}catch(RemoteException re){
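			// Assumption: a RemoteException here means no task report exists for this id,
			// i.e. we have walked past the last task of this type, so the loop can stop.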
			rme = true;
		}
	}while(!rme);

	return reports;
}
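
A hedged usage sketch: the snippet below assumes an already-constructed MRCommunicator instance (comm) and a JobId (jobId), neither of which is shown above, and simply iterates the returned map.

// Hypothetical usage of getTaskTypeWiseTaskReports; `comm` and `jobId` are assumed to exist.
Map<TaskId, TaskReport> mapReports = comm.getTaskTypeWiseTaskReports(TaskType.MAP, jobId);
for (Map.Entry<TaskId, TaskReport> entry : mapReports.entrySet()) {
  System.out.println(entry.getKey().getId() + " -> " + entry.getValue().getTaskState());
}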
 
Example 2  Project: big-c   File: TestBlocks.java
private Task getTask(long timestamp) {
  
  JobId jobId = new JobIdPBImpl();
  jobId.setId(0);
  jobId.setAppId(ApplicationIdPBImpl.newInstance(timestamp,1));

  TaskId taskId = new TaskIdPBImpl();
  taskId.setId(0);
  taskId.setTaskType(TaskType.REDUCE);
  taskId.setJobId(jobId);
  Task task = mock(Task.class);
  when(task.getID()).thenReturn(taskId);
  TaskReport report = mock(TaskReport.class);
  when(report.getProgress()).thenReturn(0.7f);
  when(report.getTaskState()).thenReturn(TaskState.SUCCEEDED);
  when(report.getStartTime()).thenReturn(100001L);
  when(report.getFinishTime()).thenReturn(100011L);

  when(task.getReport()).thenReturn(report);
  when(task.getType()).thenReturn(TaskType.REDUCE);
  return task;
}
 
Example 3  Project: hadoop   File: HsWebServices.java
@GET
@Path("/mapreduce/jobs/{jobid}/tasks/{taskid}/counters")
@Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
public JobTaskCounterInfo getSingleTaskCounters(
    @Context HttpServletRequest hsr, @PathParam("jobid") String jid,
    @PathParam("taskid") String tid) {

  init();
  Job job = AMWebServices.getJobFromJobIdString(jid, ctx);
  checkAccess(job, hsr);
  TaskId taskID = MRApps.toTaskID(tid);
  if (taskID == null) {
    throw new NotFoundException("taskid " + tid + " not found or invalid");
  }
  Task task = job.getTask(taskID);
  if (task == null) {
    throw new NotFoundException("task not found with id " + tid);
  }
  return new JobTaskCounterInfo(task);
}
 
Example 4  Project: big-c   File: JobImpl.java
protected void addTask(Task task) {
  synchronized (tasksSyncHandle) {
    if (lazyTasksCopyNeeded) {
      Map<TaskId, Task> newTasks = new LinkedHashMap<TaskId, Task>();
      newTasks.putAll(tasks);
      tasks = newTasks;
      lazyTasksCopyNeeded = false;
    }
  }
  tasks.put(task.getID(), task);
  if (task.getType() == TaskType.MAP) {
    mapTasks.add(task.getID());
  } else if (task.getType() == TaskType.REDUCE) {
    reduceTasks.add(task.getID());
  }
  metrics.waitingTask(task);
}
 
Example 5  Project: hadoop   File: HsAttemptsPage.java
@Override
protected Collection<TaskAttempt> getTaskAttempts() {
  List<TaskAttempt> fewTaskAttemps = new ArrayList<TaskAttempt>();
  String taskTypeStr = $(TASK_TYPE);
  TaskType taskType = MRApps.taskType(taskTypeStr);
  String attemptStateStr = $(ATTEMPT_STATE);
  TaskAttemptStateUI neededState = MRApps
      .taskAttemptState(attemptStateStr);
  Job j = app.getJob();
  Map<TaskId, Task> tasks = j.getTasks(taskType);
  for (Task task : tasks.values()) {
    Map<TaskAttemptId, TaskAttempt> attempts = task.getAttempts();
    for (TaskAttempt attempt : attempts.values()) {
      if (neededState.correspondsTo(attempt.getState())) {
        fewTaskAttemps.add(attempt);
      }
    }
  }
  return fewTaskAttemps;
}
 
Example 6  Project: hadoop   File: TestAMWebServicesTasks.java
public void verifyTaskGeneric(Task task, String id, String state,
    String type, String successfulAttempt, long startTime, long finishTime,
    long elapsedTime, float progress, String status) {

  TaskId taskid = task.getID();
  String tid = MRApps.toString(taskid);
  TaskReport report = task.getReport();

  WebServicesTestUtils.checkStringMatch("id", tid, id);
  WebServicesTestUtils.checkStringMatch("type", task.getType().toString(),
      type);
  WebServicesTestUtils.checkStringMatch("state", report.getTaskState()
      .toString(), state);
  // not easily checked without duplicating logic, just make sure it's here
  assertNotNull("successfulAttempt null", successfulAttempt);
  assertEquals("startTime wrong", report.getStartTime(), startTime);
  assertEquals("finishTime wrong", report.getFinishTime(), finishTime);
  assertEquals("elapsedTime wrong", finishTime - startTime, elapsedTime);
  assertEquals("progress wrong", report.getProgress() * 100, progress, 1e-3f);
  assertEquals("status wrong", report.getStatus(), status);
}
 
Example 7  Project: hadoop   File: TestAMWebServicesTasks.java
public void verifyAMTaskXML(NodeList nodes, Job job) {

    assertEquals("incorrect number of elements", 2, nodes.getLength());

    for (Task task : job.getTasks().values()) {
      TaskId id = task.getID();
      String tid = MRApps.toString(id);
      Boolean found = false;
      for (int i = 0; i < nodes.getLength(); i++) {
        Element element = (Element) nodes.item(i);

        if (tid.matches(WebServicesTestUtils.getXmlString(element, "id"))) {
          found = true;
          verifyAMSingleTaskXML(element, task);
        }
      }
      assertTrue("task with id: " + tid + " not in web service output", found);
    }
  }
 
Example 8  Project: big-c   File: TestJobHistoryEntities.java
@Test (timeout=10000)
public void testCompletedTaskAttempt() throws Exception {
  HistoryFileInfo info = mock(HistoryFileInfo.class);
  when(info.getConfFile()).thenReturn(fullConfPath);
  completedJob =
    new CompletedJob(conf, jobId, fullHistoryPath, loadTasks, "user",
        info, jobAclsManager);
  TaskId mt1Id = MRBuilderUtils.newTaskId(jobId, 0, TaskType.MAP);
  TaskId rt1Id = MRBuilderUtils.newTaskId(jobId, 0, TaskType.REDUCE);
  TaskAttemptId mta1Id = MRBuilderUtils.newTaskAttemptId(mt1Id, 0);
  TaskAttemptId rta1Id = MRBuilderUtils.newTaskAttemptId(rt1Id, 0);
  
  Task mt1 = completedJob.getTask(mt1Id);
  Task rt1 = completedJob.getTask(rt1Id);
  
  TaskAttempt mta1 = mt1.getAttempt(mta1Id);
  assertEquals(TaskAttemptState.SUCCEEDED, mta1.getState());
  assertEquals("localhost:45454", mta1.getAssignedContainerMgrAddress());
  assertEquals("localhost:9999", mta1.getNodeHttpAddress());
  TaskAttemptReport mta1Report = mta1.getReport();
  assertEquals(TaskAttemptState.SUCCEEDED, mta1Report.getTaskAttemptState());
  assertEquals("localhost", mta1Report.getNodeManagerHost());
  assertEquals(45454, mta1Report.getNodeManagerPort());
  assertEquals(9999, mta1Report.getNodeManagerHttpPort());
  
  TaskAttempt rta1 = rt1.getAttempt(rta1Id);
  assertEquals(TaskAttemptState.SUCCEEDED, rta1.getState());
  assertEquals("localhost:45454", rta1.getAssignedContainerMgrAddress());
  assertEquals("localhost:9999", rta1.getNodeHttpAddress());
  TaskAttemptReport rta1Report = rta1.getReport();
  assertEquals(TaskAttemptState.SUCCEEDED, rta1Report.getTaskAttemptState());
  assertEquals("localhost", rta1Report.getNodeManagerHost());
  assertEquals(45454, rta1Report.getNodeManagerPort());
  assertEquals(9999, rta1Report.getNodeManagerHttpPort());
}
 
Example 9  Project: hadoop   File: TaskAttemptIdPBImpl.java
@Override
public synchronized void setTaskId(TaskId taskId) {
  maybeInitBuilder();
  if (taskId == null)
    builder.clearTaskId();
  this.taskId = taskId;
}
 
Example 10  Project: hadoop   File: DefaultSpeculator.java
/**
 * Absorbs one TaskAttemptStatus
 *
 * @param reportedStatus the status report that we got from a task attempt
 *        that we want to fold into the speculation data for this job
 * @param timestamp the time this status corresponds to.  This matters
 *        because statuses contain progress.
 */
protected void statusUpdate(TaskAttemptStatus reportedStatus, long timestamp) {

  String stateString = reportedStatus.taskState.toString();

  TaskAttemptId attemptID = reportedStatus.id;
  TaskId taskID = attemptID.getTaskId();
  Job job = context.getJob(taskID.getJobId());

  if (job == null) {
    return;
  }

  Task task = job.getTask(taskID);

  if (task == null) {
    return;
  }

  estimator.updateAttempt(reportedStatus, timestamp);

  if (stateString.equals(TaskAttemptState.RUNNING.name())) {
    runningTasks.putIfAbsent(taskID, Boolean.TRUE);
  } else {
    runningTasks.remove(taskID, Boolean.TRUE);
    if (!stateString.equals(TaskAttemptState.STARTING.name())) {
      runningTaskAttemptStatistics.remove(attemptID);
    }
  }
}
 
Example 11  Project: big-c   File: TaskAttemptIdPBImpl.java
@Override
public synchronized void setTaskId(TaskId taskId) {
  maybeInitBuilder();
  if (taskId == null)
    builder.clearTaskId();
  this.taskId = taskId;
}
 
Example 12  Project: big-c   File: TestAppController.java
@Before
public void setUp() throws IOException {
  AppContext context = mock(AppContext.class);
  when(context.getApplicationID()).thenReturn(
      ApplicationId.newInstance(0, 0));
  when(context.getApplicationName()).thenReturn("AppName");
  when(context.getUser()).thenReturn("User");
  when(context.getStartTime()).thenReturn(System.currentTimeMillis());
  job = mock(Job.class);
  Task task = mock(Task.class);

  when(job.getTask(any(TaskId.class))).thenReturn(task);

  JobId jobID = MRApps.toJobID("job_01_01");
  when(context.getJob(jobID)).thenReturn(job);
  when(job.checkAccess(any(UserGroupInformation.class), any(JobACL.class)))
      .thenReturn(true);

  App app = new App(context);
  Configuration configuration = new Configuration();
  ctx = mock(RequestContext.class);

  appController = new AppControllerForTest(app, configuration, ctx);
  appController.getProperty().put(AMParams.JOB_ID, "job_01_01");
  appController.getProperty().put(AMParams.TASK_ID, "task_01_01_m01_01");

}
 
Example 13  Project: big-c   File: CompletedJob.java
@Override
public Map<TaskId, Task> getTasks(TaskType taskType) {
  loadAllTasks();
  if (TaskType.MAP.equals(taskType)) {
    return mapTasks;
  } else {//we have only two types of tasks
    return reduceTasks;
  }
}
 
Example 14  Project: hadoop   File: TestContainerLauncherImpl.java
public static TaskAttemptId makeTaskAttemptId(long ts, int appId, int taskId, 
    TaskType taskType, int id) {
  ApplicationId aID = ApplicationId.newInstance(ts, appId);
  JobId jID = MRBuilderUtils.newJobId(aID, id);
  TaskId tID = MRBuilderUtils.newTaskId(jID, taskId, taskType);
  return MRBuilderUtils.newTaskAttemptId(tID, id);
}
 
Example 15  Project: big-c   File: TestContainerLauncherImpl.java
public static TaskAttemptId makeTaskAttemptId(long ts, int appId, int taskId, 
    TaskType taskType, int id) {
  ApplicationId aID = ApplicationId.newInstance(ts, appId);
  JobId jID = MRBuilderUtils.newJobId(aID, id);
  TaskId tID = MRBuilderUtils.newTaskId(jID, taskId, taskType);
  return MRBuilderUtils.newTaskAttemptId(tID, id);
}
 
Example 16  Project: big-c   File: TestTaskAttempt.java
private TaskAttemptImpl createMapTaskAttemptImplForTest(
    EventHandler eventHandler, TaskSplitMetaInfo taskSplitMetaInfo, Clock clock) {
  ApplicationId appId = ApplicationId.newInstance(1, 1);
  JobId jobId = MRBuilderUtils.newJobId(appId, 1);
  TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
  TaskAttemptListener taListener = mock(TaskAttemptListener.class);
  Path jobFile = mock(Path.class);
  JobConf jobConf = new JobConf();
  TaskAttemptImpl taImpl =
      new MapTaskAttemptImpl(taskId, 1, eventHandler, jobFile, 1,
          taskSplitMetaInfo, jobConf, taListener, null,
          null, clock, null);
  return taImpl;
}
 
Example 17  Project: big-c   File: TestBlocks.java
/**
 * test HsTasksBlock's rendering.
 */
@Test
public void testHsTasksBlock() {

  Task task = getTask(0);

  Map<TaskId, Task> tasks = new HashMap<TaskId, Task>();
  tasks.put(task.getID(), task);

  AppContext ctx = mock(AppContext.class);
  AppForTest app = new AppForTest(ctx);
  Job job = mock(Job.class);
  when(job.getTasks()).thenReturn(tasks);

  app.setJob(job);

  HsTasksBlockForTest block = new HsTasksBlockForTest(app);

  block.addParameter(AMParams.TASK_TYPE, "r");

  PrintWriter pWriter = new PrintWriter(data);
  Block html = new BlockForTest(new HtmlBlockForTest(), pWriter, 0, false);

  block.render(html);
  pWriter.flush();
  // should be printed information about task
  assertTrue(data.toString().contains("task_0_0001_r_000000"));
  assertTrue(data.toString().contains("SUCCEEDED"));
  assertTrue(data.toString().contains("100001"));
  assertTrue(data.toString().contains("100011"));
  assertTrue(data.toString().contains(""));
}
 
Example 18  Project: big-c   File: TestMRApps.java
@Test (timeout = 120000)
public void testTaskIDtoString() {
  TaskId tid = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(TaskId.class);
  tid.setJobId(RecordFactoryProvider.getRecordFactory(null).newRecordInstance(JobId.class));
  tid.getJobId().setAppId(ApplicationId.newInstance(0, 0));
  tid.setTaskType(TaskType.MAP);
  TaskType type = tid.getTaskType();
  System.err.println(type);
  type = TaskType.REDUCE;
  System.err.println(type);
  System.err.println(tid.getTaskType());
  assertEquals("task_0_0000_m_000000", MRApps.toString(tid));
  tid.setTaskType(TaskType.REDUCE);
  assertEquals("task_0_0000_r_000000", MRApps.toString(tid));
}
 
Example 19  Project: hadoop   File: JobImpl.java
@Override
public void transition(JobImpl job, JobEvent event) {
  //get number of shuffling reduces
  int shufflingReduceTasks = 0;
  for (TaskId taskId : job.reduceTasks) {
    Task task = job.tasks.get(taskId);
    if (TaskState.RUNNING.equals(task.getState())) {
      for(TaskAttempt attempt : task.getAttempts().values()) {
        if(attempt.getPhase() == Phase.SHUFFLE) {
          shufflingReduceTasks++;
          break;
        }
      }
    }
  }

  JobTaskAttemptFetchFailureEvent fetchfailureEvent = 
    (JobTaskAttemptFetchFailureEvent) event;
  for (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId mapId : 
        fetchfailureEvent.getMaps()) {
    Integer fetchFailures = job.fetchFailuresMapping.get(mapId);
    fetchFailures = (fetchFailures == null) ? 1 : (fetchFailures+1);
    job.fetchFailuresMapping.put(mapId, fetchFailures);
    
    float failureRate = shufflingReduceTasks == 0 ? 1.0f : 
      (float) fetchFailures / shufflingReduceTasks;
    // declare faulty if fetch-failures >= max-allowed-failures
    if (fetchFailures >= job.getMaxFetchFailuresNotifications()
        && failureRate >= job.getMaxAllowedFetchFailuresFraction()) {
      LOG.info("Too many fetch-failures for output of task attempt: " + 
          mapId + " ... raising fetch failure to map");
      job.eventHandler.handle(new TaskAttemptEvent(mapId, 
          TaskAttemptEventType.TA_TOO_MANY_FETCH_FAILURE));
      job.fetchFailuresMapping.remove(mapId);
    }
  }
}
 
Example 20  Project: hadoop   File: MRClientService.java
private Task verifyAndGetTask(TaskId taskID, 
    JobACL accessType) throws IOException {
  Task task =
      verifyAndGetJob(taskID.getJobId(), accessType, true).getTask(taskID);
  if (task == null) {
    throw new IOException("Unknown Task " + taskID);
  }
  return task;
}
 
Example 21  Project: big-c   File: JobImpl.java
@Override
public Map<TaskId, Task> getTasks() {
  synchronized (tasksSyncHandle) {
    lazyTasksCopyNeeded = true;
    return Collections.unmodifiableMap(tasks);
  }
}
 
Example 22  Project: hadoop   File: MRBuilderUtils.java
public static TaskId newTaskId(JobId jobId, int id, TaskType taskType) {
  TaskId taskId = Records.newRecord(TaskId.class);
  taskId.setJobId(jobId);
  taskId.setId(id);
  taskId.setTaskType(taskType);
  return taskId;
}
 
Example 23  Project: big-c   File: TestTaskImpl.java
@Test
public void testKillSuccessfulTask() {
  LOG.info("--- START: testKillSuccesfulTask ---");
  mockTask = createMockTask(TaskType.MAP);
  TaskId taskId = getNewTaskID();
  scheduleTaskAttempt(taskId);
  launchTaskAttempt(getLastAttempt().getAttemptId());
  commitTaskAttempt(getLastAttempt().getAttemptId());
  mockTask.handle(new TaskTAttemptEvent(getLastAttempt().getAttemptId(),
      TaskEventType.T_ATTEMPT_SUCCEEDED));
  assertTaskSucceededState();
  mockTask.handle(new TaskEvent(taskId, TaskEventType.T_KILL));
  assertTaskSucceededState();
}
 
Example 24  Project: hadoop   File: TestRuntimeEstimators.java
@Override
public void handle(TaskEvent event) {
  TaskId taskID = event.getTaskID();
  Task task = myJob.getTask(taskID);

  Assert.assertEquals
      ("Wrong type event", TaskEventType.T_ADD_SPEC_ATTEMPT, event.getType());

  System.out.println("SpeculationRequestEventHandler.handle adds a speculation task for " + taskID);

  addAttempt(task);
}
 
Example 25  Project: hadoop   File: TestTaskImpl.java
public MockTaskAttemptImpl(TaskId taskId, int id, EventHandler eventHandler,
    TaskAttemptListener taskAttemptListener, Path jobFile, int partition,
    JobConf conf, Token<JobTokenIdentifier> jobToken,
    Credentials credentials, Clock clock,
    AppContext appContext, TaskType taskType) {
  super(taskId, id, eventHandler, taskAttemptListener, jobFile, partition, conf,
      dataLocations, jobToken, credentials, clock, appContext);
  this.taskType = taskType;
}
 
Example 26  Project: hadoop   File: TestTaskImpl.java
@Test
public void testFailureDuringTaskAttemptCommit() {
  mockTask = createMockTask(TaskType.MAP);        
  TaskId taskId = getNewTaskID();
  scheduleTaskAttempt(taskId);
  launchTaskAttempt(getLastAttempt().getAttemptId());
  updateLastAttemptState(TaskAttemptState.COMMIT_PENDING);
  commitTaskAttempt(getLastAttempt().getAttemptId());

  // During the task attempt commit there is an exception which causes
  // the attempt to fail
  updateLastAttemptState(TaskAttemptState.FAILED);
  failRunningTaskAttempt(getLastAttempt().getAttemptId());

  assertEquals(2, taskAttempts.size());
  updateLastAttemptState(TaskAttemptState.SUCCEEDED);
  commitTaskAttempt(getLastAttempt().getAttemptId());
  mockTask.handle(new TaskTAttemptEvent(getLastAttempt().getAttemptId(), 
      TaskEventType.T_ATTEMPT_SUCCEEDED));
  
  assertFalse("First attempt should not commit",
      mockTask.canCommit(taskAttempts.get(0).getAttemptId()));
  assertTrue("Second attempt should commit",
      mockTask.canCommit(getLastAttempt().getAttemptId()));

  assertTaskSucceededState();
}
 
Example 27  Project: hadoop   File: TestRMContainerAllocator.java
private ContainerAllocatorEvent createDeallocateEvent(JobId jobId,
    int taskAttemptId, boolean reduce) {
  TaskId taskId;
  if (reduce) {
    taskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.REDUCE);
  } else {
    taskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.MAP);
  }
  TaskAttemptId attemptId =
      MRBuilderUtils.newTaskAttemptId(taskId, taskAttemptId);
  return new ContainerAllocatorEvent(attemptId,
      ContainerAllocator.EventType.CONTAINER_DEALLOCATE);
}
 
Example 28  Project: hadoop   File: MRBuilderUtils.java
public static TaskAttemptId newTaskAttemptId(TaskId taskId, int attemptId) {
  TaskAttemptId taskAttemptId =
      Records.newRecord(TaskAttemptId.class);
  taskAttemptId.setTaskId(taskId);
  taskAttemptId.setId(attemptId);
  return taskAttemptId;
}
 
Example 29  Project: hadoop   File: TestKill.java
@Test
public void testKillJob() throws Exception {
  final CountDownLatch latch = new CountDownLatch(1);
  
  MRApp app = new BlockingMRApp(1, 0, latch);
  //this will start the job but job won't complete as task is
  //blocked
  Job job = app.submit(new Configuration());
  
  //wait and validate for Job to become RUNNING
  app.waitForState(job, JobState.RUNNING);
  
  //send the kill signal to Job
  app.getContext().getEventHandler().handle(
      new JobEvent(job.getID(), JobEventType.JOB_KILL));
  
  //unblock Task
  latch.countDown();

  //wait and validate for Job to be KILLED
  app.waitForState(job, JobState.KILLED);
  Map<TaskId,Task> tasks = job.getTasks();
  Assert.assertEquals("No of tasks is not correct", 1, 
      tasks.size());
  Task task = tasks.values().iterator().next();
  Assert.assertEquals("Task state not correct", TaskState.KILLED, 
      task.getReport().getTaskState());
  Map<TaskAttemptId, TaskAttempt> attempts = 
    tasks.values().iterator().next().getAttempts();
  Assert.assertEquals("No of attempts is not correct", 1, 
      attempts.size());
  Iterator<TaskAttempt> it = attempts.values().iterator();
  Assert.assertEquals("Attempt state not correct", TaskAttemptState.KILLED, 
        it.next().getReport().getTaskAttemptState());
}
 
Example 30  Project: big-c   File: TaskRecoverEvent.java
public TaskRecoverEvent(TaskId taskID, TaskInfo taskInfo,
    OutputCommitter committer, boolean recoverTaskOutput) {
  super(taskID, TaskEventType.T_RECOVER);
  this.taskInfo = taskInfo;
  this.committer = committer;
  this.recoverTaskOutput = recoverTaskOutput;
}
 