Source code examples for the class org.apache.hadoop.mapreduce.v2.api.records.JobId

The following examples show how the org.apache.hadoop.mapreduce.v2.api.records.JobId API is used in practice, drawn from the source of several open-source projects.
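Before the examples, here is a minimal sketch (with placeholder ids, not drawn from any of the projects below) of the two usual ways to obtain a JobId: building one directly with MRBuilderUtils, or converting a classic org.apache.hadoop.mapreduce.JobID with TypeConverter. Both patterns recur in the examples that follow.

import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;

public class JobIdDemo {
  public static void main(String[] args) {
    // Build directly: (cluster timestamp, application id, job sequence number)
    JobId built = MRBuilderUtils.newJobId(1234567890000L, 1, 1);

    // Convert from the classic JobID string form
    JobID classic = JobID.forName("job_1234567890000_0001");
    JobId converted = TypeConverter.toYarn(classic);

    // Both render in the familiar job_<timestamp>_<seq> form
    System.out.println(built + " " + converted);
  }
}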

Example 1 Project: hadoop File: TestAMWebServicesAttempts.java
@Test
public void testTaskAttemptsDefault() throws JSONException, Exception {
  WebResource r = resource();
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    for (Task task : jobsMap.get(id).getTasks().values()) {

      String tid = MRApps.toString(task.getID());
      ClientResponse response = r.path("ws").path("v1").path("mapreduce")
          .path("jobs").path(jobId).path("tasks").path(tid).path("attempts")
          .get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
      JSONObject json = response.getEntity(JSONObject.class);
      verifyAMTaskAttempts(json, task);
    }
  }
}
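The "Default" in the test name refers to the missing Accept header: unlike Example 3 below, which requests MediaType.APPLICATION_JSON explicitly, this request lets the server choose, and the assertion on APPLICATION_JSON_TYPE verifies that JSON is the web service's default media type.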
 
Example 2 Project: big-c File: TestAMWebServicesJobs.java
@Test
public void testJobCounters() throws JSONException, Exception {
  WebResource r = resource();
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);

    ClientResponse response = r.path("ws").path("v1").path("mapreduce")
        .path("jobs").path(jobId).path("counters")
        .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject json = response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, json.length());
    JSONObject info = json.getJSONObject("jobCounters");
    verifyAMJobCounters(info, jobsMap.get(id));
  }
}
 
Example 3 Project: big-c File: TestAMWebServicesAttempts.java
@Test
public void testTaskAttempts() throws JSONException, Exception {
  WebResource r = resource();
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    for (Task task : jobsMap.get(id).getTasks().values()) {

      String tid = MRApps.toString(task.getID());
      ClientResponse response = r.path("ws").path("v1").path("mapreduce")
          .path("jobs").path(jobId).path("tasks").path(tid).path("attempts")
          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
      JSONObject json = response.getEntity(JSONObject.class);
      verifyAMTaskAttempts(json, task);
    }
  }
}
 
Example 4 Project: hadoop File: DefaultSpeculator.java
private AtomicInteger containerNeed(TaskId taskID) {
  JobId jobID = taskID.getJobId();
  TaskType taskType = taskID.getTaskType();

  ConcurrentMap<JobId, AtomicInteger> relevantMap
      = taskType == TaskType.MAP ? mapContainerNeeds : reduceContainerNeeds;

  AtomicInteger result = relevantMap.get(jobID);

  if (result == null) {
    relevantMap.putIfAbsent(jobID, new AtomicInteger(0));
    result = relevantMap.get(jobID);
  }

  return result;
}
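The get / putIfAbsent / get sequence above is the pre-Java-8 idiom for an atomic get-or-create on a ConcurrentMap. As a standalone sketch (not the Hadoop implementation), the same logic collapses to a single computeIfAbsent call on Java 8+:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicInteger;

public class ContainerNeedSketch {
  private final ConcurrentMap<String, AtomicInteger> needs =
      new ConcurrentHashMap<String, AtomicInteger>();

  AtomicInteger containerNeed(String jobId) {
    // Atomically creates the counter on first access, then reuses it.
    return needs.computeIfAbsent(jobId, k -> new AtomicInteger(0));
  }

  public static void main(String[] args) {
    ContainerNeedSketch s = new ContainerNeedSketch();
    s.containerNeed("job_1").incrementAndGet();
    System.out.println(s.containerNeed("job_1").get()); // prints 1
  }
}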
 
Example 5 Project: hadoop File: StartEndTimesBase.java
protected DataStatistics dataStatisticsForTask(TaskId taskID) {
  JobId jobID = taskID.getJobId();
  Job job = context.getJob(jobID);

  if (job == null) {
    return null;
  }

  Task task = job.getTask(taskID);

  if (task == null) {
    return null;
  }

  return task.getType() == TaskType.MAP
          ? mapperStatistics.get(job)
          : task.getType() == TaskType.REDUCE
              ? reducerStatistics.get(job)
              : null;
}
 
Example 6 Project: hadoop File: CachedHistoryStorage.java
@Override
public Job getFullJob(JobId jobId) {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Looking for Job " + jobId);
  }
  try {
    HistoryFileInfo fileInfo = hsManager.getFileInfo(jobId);
    Job result = null;
    if (fileInfo != null) {
      result = loadedJobCache.get(jobId);
      if (result == null) {
        result = loadJob(fileInfo);
      } else if (fileInfo.isDeleted()) {
        loadedJobCache.remove(jobId);
        result = null;
      }
    } else {
      loadedJobCache.remove(jobId);
    }
    return result;
  } catch (IOException e) {
    throw new YarnRuntimeException(e);
  }
}
 
Example 7 Project: hadoop File: TestJobImpl.java
private static StubbedJob createStubbedJob(Configuration conf,
    Dispatcher dispatcher, int numSplits, AppContext appContext) {
  JobID jobID = JobID.forName("job_1234567890000_0001");
  JobId jobId = TypeConverter.toYarn(jobID);
  if (appContext == null) {
    appContext = mock(AppContext.class);
    when(appContext.hasSuccessfullyUnregistered()).thenReturn(true);
  }
  StubbedJob job = new StubbedJob(jobId,
      ApplicationAttemptId.newInstance(ApplicationId.newInstance(0, 0), 0),
      conf, dispatcher.getEventHandler(), true, "somebody", numSplits, appContext);
  dispatcher.register(JobEventType.class, job);
  EventHandler mockHandler = mock(EventHandler.class);
  dispatcher.register(TaskEventType.class, mockHandler);
  dispatcher.register(org.apache.hadoop.mapreduce.jobhistory.EventType.class,
      mockHandler);
  dispatcher.register(JobFinishEvent.Type.class, mockHandler);
  return job;
}
 
Example 8 Project: jumbune File: MRCommunicator.java
/**
 * Extracts all map or reduce task reports for a given job id.
 * @param taskType the TaskType (MAP or REDUCE) whose reports are wanted
 * @param jobId the job id for which task reports are to be extracted
 * @return a Map<TaskId, TaskReport> keyed by task id
 * @throws IOException
 */
public Map<TaskId, TaskReport> getTaskTypeWiseTaskReports(TaskType taskType, JobId jobId) throws IOException {
	Map<TaskId, TaskReport> reports = new HashMap<TaskId, TaskReport>();
	TaskReport report;

	// Probe task ids sequentially, collecting a report for each id until
	// the first out-of-range id triggers a RemoteException.
	boolean rme = false;
	int id = 0;
	do {
		try {
			report = getTaskReport(jobId, id, taskType);
			TaskId taskId = MRBuilderUtils.newTaskId(jobId, id, taskType);
			reports.put(taskId, report);
			id++;
		} catch (RemoteException re) {
			rme = true;
		}
	} while (!rme);

	return reports;
}
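Note the termination condition: the loop treats the first RemoteException as the end of the task-id range. Any other remote failure (for example a transient RPC error) ends the loop the same way, so the returned map can silently be incomplete.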
 
Example 9 Project: hadoop File: TestRMContainerAllocator.java
private ContainerRequestEvent
    createReq(JobId jobId, int taskAttemptId, int memory, String[] hosts,
        boolean earlierFailedAttempt, boolean reduce) {
  TaskId taskId;
  if (reduce) {
    taskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.REDUCE);
  } else {
    taskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.MAP);
  }
  TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId,
      taskAttemptId);
  Resource containerNeed = Resource.newInstance(memory, 1);
  if (earlierFailedAttempt) {
    return ContainerRequestEvent
        .createContainerRequestEventForFailedContainer(attemptId,
            containerNeed);
  }
  return new ContainerRequestEvent(attemptId, containerNeed, hosts,
      new String[] { NetworkTopology.DEFAULT_RACK });
}
 
Example 10 Project: big-c File: TestHsWebServicesJobs.java
@Test
public void testJobAttemptsDefault() throws JSONException, Exception {
  WebResource r = resource();
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);

    ClientResponse response = r.path("ws").path("v1").path("history")
        .path("mapreduce").path("jobs").path(jobId).path("jobattempts")
        .get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject json = response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, json.length());
    JSONObject info = json.getJSONObject("jobAttempts");
    verifyHsJobAttempts(info, appContext.getJob(id));
  }
}
 
Example 11 Project: big-c File: TestAMWebServicesTasks.java
@Test
public void testTaskIdDefault() throws JSONException, Exception {
  WebResource r = resource();
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    for (Task task : jobsMap.get(id).getTasks().values()) {

      String tid = MRApps.toString(task.getID());
      ClientResponse response = r.path("ws").path("v1").path("mapreduce")
          .path("jobs").path(jobId).path("tasks").path(tid)
          .get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
      JSONObject json = response.getEntity(JSONObject.class);
      assertEquals("incorrect number of elements", 1, json.length());
      JSONObject info = json.getJSONObject("task");
      verifyAMSingleTask(info, task);
    }
  }
}
 
Example 12 Project: hadoop File: TestAMWebServicesJobs.java
@Test
public void testJobCountersDefault() throws JSONException, Exception {
  WebResource r = resource();
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);

    ClientResponse response = r.path("ws").path("v1").path("mapreduce")
        .path("jobs").path(jobId).path("counters/").get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject json = response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, json.length());
    JSONObject info = json.getJSONObject("jobCounters");
    verifyAMJobCounters(info, jobsMap.get(id));
  }
}
 
Example 13 Project: hadoop File: TestHsWebServicesJobs.java
@Test
public void testJobAttempts() throws JSONException, Exception {
  WebResource r = resource();
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);

    ClientResponse response = r.path("ws").path("v1").path("history")
        .path("mapreduce").path("jobs").path(jobId).path("jobattempts")
        .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject json = response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, json.length());
    JSONObject info = json.getJSONObject("jobAttempts");
    verifyHsJobAttempts(info, appContext.getJob(id));
  }
}
 
Example 14 Project: big-c File: TestAMWebServicesJobConf.java
@Test
public void testJobConfSlash() throws JSONException, Exception {
  WebResource r = resource();
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);

    ClientResponse response = r.path("ws").path("v1").path("mapreduce")
        .path("jobs").path(jobId).path("conf/")
        .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject json = response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, json.length());
    JSONObject info = json.getJSONObject("conf");
    verifyAMJobConf(info, jobsMap.get(id));
  }
}
 
Example 15 Project: big-c File: MockHistoryJobs.java
private static JobsPair split(Map<JobId, Job> mocked) throws IOException {
  JobsPair ret = new JobsPair();
  ret.full = Maps.newHashMap();
  ret.partial = Maps.newHashMap();
  for (Map.Entry<JobId, Job> entry : mocked.entrySet()) {
    JobId id = entry.getKey();
    Job j = entry.getValue();
    MockCompletedJob mockJob = new MockCompletedJob(j);
    // use MockCompletedJob to set everything below, to make sure it is
    // consistent with what the history server would do
    ret.full.put(id, mockJob);
    JobReport report = mockJob.getReport();
    JobIndexInfo info = new JobIndexInfo(report.getStartTime(), 
        report.getFinishTime(), mockJob.getUserName(), mockJob.getName(), id, 
        mockJob.getCompletedMaps(), mockJob.getCompletedReduces(),
        String.valueOf(mockJob.getState()));
    info.setJobStartTime(report.getStartTime());
    info.setQueueName(mockJob.getQueueName());
    ret.partial.put(id, new PartialJob(info, id));

  }
  return ret;
}
 
Example 16 Project: hadoop File: TestHsWebServicesJobs.java
@Test
public void testJobAttemptsDefault() throws JSONException, Exception {
  WebResource r = resource();
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);

    ClientResponse response = r.path("ws").path("v1").path("history")
        .path("mapreduce").path("jobs").path(jobId).path("jobattempts")
        .get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject json = response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, json.length());
    JSONObject info = json.getJSONObject("jobAttempts");
    verifyHsJobAttempts(info, appContext.getJob(id));
  }
}
 
Example 17 Project: hadoop File: TestJobHistoryParsing.java
/**
 * Simple test PartialJob
 */
@Test(timeout = 3000)
public void testPartialJob() throws Exception {
  JobId jobId = new JobIdPBImpl();
  jobId.setId(0);
  JobIndexInfo jii = new JobIndexInfo(0L, System.currentTimeMillis(), "user",
      "jobName", jobId, 3, 2, "JobStatus");
  PartialJob test = new PartialJob(jii, jobId);
  assertEquals(1.0f, test.getProgress(), 0.001);
  assertNull(test.getAllCounters());
  assertNull(test.getTasks());
  assertNull(test.getTasks(TaskType.MAP));
  assertNull(test.getTask(new TaskIdPBImpl()));

  assertNull(test.getTaskAttemptCompletionEvents(0, 100));
  assertNull(test.getMapAttemptCompletionEvents(0, 100));
  assertTrue(test.checkAccess(UserGroupInformation.getCurrentUser(), null));
  assertNull(test.getAMInfos());

}
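The assertions trace directly to how PartialJob is built: it wraps only the JobIndexInfo summary, so anything not stored in the index (counters, tasks, completion events, AM info) comes back null, and a job present in the done index is by definition finished, hence the constant progress of 1.0 asserted above.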
 
Example 18 Project: hadoop File: TestStagingCleanup.java
@Test (timeout = 30000)
public void testDeletionofStagingOnKill() throws IOException {
  conf.set(MRJobConfig.MAPREDUCE_JOB_DIR, stagingJobDir);
  fs = mock(FileSystem.class);
  when(fs.delete(any(Path.class), anyBoolean())).thenReturn(true);
  //Staging Dir exists
  String user = UserGroupInformation.getCurrentUser().getShortUserName();
  Path stagingDir = MRApps.getStagingAreaDir(conf, user);
  when(fs.exists(stagingDir)).thenReturn(true);
  ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(),
      0);
  ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 0);
  JobId jobid = recordFactory.newRecordInstance(JobId.class);
  jobid.setAppId(appId);
  ContainerAllocator mockAlloc = mock(ContainerAllocator.class);
  MRAppMaster appMaster = new TestMRApp(attemptId, mockAlloc);
  appMaster.init(conf);
  //simulate the process being killed
  MRAppMaster.MRAppMasterShutdownHook hook = 
    new MRAppMaster.MRAppMasterShutdownHook(appMaster);
  hook.run();
  verify(fs, times(0)).delete(stagingJobPath, true);
}
 
Example 19 Project: hadoop File: TestHsWebServicesJobs.java
@Test
public void testJobAttemptsXML() throws Exception {
  WebResource r = resource();
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);

    ClientResponse response = r.path("ws").path("v1").path("history")
        .path("mapreduce").path("jobs").path(jobId).path("jobattempts")
        .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
    String xml = response.getEntity(String.class);
    DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
    DocumentBuilder db = dbf.newDocumentBuilder();
    InputSource is = new InputSource();
    is.setCharacterStream(new StringReader(xml));
    Document dom = db.parse(is);
    NodeList attempts = dom.getElementsByTagName("jobAttempts");
    assertEquals("incorrect number of elements", 1, attempts.getLength());
    NodeList info = dom.getElementsByTagName("jobAttempt");
    verifyHsJobAttemptsXML(info, appContext.getJob(id));
  }
}
 
Example 20 Project: hadoop File: TestFileNameIndexUtils.java
@Test
public void testQueueNamePercentEncoding() throws IOException {
  JobIndexInfo info = new JobIndexInfo();
  JobID oldJobId = JobID.forName(JOB_ID);
  JobId jobId = TypeConverter.toYarn(oldJobId);
  info.setJobId(jobId);
  info.setSubmitTime(Long.parseLong(SUBMIT_TIME));
  info.setUser(USER_NAME);
  info.setJobName(JOB_NAME);
  info.setFinishTime(Long.parseLong(FINISH_TIME));
  info.setNumMaps(Integer.parseInt(NUM_MAPS));
  info.setNumReduces(Integer.parseInt(NUM_REDUCES));
  info.setJobStatus(JOB_STATUS);
  info.setQueueName(QUEUE_NAME_WITH_DELIMITER);
  info.setJobStartTime(Long.parseLong(JOB_START_TIME));

  String jobHistoryFile = FileNameIndexUtils.getDoneFileName(info);
  Assert.assertTrue("Queue name not encoded correctly into job history file",
      jobHistoryFile.contains(QUEUE_NAME_WITH_DELIMITER_ESCAPE));
}
 
Example 21 Project: big-c File: StartEndTimesBase.java
protected DataStatistics dataStatisticsForTask(TaskId taskID) {
  JobId jobID = taskID.getJobId();
  Job job = context.getJob(jobID);

  if (job == null) {
    return null;
  }

  Task task = job.getTask(taskID);

  if (task == null) {
    return null;
  }

  return task.getType() == TaskType.MAP
          ? mapperStatistics.get(job)
          : task.getType() == TaskType.REDUCE
              ? reducerStatistics.get(job)
              : null;
}
 
Example 22 Project: XLearning File: HistoryClientService.java
@Override
public GetJobReportResponse getJobReport(GetJobReportRequest request)
    throws IOException {
  JobId jobId = request.getJobId();
  Job job = verifyAndGetJob(jobId, false);
  GetJobReportResponse response = recordFactory.newRecordInstance(GetJobReportResponse.class);
  if (job != null) {
    response.setJobReport(job.getReport());
  } else {
    response.setJobReport(null);
  }
  return response;
}
 
Example 23 Project: hadoop File: TaskIdPBImpl.java
@Override
public synchronized void setJobId(JobId jobId) {
  maybeInitBuilder();
  if (jobId == null) {
    builder.clearJobId();
  }
  this.jobId = jobId;
}
 
Example 24 Project: big-c File: TestJobListCache.java
@Test (timeout = 1000)
public void testEviction() throws InterruptedException {
  int maxSize = 2;
  JobListCache cache = new JobListCache(maxSize, 1000);

  JobId jobId1 = MRBuilderUtils.newJobId(1, 1, 1);
  HistoryFileInfo fileInfo1 = Mockito.mock(HistoryFileInfo.class);
  Mockito.when(fileInfo1.getJobId()).thenReturn(jobId1);

  JobId jobId2 = MRBuilderUtils.newJobId(2, 2, 2);
  HistoryFileInfo fileInfo2 = Mockito.mock(HistoryFileInfo.class);
  Mockito.when(fileInfo2.getJobId()).thenReturn(jobId2);

  JobId jobId3 = MRBuilderUtils.newJobId(3, 3, 3);
  HistoryFileInfo fileInfo3 = Mockito.mock(HistoryFileInfo.class);
  Mockito.when(fileInfo3.getJobId()).thenReturn(jobId3);

  cache.addIfAbsent(fileInfo1);
  cache.addIfAbsent(fileInfo2);
  cache.addIfAbsent(fileInfo3);

  Collection <HistoryFileInfo> values;
  for (int i = 0; i < 9; i++) {
    values = cache.values();
    if (values.size() > maxSize) {
      Thread.sleep(100);
    } else {
      assertFalse("fileInfo1 should have been evicted",
        values.contains(fileInfo1));
      return;
    }
  }
  fail("JobListCache didn't delete the extra entry");
}
 
Example 25 Project: big-c File: TestJobImpl.java
@Test(timeout=20000)
public void testKilledDuringSetup() throws Exception {
  Configuration conf = new Configuration();
  conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
  AsyncDispatcher dispatcher = new AsyncDispatcher();
  dispatcher.init(conf);
  dispatcher.start();
  OutputCommitter committer = new StubbedOutputCommitter() {
    @Override
    public synchronized void setupJob(JobContext jobContext)
        throws IOException {
      while (!Thread.interrupted()) {
        try {
          wait();
        } catch (InterruptedException e) {
        }
      }
    }
  };
  CommitterEventHandler commitHandler =
      createCommitterEventHandler(dispatcher, committer);
  commitHandler.init(conf);
  commitHandler.start();

  JobImpl job = createStubbedJob(conf, dispatcher, 2, null);
  JobId jobId = job.getID();
  job.handle(new JobEvent(jobId, JobEventType.JOB_INIT));
  assertJobState(job, JobStateInternal.INITED);
  job.handle(new JobStartEvent(jobId));
  assertJobState(job, JobStateInternal.SETUP);

  job.handle(new JobEvent(job.getID(), JobEventType.JOB_KILL));
  assertJobState(job, JobStateInternal.KILLED);
  dispatcher.stop();
  commitHandler.stop();
}
 
Example 26 Project: hadoop File: TestJobImpl.java
@Test(timeout=20000)
public void testKilledDuringKillAbort() throws Exception {
  Configuration conf = new Configuration();
  conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
  AsyncDispatcher dispatcher = new AsyncDispatcher();
  dispatcher.init(conf);
  dispatcher.start();
  OutputCommitter committer = new StubbedOutputCommitter() {
    @Override
    public synchronized void abortJob(JobContext jobContext, State state)
        throws IOException {
      while (!Thread.interrupted()) {
        try {
          wait();
        } catch (InterruptedException e) {
        }
      }
    }
  };
  CommitterEventHandler commitHandler =
      createCommitterEventHandler(dispatcher, committer);
  commitHandler.init(conf);
  commitHandler.start();

  JobImpl job = createStubbedJob(conf, dispatcher, 2, null);
  JobId jobId = job.getID();
  job.handle(new JobEvent(jobId, JobEventType.JOB_INIT));
  assertJobState(job, JobStateInternal.INITED);
  job.handle(new JobStartEvent(jobId));
  assertJobState(job, JobStateInternal.SETUP);

  job.handle(new JobEvent(jobId, JobEventType.JOB_KILL));
  assertJobState(job, JobStateInternal.KILL_ABORT);

  job.handle(new JobEvent(jobId, JobEventType.JOB_KILL));
  assertJobState(job, JobStateInternal.KILLED);
  dispatcher.stop();
  commitHandler.stop();
}
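In both of the preceding TestJobImpl cases the stubbed committer parks in wait() until its thread is interrupted, which holds the job in the SETUP (or KILL_ABORT) state long enough for the test to deliver the JOB_KILL event at a deterministic point.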
 
Example 27 Project: big-c File: MRBuilderUtils.java
public static TaskId newTaskId(JobId jobId, int id, TaskType taskType) {
  TaskId taskId = Records.newRecord(TaskId.class);
  taskId.setJobId(jobId);
  taskId.setId(id);
  taskId.setTaskType(taskType);
  return taskId;
}
 
Example 28 Project: hadoop File: TestRMContainerAllocator.java
private ContainerFailedEvent createFailEvent(JobId jobId, int taskAttemptId,
    String host, boolean reduce) {
  TaskId taskId;
  if (reduce) {
    taskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.REDUCE);
  } else {
    taskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.MAP);
  }
  TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId,
      taskAttemptId);
  return new ContainerFailedEvent(attemptId, host);    
}
 
Example 29 Project: hadoop File: TestHsWebServicesTasks.java
@Test
public void testTaskIdXML() throws JSONException, Exception {
  WebResource r = resource();
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    for (Task task : jobsMap.get(id).getTasks().values()) {

      String tid = MRApps.toString(task.getID());
      ClientResponse response = r.path("ws").path("v1").path("history")
          .path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid)
          .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);

      assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
      String xml = response.getEntity(String.class);
      DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
      DocumentBuilder db = dbf.newDocumentBuilder();
      InputSource is = new InputSource();
      is.setCharacterStream(new StringReader(xml));
      Document dom = db.parse(is);
      NodeList nodes = dom.getElementsByTagName("task");
      for (int i = 0; i < nodes.getLength(); i++) {
        Element element = (Element) nodes.item(i);
        verifyHsSingleTaskXML(element, task);
      }
    }
  }
}
 
Example 30 Project: big-c File: HistoryClientService.java
@Override
public GetCountersResponse getCounters(GetCountersRequest request)
    throws IOException {
  JobId jobId = request.getJobId();
  Job job = verifyAndGetJob(jobId, true);
  GetCountersResponse response = recordFactory.newRecordInstance(GetCountersResponse.class);
  response.setCounters(TypeConverter.toYarn(job.getAllCounters()));
  return response;
}
 