Source code examples for the class org.apache.hadoop.mapreduce.v2.api.records.JobState

The following examples show how the org.apache.hadoop.mapreduce.v2.api.records.JobState API is used in practice. Each snippet is taken from the named project and file; the full source is available on GitHub.
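
Before the project snippets, here is a minimal, self-contained sketch of the most common JobState operations that appear below: enumerating the states, mapping them to the old-API JobStatus constants via TypeConverter, attaching a state to a JobReport record, and parsing a state name. The class name JobStateDemo is illustrative only; the Hadoop APIs it calls are the same ones exercised in the examples that follow.

import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
import org.apache.hadoop.mapreduce.v2.api.records.JobState;
import org.apache.hadoop.yarn.util.Records;

public class JobStateDemo {
  public static void main(String[] args) {
    // Enumerate every JobState and map it to the corresponding
    // org.apache.hadoop.mapred.JobStatus run-state constant.
    for (JobState state : JobState.values()) {
      System.out.println(state + " -> " + TypeConverter.fromYarn(state));
    }

    // Attach a state to a JobReport record, as the MR AM does when reporting.
    JobReport report = Records.newRecord(JobReport.class);
    report.setJobState(JobState.RUNNING);
    System.out.println("Report state: " + report.getJobState());

    // Parse a state name back into the enum (as the history server does).
    JobState parsed = JobState.valueOf("SUCCEEDED");
    System.out.println("Parsed state: " + parsed);
  }
}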

Example 1  Project: hadoop  File: JobImpl.java
@Override
public JobState getState() {
  readLock.lock();
  try {
    JobState state = getExternalState(getInternalState());
    if (!appContext.hasSuccessfullyUnregistered()
        && (state == JobState.SUCCEEDED || state == JobState.FAILED
        || state == JobState.KILLED || state == JobState.ERROR)) {
      return lastNonFinalState;
    } else {
      return state;
    }
  } finally {
    readLock.unlock();
  }
}
 
Example 2  Project: big-c  File: TestJobEndNotifier.java
@Test
public void testNotificationOnLastRetryNormalShutdown() throws Exception {
  HttpServer2 server = startHttpServer();
  // Act like it is the second attempt. Default max attempts is 2
  MRApp app = spy(new MRAppWithCustomContainerAllocator(
      2, 2, true, this.getClass().getName(), true, 2, true));
  doNothing().when(app).sysexit();
  JobConf conf = new JobConf();
  conf.set(JobContext.MR_JOB_END_NOTIFICATION_URL,
      JobEndServlet.baseUrl + "jobend?jobid=$jobId&status=$jobStatus");
  JobImpl job = (JobImpl)app.submit(conf);
  app.waitForInternalState(job, JobStateInternal.SUCCEEDED);
  // Unregistration succeeds: successfullyUnregistered is set
  app.shutDownJob();
  Assert.assertTrue(app.isLastAMRetry());
  Assert.assertEquals(1, JobEndServlet.calledTimes);
  Assert.assertEquals("jobid=" + job.getID() + "&status=SUCCEEDED",
      JobEndServlet.requestUri.getQuery());
  Assert.assertEquals(JobState.SUCCEEDED.toString(),
    JobEndServlet.foundJobState);
  server.stop();
}
 
Example 3  Project: big-c  File: TestFail.java
@Test
//The first attempt fails and the second attempt passes;
//the job succeeds.
public void testFailTask() throws Exception {
  MRApp app = new MockFirstFailingAttemptMRApp(1, 0);
  Configuration conf = new Configuration();
  // this test requires two task attempts, but uberization overrides max to 1
  conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
  Job job = app.submit(conf);
  app.waitForState(job, JobState.SUCCEEDED);
  Map<TaskId,Task> tasks = job.getTasks();
  Assert.assertEquals("Num tasks is not correct", 1, tasks.size());
  Task task = tasks.values().iterator().next();
  Assert.assertEquals("Task state not correct", TaskState.SUCCEEDED,
      task.getReport().getTaskState());
  Map<TaskAttemptId, TaskAttempt> attempts =
      tasks.values().iterator().next().getAttempts();
  Assert.assertEquals("Num attempts is not correct", 2, attempts.size());
  //one attempt must have failed
  //and the other must have succeeded
  Iterator<TaskAttempt> it = attempts.values().iterator();
  Assert.assertEquals("Attempt state not correct", TaskAttemptState.FAILED,
      it.next().getReport().getTaskAttemptState());
  Assert.assertEquals("Attempt state not correct", TaskAttemptState.SUCCEEDED,
      it.next().getReport().getTaskAttemptState());
}
 
Example 4  Project: hadoop  File: TestMRApp.java
@Test
public void testJobRebootOnLastRetryOnUnregistrationFailure()
    throws Exception {
  // set startCount to 2 since this is the last retry, which equals
  // DEFAULT_MAX_AM_RETRY
  // The last param mocks the unregistration failure
  MRApp app = new MRApp(1, 0, false, this.getClass().getName(), true, 2, false);

  Configuration conf = new Configuration();
  Job job = app.submit(conf);
  app.waitForState(job, JobState.RUNNING);
  Assert.assertEquals("Num tasks not correct", 1, job.getTasks().size());
  Iterator<Task> it = job.getTasks().values().iterator();
  Task task = it.next();
  app.waitForState(task, TaskState.RUNNING);

  //send a reboot event
  app.getContext().getEventHandler().handle(new JobEvent(job.getID(),
    JobEventType.JOB_AM_REBOOT));

  app.waitForInternalState((JobImpl) job, JobStateInternal.REBOOT);
  // the external state is returned as RUNNING since this is the last retry and
  // unregistration fails
  app.waitForState(job, JobState.RUNNING);
}
 
Example 5  Project: hadoop  File: TestTaskAttempt.java
private void testMRAppHistory(MRApp app) throws Exception {
  Configuration conf = new Configuration();
  Job job = app.submit(conf);
  app.waitForState(job, JobState.FAILED);
  Map<TaskId, Task> tasks = job.getTasks();

  Assert.assertEquals("Num tasks is not correct", 1, tasks.size());
  Task task = tasks.values().iterator().next();
  Assert.assertEquals("Task state not correct", TaskState.FAILED, task
      .getReport().getTaskState());
  Map<TaskAttemptId, TaskAttempt> attempts = tasks.values().iterator().next()
      .getAttempts();
  Assert.assertEquals("Num attempts is not correct", 4, attempts.size());

  Iterator<TaskAttempt> it = attempts.values().iterator();
  TaskAttemptReport report = it.next().getReport();
  Assert.assertEquals("Attempt state not correct", TaskAttemptState.FAILED,
      report.getTaskAttemptState());
  Assert.assertEquals("Diagnostic Information is not Correct",
      "Test Diagnostic Event", report.getDiagnosticInfo());
  report = it.next().getReport();
  Assert.assertEquals("Attempt state not correct", TaskAttemptState.FAILED,
      report.getTaskAttemptState());
}
 
Example 6  Project: big-c  File: TestJobImpl.java
private static void completeJobTasks(JobImpl job) {
  // complete the map tasks and the reduce tasks so we start committing
  int numMaps = job.getTotalMaps();
  for (int i = 0; i < numMaps; ++i) {
    job.handle(new JobTaskEvent(
        MRBuilderUtils.newTaskId(job.getID(), 1, TaskType.MAP),
        TaskState.SUCCEEDED));
    Assert.assertEquals(JobState.RUNNING, job.getState());
  }
  int numReduces = job.getTotalReduces();
  for (int i = 0; i < numReduces; ++i) {
    job.handle(new JobTaskEvent(
        MRBuilderUtils.newTaskId(job.getID(), 1, TaskType.MAP),
        TaskState.SUCCEEDED));
    Assert.assertEquals(JobState.RUNNING, job.getState());
  }
}
 
Example 7  Project: hadoop  File: TestFail.java
@Test
//The first attempt fails and the second attempt passes;
//the job succeeds.
public void testFailTask() throws Exception {
  MRApp app = new MockFirstFailingAttemptMRApp(1, 0);
  Configuration conf = new Configuration();
  // this test requires two task attempts, but uberization overrides max to 1
  conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
  Job job = app.submit(conf);
  app.waitForState(job, JobState.SUCCEEDED);
  Map<TaskId,Task> tasks = job.getTasks();
  Assert.assertEquals("Num tasks is not correct", 1, tasks.size());
  Task task = tasks.values().iterator().next();
  Assert.assertEquals("Task state not correct", TaskState.SUCCEEDED,
      task.getReport().getTaskState());
  Map<TaskAttemptId, TaskAttempt> attempts =
      tasks.values().iterator().next().getAttempts();
  Assert.assertEquals("Num attempts is not correct", 2, attempts.size());
  //one attempt must have failed
  //and the other must have succeeded
  Iterator<TaskAttempt> it = attempts.values().iterator();
  Assert.assertEquals("Attempt state not correct", TaskAttemptState.FAILED,
      it.next().getReport().getTaskAttemptState());
  Assert.assertEquals("Attempt state not correct", TaskAttemptState.SUCCEEDED,
      it.next().getReport().getTaskAttemptState());
}
 
Example 8  Project: hadoop  File: TestMRAppComponentDependencies.java
@Test(timeout = 20000)
public void testComponentStopOrder() throws Exception {
  @SuppressWarnings("resource")
  TestMRApp app = new TestMRApp(1, 1, true, this.getClass().getName(), true);
  JobImpl job = (JobImpl) app.submit(new Configuration());
  app.waitForState(job, JobState.SUCCEEDED);
  app.verifyCompleted();

  int waitTime = 20 * 1000;
  while (waitTime > 0 && app.numStops < 2) {
    Thread.sleep(100);
    waitTime -= 100;
  }

  // assert JobHistoryEventHandlerStopped and then clientServiceStopped
  Assert.assertEquals(1, app.JobHistoryEventHandlerStopped);
  Assert.assertEquals(2, app.clientServiceStopped);
}
 
Example 9  Project: big-c  File: TestJobEndNotifier.java
@Test
public void testAbsentNotificationOnNotLastRetryUnregistrationFailure()
    throws Exception {
  HttpServer2 server = startHttpServer();
  MRApp app = spy(new MRAppWithCustomContainerAllocator(2, 2, false,
      this.getClass().getName(), true, 1, false));
  doNothing().when(app).sysexit();
  JobConf conf = new JobConf();
  conf.set(JobContext.MR_JOB_END_NOTIFICATION_URL,
      JobEndServlet.baseUrl + "jobend?jobid=$jobId&status=$jobStatus");
  JobImpl job = (JobImpl)app.submit(conf);
  app.waitForState(job, JobState.RUNNING);
  app.getContext().getEventHandler()
    .handle(new JobEvent(app.getJobId(), JobEventType.JOB_AM_REBOOT));
  app.waitForInternalState(job, JobStateInternal.REBOOT);
  // Now shut down.
  // Unregistration fails: isLastAMRetry is recalculated, so this is no longer
  // treated as the last retry.
  app.shutDownJob();
  // Not the last AM attempt, so the user should see that the job is still running.
  app.waitForState(job, JobState.RUNNING);
  Assert.assertFalse(app.isLastAMRetry());
  Assert.assertEquals(0, JobEndServlet.calledTimes);
  Assert.assertNull(JobEndServlet.requestUri);
  Assert.assertNull(JobEndServlet.foundJobState);
  server.stop();
}
 
Example 10  Project: big-c  File: TestFail.java
@Test
//All Task attempts are timed out, leading to Job failure
public void testTimedOutTask() throws Exception {
  MRApp app = new TimeOutTaskMRApp(1, 0);
  Configuration conf = new Configuration();
  int maxAttempts = 2;
  conf.setInt(MRJobConfig.MAP_MAX_ATTEMPTS, maxAttempts);
  // disable uberization (requires entire job to be reattempted, so max for
  // subtask attempts is overridden to 1)
  conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
  Job job = app.submit(conf);
  app.waitForState(job, JobState.FAILED);
  Map<TaskId,Task> tasks = job.getTasks();
  Assert.assertEquals("Num tasks is not correct", 1, tasks.size());
  Task task = tasks.values().iterator().next();
  Assert.assertEquals("Task state not correct", TaskState.FAILED,
      task.getReport().getTaskState());
  Map<TaskAttemptId, TaskAttempt> attempts =
      tasks.values().iterator().next().getAttempts();
  Assert.assertEquals("Num attempts is not correct", maxAttempts,
      attempts.size());
  for (TaskAttempt attempt : attempts.values()) {
    Assert.assertEquals("Attempt state not correct", TaskAttemptState.FAILED,
        attempt.getReport().getTaskAttemptState());
  }
}
 
Example 11  Project: big-c  File: TestMRApp.java
@Test
public void testJobError() throws Exception {
  MRApp app = new MRApp(1, 0, false, this.getClass().getName(), true);
  Job job = app.submit(new Configuration());
  app.waitForState(job, JobState.RUNNING);
  Assert.assertEquals("Num tasks not correct", 1, job.getTasks().size());
  Iterator<Task> it = job.getTasks().values().iterator();
  Task task = it.next();
  app.waitForState(task, TaskState.RUNNING);

  //send an event that is invalid for the task's current state
  app.getContext().getEventHandler().handle(
      new TaskEvent(
          task.getID(), TaskEventType.T_SCHEDULE));

  //this must lead to job error
  app.waitForState(job, JobState.ERROR);
}
 
Example 12  Project: big-c  File: MRApp.java
public void waitForState(Job job, JobState finalState) throws Exception {
  int timeoutSecs = 0;
  JobReport report = job.getReport();
  while (!finalState.equals(report.getJobState()) &&
      timeoutSecs++ < 20) {
    System.out.println("Job State is : " + report.getJobState() +
        " Waiting for state : " + finalState +
        "   map progress : " + report.getMapProgress() + 
        "   reduce progress : " + report.getReduceProgress());
    report = job.getReport();
    Thread.sleep(500);
  }
  System.out.println("Job State is : " + report.getJobState());
  Assert.assertEquals("Job state is not correct (timedout)", finalState, 
      job.getState());
}
 
Example 13  Project: big-c  File: MRBuilderUtils.java
public static JobReport newJobReport(JobId jobId, String jobName,
    String userName, JobState state, long submitTime, long startTime, long finishTime,
    float setupProgress, float mapProgress, float reduceProgress,
    float cleanupProgress, String jobFile, List<AMInfo> amInfos,
    boolean isUber, String diagnostics) {
  JobReport report = Records.newRecord(JobReport.class);
  report.setJobId(jobId);
  report.setJobName(jobName);
  report.setUser(userName);
  report.setJobState(state);
  report.setSubmitTime(submitTime);
  report.setStartTime(startTime);
  report.setFinishTime(finishTime);
  report.setSetupProgress(setupProgress);
  report.setCleanupProgress(cleanupProgress);
  report.setMapProgress(mapProgress);
  report.setReduceProgress(reduceProgress);
  report.setJobFile(jobFile);
  report.setAMInfos(amInfos);
  report.setIsUber(isUber);
  report.setDiagnostics(diagnostics);
  return report;
}
 
Example 14  Project: big-c  File: TypeConverter.java
public static int fromYarn(JobState state) {
  switch (state) {
  case NEW:
  case INITED:
    return org.apache.hadoop.mapred.JobStatus.PREP;
  case RUNNING:
    return org.apache.hadoop.mapred.JobStatus.RUNNING;
  case KILLED:
    return org.apache.hadoop.mapred.JobStatus.KILLED;
  case SUCCEEDED:
    return org.apache.hadoop.mapred.JobStatus.SUCCEEDED;
  case FAILED:
  case ERROR:
    return org.apache.hadoop.mapred.JobStatus.FAILED;
  }
  throw new YarnRuntimeException("Unrecognized job state: " + state);
}
 
Example 15  Project: big-c  File: TestClientRedirect.java
@Override
public GetJobReportResponse getJobReport(GetJobReportRequest request)
    throws IOException {

  amContact = true;

  JobReport jobReport = recordFactory.newRecordInstance(JobReport.class);
  jobReport.setJobId(request.getJobId());
  jobReport.setJobState(JobState.RUNNING);
  jobReport.setJobName("TestClientRedirect-jobname");
  jobReport.setUser("TestClientRedirect-user");
  jobReport.setStartTime(0L);
  jobReport.setFinishTime(1L);

  GetJobReportResponse response = recordFactory
      .newRecordInstance(GetJobReportResponse.class);
  response.setJobReport(jobReport);
  return response;
}
 
Example 16  Project: hadoop  File: ClientServiceDelegate.java
public ClientServiceDelegate(Configuration conf, ResourceMgrDelegate rm,
    JobID jobId, MRClientProtocol historyServerProxy) {
  this.conf = new Configuration(conf); // Cloning for modifying.
  // For faster redirects from AM to HS.
  this.conf.setInt(
      CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY,
      this.conf.getInt(MRJobConfig.MR_CLIENT_TO_AM_IPC_MAX_RETRIES,
          MRJobConfig.DEFAULT_MR_CLIENT_TO_AM_IPC_MAX_RETRIES));
  this.conf.setInt(
      CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY,
      this.conf.getInt(MRJobConfig.MR_CLIENT_TO_AM_IPC_MAX_RETRIES_ON_TIMEOUTS,
          MRJobConfig.DEFAULT_MR_CLIENT_TO_AM_IPC_MAX_RETRIES_ON_TIMEOUTS));
  this.rm = rm;
  this.jobId = jobId;
  this.historyServerProxy = historyServerProxy;
  this.appId = TypeConverter.toYarn(jobId).getAppId();
  notRunningJobs = new HashMap<JobState, HashMap<String, NotRunningJob>>();
}
 
Example 17  Project: hadoop  File: TypeConverter.java
public static int fromYarn(JobState state) {
  switch (state) {
  case NEW:
  case INITED:
    return org.apache.hadoop.mapred.JobStatus.PREP;
  case RUNNING:
    return org.apache.hadoop.mapred.JobStatus.RUNNING;
  case KILLED:
    return org.apache.hadoop.mapred.JobStatus.KILLED;
  case SUCCEEDED:
    return org.apache.hadoop.mapred.JobStatus.SUCCEEDED;
  case FAILED:
  case ERROR:
    return org.apache.hadoop.mapred.JobStatus.FAILED;
  }
  throw new YarnRuntimeException("Unrecognized job state: " + state);
}
 
Example 18  Project: hadoop  File: TestHsWebServicesJobsQuery.java
@Test
public void testJobsQueryStateNone() throws JSONException, Exception {
  WebResource r = resource();

  ArrayList<JobState> JOB_STATES =
      new ArrayList<JobState>(Arrays.asList(JobState.values()));

  // find a state that isn't in use
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (Map.Entry<JobId, Job> entry : jobsMap.entrySet()) {
    JOB_STATES.remove(entry.getValue().getState());
  }

  assertTrue("No unused job states", JOB_STATES.size() > 0);
  JobState notInUse = JOB_STATES.get(0);

  ClientResponse response = r.path("ws").path("v1").path("history")
      .path("mapreduce").path("jobs").queryParam("state", notInUse.toString())
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);

  assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
  JSONObject json = response.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements", 1, json.length());
  assertEquals("jobs is not null", JSONObject.NULL, json.get("jobs"));
}
 
Example 19  Project: big-c  File: TestBlocks.java
private Job getJob() {
  Job job = mock(Job.class);

  JobId jobId = new JobIdPBImpl();

  ApplicationId appId = ApplicationIdPBImpl.newInstance(System.currentTimeMillis(),4);
  jobId.setAppId(appId);
  jobId.setId(1);
  when(job.getID()).thenReturn(jobId);

  JobReport report = mock(JobReport.class);
  when(report.getStartTime()).thenReturn(100010L);
  when(report.getFinishTime()).thenReturn(100015L);

  when(job.getReport()).thenReturn(report);
  when(job.getName()).thenReturn("JobName");
  when(job.getUserName()).thenReturn("UserName");
  when(job.getQueueName()).thenReturn("QueueName");
  when(job.getState()).thenReturn(JobState.SUCCEEDED);
  when(job.getTotalMaps()).thenReturn(3);
  when(job.getCompletedMaps()).thenReturn(2);
  when(job.getTotalReduces()).thenReturn(2);
  when(job.getCompletedReduces()).thenReturn(1);
  return job;
}
 
Example 20  Project: hadoop  File: MRBuilderUtils.java
public static JobReport newJobReport(JobId jobId, String jobName,
    String userName, JobState state, long submitTime, long startTime, long finishTime,
    float setupProgress, float mapProgress, float reduceProgress,
    float cleanupProgress, String jobFile, List<AMInfo> amInfos,
    boolean isUber, String diagnostics) {
  JobReport report = Records.newRecord(JobReport.class);
  report.setJobId(jobId);
  report.setJobName(jobName);
  report.setUser(userName);
  report.setJobState(state);
  report.setSubmitTime(submitTime);
  report.setStartTime(startTime);
  report.setFinishTime(finishTime);
  report.setSetupProgress(setupProgress);
  report.setCleanupProgress(cleanupProgress);
  report.setMapProgress(mapProgress);
  report.setReduceProgress(reduceProgress);
  report.setJobFile(jobFile);
  report.setAMInfos(amInfos);
  report.setIsUber(isUber);
  report.setDiagnostics(diagnostics);
  return report;
}
 
Example 21  Project: hadoop  File: TestTypeConverter.java
@Test
public void testEnums() throws Exception {
  for (YarnApplicationState applicationState : YarnApplicationState.values()) {
    TypeConverter.fromYarn(applicationState, FinalApplicationStatus.FAILED);
  }
  // ad hoc test of NEW_SAVING, which is newly added
  Assert.assertEquals(State.PREP, TypeConverter.fromYarn(
      YarnApplicationState.NEW_SAVING, FinalApplicationStatus.FAILED));
  
  for (TaskType taskType : TaskType.values()) {
    TypeConverter.fromYarn(taskType);
  }
  
  for (JobState jobState : JobState.values()) {
    TypeConverter.fromYarn(jobState);
  }
  
  for (QueueState queueState : QueueState.values()) {
    TypeConverter.fromYarn(queueState);
  }
  
  for (TaskState taskState : TaskState.values()) {
    TypeConverter.fromYarn(taskState);
  }
}
 
Example 22  Project: big-c  File: ClientServiceDelegate.java
private NotRunningJob getNotRunningJob(ApplicationReport applicationReport,
    JobState state) {
  synchronized (notRunningJobs) {
    HashMap<String, NotRunningJob> map = notRunningJobs.get(state);
    if (map == null) {
      map = new HashMap<String, NotRunningJob>();
      notRunningJobs.put(state, map);
    }
    String user =
        (applicationReport == null) ?
            UNKNOWN_USER : applicationReport.getUser();
    NotRunningJob notRunningJob = map.get(user);
    if (notRunningJob == null) {
      notRunningJob = new NotRunningJob(applicationReport, state);
      map.put(user, notRunningJob);
    }
    return notRunningJob;
  }
}
 
Example 23  Project: big-c  File: TestMRApp.java
@Test
public void testJobRebootOnLastRetryOnUnregistrationFailure()
    throws Exception {
  // set startCount to 2 since this is the last retry, which equals
  // DEFAULT_MAX_AM_RETRY
  // The last param mocks the unregistration failure
  MRApp app = new MRApp(1, 0, false, this.getClass().getName(), true, 2, false);

  Configuration conf = new Configuration();
  Job job = app.submit(conf);
  app.waitForState(job, JobState.RUNNING);
  Assert.assertEquals("Num tasks not correct", 1, job.getTasks().size());
  Iterator<Task> it = job.getTasks().values().iterator();
  Task task = it.next();
  app.waitForState(task, TaskState.RUNNING);

  //send a reboot event
  app.getContext().getEventHandler().handle(new JobEvent(job.getID(),
    JobEventType.JOB_AM_REBOOT));

  app.waitForInternalState((JobImpl) job, JobStateInternal.REBOOT);
  // the external state is returned as RUNNING since this is the last retry and
  // unregistration fails
  app.waitForState(job, JobState.RUNNING);
}
 
Example 24  Project: hadoop  File: JobImpl.java
@Override
public JobReport getReport() {
  readLock.lock();
  try {
    JobState state = getState();

    // jobFile can be null if the job is not yet inited.
    String jobFile =
        remoteJobConfFile == null ? "" : remoteJobConfFile.toString();

    StringBuilder diagsb = new StringBuilder();
    for (String s : getDiagnostics()) {
      diagsb.append(s).append("\n");
    }

    if (getInternalState() == JobStateInternal.NEW) {
      return MRBuilderUtils.newJobReport(jobId, jobName, username, state,
          appSubmitTime, startTime, finishTime, setupProgress, 0.0f, 0.0f,
          cleanupProgress, jobFile, amInfos, isUber, diagsb.toString());
    }

    computeProgress();
    JobReport report = MRBuilderUtils.newJobReport(jobId, jobName, username,
        state, appSubmitTime, startTime, finishTime, setupProgress,
        this.mapProgress, this.reduceProgress,
        cleanupProgress, jobFile, amInfos, isUber, diagsb.toString());
    return report;
  } finally {
    readLock.unlock();
  }
}
 
Example 25  Project: big-c  File: TestJobImpl.java
@Test
public void testTransitionsAtFailed() throws IOException {
  Configuration conf = new Configuration();
  AsyncDispatcher dispatcher = new AsyncDispatcher();
  dispatcher.init(conf);
  dispatcher.start();

  OutputCommitter committer = mock(OutputCommitter.class);
  doThrow(new IOException("forcefail"))
    .when(committer).setupJob(any(JobContext.class));
  CommitterEventHandler commitHandler =
      createCommitterEventHandler(dispatcher, committer);
  commitHandler.init(conf);
  commitHandler.start();

  AppContext mockContext = mock(AppContext.class);
  when(mockContext.hasSuccessfullyUnregistered()).thenReturn(false);
  JobImpl job = createStubbedJob(conf, dispatcher, 2, mockContext);
  JobId jobId = job.getID();
  job.handle(new JobEvent(jobId, JobEventType.JOB_INIT));
  assertJobState(job, JobStateInternal.INITED);
  job.handle(new JobStartEvent(jobId));
  assertJobState(job, JobStateInternal.FAILED);

  job.handle(new JobEvent(jobId, JobEventType.JOB_TASK_COMPLETED));
  assertJobState(job, JobStateInternal.FAILED);
  job.handle(new JobEvent(jobId, JobEventType.JOB_TASK_ATTEMPT_COMPLETED));
  assertJobState(job, JobStateInternal.FAILED);
  job.handle(new JobEvent(jobId, JobEventType.JOB_MAP_TASK_RESCHEDULED));
  assertJobState(job, JobStateInternal.FAILED);
  job.handle(new JobEvent(jobId, JobEventType.JOB_TASK_ATTEMPT_FETCH_FAILURE));
  assertJobState(job, JobStateInternal.FAILED);
  Assert.assertEquals(JobState.RUNNING, job.getState());
  when(mockContext.hasSuccessfullyUnregistered()).thenReturn(true);
  Assert.assertEquals(JobState.FAILED, job.getState());

  dispatcher.stop();
  commitHandler.stop();
}
 
Example 26  Project: big-c  File: TestJobHistoryEvents.java
@Test
public void testAssignedQueue() throws Exception {
  Configuration conf = new Configuration();
  MRApp app = new MRAppWithHistory(2, 1, true, this.getClass().getName(),
      true, "assignedQueue");
  app.submit(conf);
  Job job = app.getContext().getAllJobs().values().iterator().next();
  JobId jobId = job.getID();
  LOG.info("JOBID is " + TypeConverter.fromYarn(jobId).toString());
  app.waitForState(job, JobState.SUCCEEDED);
  
  //make sure all events are flushed
  app.waitForState(Service.STATE.STOPPED);
  /*
   * Use HistoryContext to read logged events and verify the number of
   * completed maps
   */
  HistoryContext context = new JobHistory();
  // test start and stop states
  ((JobHistory)context).init(conf);
  ((JobHistory)context).start();
  Assert.assertTrue(context.getStartTime() > 0);
  Assert.assertEquals(((JobHistory)context).getServiceState(),
      Service.STATE.STARTED);

  // get job before stopping JobHistory
  Job parsedJob = context.getJob(jobId);

  // stop JobHistory
  ((JobHistory)context).stop();
  Assert.assertEquals(((JobHistory)context).getServiceState(),
      Service.STATE.STOPPED);

  Assert.assertEquals("QueueName not correct", "assignedQueue",
      parsedJob.getQueueName());
}
 
Example 27  Project: hadoop  File: TestMRClientService.java
private void verifyJobReport(JobReport jr) {
  Assert.assertNotNull("JobReport is null", jr);
  List<AMInfo> amInfos = jr.getAMInfos();
  Assert.assertEquals(1, amInfos.size());
  Assert.assertEquals(JobState.RUNNING, jr.getJobState());
  AMInfo amInfo = amInfos.get(0);
  Assert.assertEquals(MRApp.NM_HOST, amInfo.getNodeManagerHost());
  Assert.assertEquals(MRApp.NM_PORT, amInfo.getNodeManagerPort());
  Assert.assertEquals(MRApp.NM_HTTP_PORT, amInfo.getNodeManagerHttpPort());
  Assert.assertEquals(1, amInfo.getAppAttemptId().getAttemptId());
  Assert.assertEquals(1, amInfo.getContainerId().getApplicationAttemptId()
      .getAttemptId());
  Assert.assertTrue(amInfo.getStartTime() > 0);
  Assert.assertEquals(false, jr.isUber());
}
 
Example 28  Project: hadoop  File: CompletedJob.java
private void constructJobReport() {
  report = Records.newRecord(JobReport.class);
  report.setJobId(jobId);
  report.setJobState(JobState.valueOf(jobInfo.getJobStatus()));
  report.setSubmitTime(jobInfo.getSubmitTime());
  report.setStartTime(jobInfo.getLaunchTime());
  report.setFinishTime(jobInfo.getFinishTime());
  report.setJobName(jobInfo.getJobname());
  report.setUser(jobInfo.getUsername());

  if ( getTotalMaps() == 0 ) {
    report.setMapProgress(1.0f);
  } else {
    report.setMapProgress((float) getCompletedMaps() / getTotalMaps());
  }
  if ( getTotalReduces() == 0 ) {
    report.setReduceProgress(1.0f);
  } else {
    report.setReduceProgress((float) getCompletedReduces() / getTotalReduces());
  }

  report.setJobFile(getConfFile().toString());
  String historyUrl = "N/A";
  try {
    historyUrl =
        MRWebAppUtil.getApplicationWebURLOnJHSWithoutScheme(conf,
            jobId.getAppId());
  } catch (UnknownHostException e) {
    //Ignore.
  }
  report.setTrackingUrl(historyUrl);
  report.setAMInfos(getAMInfos());
  report.setIsUber(isUber());
}
 
Example 29  Project: big-c  File: JobImpl.java
private void rememberLastNonFinalState(JobStateInternal stateInternal) {
  JobState state = getExternalState(stateInternal);
  // if state is not the final state, set lastNonFinalState
  if (state != JobState.SUCCEEDED && state != JobState.FAILED
      && state != JobState.KILLED && state != JobState.ERROR) {
    lastNonFinalState = state;
  }
}
 
Example 30  Project: hadoop  File: PartialJob.java
@Override
public JobState getState() {
  JobState js = null;
  try {
    js = JobState.valueOf(jobIndexInfo.getJobStatus());
  } catch (Exception e) {
    // Meant for use by the display UI. An exception would prevent it from
    // being rendered. Defaulting to KILLED.
    LOG.warn("Exception while parsing job state. Defaulting to KILLED", e);
    js = JobState.KILLED;
  }
  return js;
}
 