org.apache.hadoop.mapred.JobHistory#Keys() Source Code Examples

Listed below are example usages of org.apache.hadoop.mapred.JobHistory#Keys(); follow the project links to view the full source on GitHub.
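JobHistory.Keys is an enum on the old org.apache.hadoop.mapred.JobHistory class (the Hadoop 0.20/1.x history parser); parsed job, task, and attempt records expose their fields as a Map<JobHistory.Keys, String>. As a minimal sketch of a lookup against such a map (the helper class and method names below are ours, not taken from any of the projects listed):

import java.util.Map;

import org.apache.hadoop.mapred.JobHistory;

public class JobHistoryKeysSketch {

    // Hypothetical helper: read one field from a parsed history record,
    // falling back to a default when the key is absent.
    static String getOrDefault(Map<JobHistory.Keys, String> values,
                               JobHistory.Keys key, String fallback) {
        String v = values.get(key);
        return (v != null) ? v : fallback;
    }

    static boolean isSuccessful(Map<JobHistory.Keys, String> attemptValues) {
        // In these history logs, TASK_STATUS holds the string "SUCCESS"
        // for successful attempts.
        return "SUCCESS".equals(getOrDefault(attemptValues,
                JobHistory.Keys.TASK_STATUS, ""));
    }
}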

Example 1  Project: spork   File: HadoopJobHistoryLoader.java
private static Map<JobHistory.Keys, String> getLastSuccessfulTaskAttempt(
        JobHistory.Task task) {

    Map<String, JobHistory.TaskAttempt> taskAttempts = task
            .getTaskAttempts();
    int size = taskAttempts.size();
    Iterator<Map.Entry<String, JobHistory.TaskAttempt>> kv = taskAttempts
            .entrySet().iterator();
    for (int i = 0; i < size; i++) {
        // CHECK_IT: Only one SUCCESSFUL TASK ATTEMPT
        Map.Entry<String, JobHistory.TaskAttempt> tae = kv.next();
        JobHistory.TaskAttempt attempt = tae.getValue();
        if (null != attempt && null != attempt.getValues()
                && attempt.getValues().containsKey(JobHistory.Keys.TASK_STATUS)
                && attempt.getValues().get(JobHistory.Keys.TASK_STATUS).equals("SUCCESS")) {
            return attempt.getValues();
        }
    }

    return null;
}
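Note that this variant guards against a null attempt, a null values map, and a missing TASK_STATUS key before comparing against "SUCCESS"; the two JobStatistics variants below run the same scan without those checks.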
 
Example 2  Project: RDFS   File: JobStatistics.java
private java.util.Map<JobHistory.Keys, String> getLastSuccessfulTaskAttempt(JobHistory.Task task) {
  
  Map<String, JobHistory.TaskAttempt> taskAttempts = task.getTaskAttempts();
  int size = taskAttempts.size();
  java.util.Iterator<Map.Entry<String, JobHistory.TaskAttempt>> kv = taskAttempts.entrySet().iterator();
  for (int i=0; i<size; i++) {
    // CHECK_IT: Only one SUCCESSFUL TASK ATTEMPT
    Map.Entry<String, JobHistory.TaskAttempt> tae = kv.next();
    JobHistory.TaskAttempt attempt = tae.getValue();
    if (attempt.getValues().get(JobHistory.Keys.TASK_STATUS).equals("SUCCESS")) {
      return attempt.getValues();
    }
  }
  
  return null;
}
 
Example 3  Project: hadoop-gpu   File: JobStatistics.java
private java.util.Map<JobHistory.Keys, String> getLastSuccessfulTaskAttempt(JobHistory.Task task) {
  
  Map<String, JobHistory.TaskAttempt> taskAttempts = task.getTaskAttempts();
  int size = taskAttempts.size();
  java.util.Iterator<Map.Entry<String, JobHistory.TaskAttempt>> kv = taskAttempts.entrySet().iterator();
  for (int i=0; i<size; i++) {
    // CHECK_IT: Only one SUCCESSFUL TASK ATTEMPT
    Map.Entry<String, JobHistory.TaskAttempt> tae = kv.next();
    JobHistory.TaskAttempt attempt = tae.getValue();
    if (attempt.getValues().get(JobHistory.Keys.TASK_STATUS).equals("SUCCESS")) {
      return attempt.getValues();
    }
  }
  
  return null;
}
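Both JobStatistics variants call attempt.getValues().get(JobHistory.Keys.TASK_STATUS).equals("SUCCESS") directly, so an attempt record with no TASK_STATUS entry would throw a NullPointerException. A null-safe rewrite (our sketch, not code from either project) inverts the comparison so a missing key simply fails the test:

import java.util.Map;

import org.apache.hadoop.mapred.JobHistory;

final class NullSafeStatusCheck {

    // Sketch: same scan as above, but "SUCCESS".equals(x) returns false
    // instead of throwing when x is null.
    static Map<JobHistory.Keys, String> firstSuccessfulAttempt(JobHistory.Task task) {
        for (JobHistory.TaskAttempt attempt : task.getTaskAttempts().values()) {
            if (attempt != null && attempt.getValues() != null
                    && "SUCCESS".equals(attempt.getValues()
                            .get(JobHistory.Keys.TASK_STATUS))) {
                return attempt.getValues();
            }
        }
        return null;
    }
}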
 
Example 4  Project: spork   File: HadoopJobHistoryLoader.java
private static void populateJob (Map<JobHistory.Keys, String> jobC, Map<String, String> job) {            
    int size = jobC.size();
    Iterator<Map.Entry<JobHistory.Keys, String>> kv = jobC.entrySet().iterator();
    for (int i = 0; i < size; i++) {
        Map.Entry<JobHistory.Keys, String> entry = (Map.Entry<JobHistory.Keys, String>) kv.next();
        JobHistory.Keys key = entry.getKey();
        String value = entry.getValue();
        switch (key) {
        case JOBTRACKERID: job.put(JobKeys.JOBTRACKERID.toString(), value); break;           
        case FINISH_TIME: job.put(JobKeys.FINISH_TIME.toString(), value); break;
        case JOBID: job.put(JobKeys.JOBID.toString(), value); break;
        case JOBNAME: job.put(JobKeys.JOBNAME.toString(), value); break;
        case USER: job.put(JobKeys.USER.toString(), value); break;
        case JOBCONF: job.put(JobKeys.JOBCONF.toString(), value); break;
        case SUBMIT_TIME: job.put(JobKeys.SUBMIT_TIME.toString(), value); break;
        case LAUNCH_TIME: job.put(JobKeys.LAUNCH_TIME.toString(), value); break;
        case TOTAL_MAPS: job.put(JobKeys.TOTAL_MAPS.toString(), value); break;
        case TOTAL_REDUCES: job.put(JobKeys.TOTAL_REDUCES.toString(), value); break;
        case FAILED_MAPS: job.put(JobKeys.FAILED_MAPS.toString(), value); break;
        case FAILED_REDUCES: job.put(JobKeys.FAILED_REDUCES.toString(), value); break;
        case FINISHED_MAPS: job.put(JobKeys.FINISHED_MAPS.toString(), value); break;
        case FINISHED_REDUCES: job.put(JobKeys.FINISHED_REDUCES.toString(), value); break;
        case JOB_STATUS: job.put(JobKeys.STATUS.toString(), value); break;
        case COUNTERS:
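            // NOTE: String.concat returns a new string and its result is
            // discarded here, so this call does not modify 'value'.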
            value.concat(",");
            parseAndAddJobCounters(job, value);
            break;
        default: 
            LOG.debug("JobHistory.Keys."+ key + " : NOT INCLUDED IN LOADER RETURN VALUE");
            break;
        }
    }
}
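The switch above is essentially a one-to-one rename from JobHistory.Keys to the loader's own JobKeys enum (only JOB_STATUS changes name, to STATUS), with COUNTERS routed through parseAndAddJobCounters. The same rename could be table-driven; below is a sketch of ours, assuming plain string targets suffice (the class and map names are not from spork):

import java.util.EnumMap;
import java.util.Map;

import org.apache.hadoop.mapred.JobHistory;

final class KeyMappingSketch {

    // Sketch: table-driven alternative to the switch. Only a few entries
    // are shown; COUNTERS would still need its special parsing path.
    private static final EnumMap<JobHistory.Keys, String> RENAMES =
            new EnumMap<JobHistory.Keys, String>(JobHistory.Keys.class);
    static {
        RENAMES.put(JobHistory.Keys.JOBID, "JOBID");
        RENAMES.put(JobHistory.Keys.JOBNAME, "JOBNAME");
        RENAMES.put(JobHistory.Keys.JOB_STATUS, "STATUS"); // the one real rename
    }

    static void populateJob(Map<JobHistory.Keys, String> jobC, Map<String, String> job) {
        for (Map.Entry<JobHistory.Keys, String> e : jobC.entrySet()) {
            String target = RENAMES.get(e.getKey());
            if (target != null) {
                job.put(target, e.getValue());
            }
        }
    }
}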
 
Example 5  Project: RDFS   File: JobStatistics.java
private void populate_Job (Hashtable<Enum, String> job, java.util.Map<JobHistory.Keys, String> jobC) throws ParseException {
  int size = jobC.size(); 
  java.util.Iterator<Map.Entry<JobHistory.Keys, String>> kv = jobC.entrySet().iterator();
  for (int i = 0; i < size; i++)
  {
    Map.Entry<JobHistory.Keys, String> entry = (Map.Entry<JobHistory.Keys, String>) kv.next();
    JobHistory.Keys key = entry.getKey();
    String value = entry.getValue();
    //System.out.println("JobHistory.JobKeys."+key+": "+value);
    switch (key) {
    case JOBTRACKERID: job.put(JobKeys.JOBTRACKERID, value); break;
    case FINISH_TIME: job.put(JobKeys.FINISH_TIME, value); break;
    case JOBID: job.put(JobKeys.JOBID, value); break;
    case JOBNAME: job.put(JobKeys.JOBNAME, value); break;
    case USER: job.put(JobKeys.USER, value); break;
    case JOBCONF: job.put(JobKeys.JOBCONF, value); break;
    case SUBMIT_TIME: job.put(JobKeys.SUBMIT_TIME, value); break;
    case LAUNCH_TIME: job.put(JobKeys.LAUNCH_TIME, value); break;
    case TOTAL_MAPS: job.put(JobKeys.TOTAL_MAPS, value); break;
    case TOTAL_REDUCES: job.put(JobKeys.TOTAL_REDUCES, value); break;
    case FAILED_MAPS: job.put(JobKeys.FAILED_MAPS, value); break;
    case FAILED_REDUCES: job.put(JobKeys.FAILED_REDUCES, value); break;
    case FINISHED_MAPS: job.put(JobKeys.FINISHED_MAPS, value); break;
    case FINISHED_REDUCES: job.put(JobKeys.FINISHED_REDUCES, value); break;
    case JOB_STATUS: job.put(JobKeys.STATUS, value); break;
    case JOB_PRIORITY: job.put(JobKeys.JOB_PRIORITY, value); break;
    case COUNTERS:
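      // NOTE: String.concat returns a new string and its result is
      // discarded here, so this call does not modify 'value'.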
      value.concat(",");
      parseAndAddJobCounters(job, value);
      break;
    default:
      System.err.println("JobHistory.Keys." + key + " : NOT INCLUDED IN PERFORMANCE ADVISOR COUNTERS");
      break;
    }
  }
}
 
Example 6  Project: hadoop-gpu   File: JobStatistics.java
private void populate_Job (Hashtable<Enum, String> job, java.util.Map<JobHistory.Keys, String> jobC) throws ParseException {
  int size = jobC.size(); 
  java.util.Iterator<Map.Entry<JobHistory.Keys, String>> kv = jobC.entrySet().iterator();
  for (int i = 0; i < size; i++)
  {
    Map.Entry<JobHistory.Keys, String> entry = (Map.Entry<JobHistory.Keys, String>) kv.next();
    JobHistory.Keys key = entry.getKey();
    String value = entry.getValue();
    switch (key) {
    case JOBTRACKERID: job.put(JobKeys.JOBTRACKERID, value); break;
    //case START_TIME: job.put(JobKeys., value); break;
    case FINISH_TIME: job.put(JobKeys.FINISH_TIME, value); break;
    case JOBID: job.put(JobKeys.JOBID, value); break;
    case JOBNAME: job.put(JobKeys.JOBNAME, value); break;
    case USER: job.put(JobKeys.USER, value); break;
    case JOBCONF: job.put(JobKeys.JOBCONF, value); break;
    case SUBMIT_TIME: job.put(JobKeys.SUBMIT_TIME, value); break;
    case LAUNCH_TIME: job.put(JobKeys.LAUNCH_TIME, value); break;
    case TOTAL_MAPS: job.put(JobKeys.TOTAL_MAPS, value); break;
    case TOTAL_REDUCES: job.put(JobKeys.TOTAL_REDUCES, value); break;
    case FAILED_MAPS: job.put(JobKeys.FAILED_MAPS, value); break;
    case FAILED_REDUCES: job.put(JobKeys.FAILED_REDUCES, value); break;
    case FINISHED_MAPS: job.put(JobKeys.FINISHED_MAPS, value); break;
    case FINISHED_REDUCES: job.put(JobKeys.FINISHED_REDUCES, value); break;
    case JOB_STATUS: job.put(JobKeys.STATUS, value); break;
    case COUNTERS:
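      // NOTE: String.concat returns a new string and its result is
      // discarded here, so this call does not modify 'value'.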
      value.concat(",");
      parseAndAddJobCounters(job, value);
      break;
    default:
      System.out.println("JobHistory.Keys." + key + " : NOT INCLUDED IN PERFORMANCE ADVISOR COUNTERS");
      break;
    }
  }
}
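Taken together, the three populate variants differ only at the margins: the spork loader writes String-keyed entries and logs unmapped keys at debug level; the RDFS version additionally maps JOB_PRIORITY and reports unmapped keys on System.err; the hadoop-gpu version matches RDFS minus JOB_PRIORITY and prints to System.out. In all three, the history log's JOB_STATUS value lands under the STATUS key.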