org.apache.hadoop.mapred.Counters#fromEscapedCompactString() Source Code Examples

Listed below are example usages of org.apache.hadoop.mapred.Counters#fromEscapedCompactString() drawn from open-source projects; follow the project links to view the full source on GitHub.
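
Before the project examples, here is a minimal round-trip sketch (written for this page, not taken from any of the projects below): it builds a Counters object, serializes it with makeEscapedCompactString(), then parses it back with fromEscapedCompactString() and walks the groups and counters with the same iteration pattern the examples use. The group and counter names are only illustrative.

import java.text.ParseException;

import org.apache.hadoop.mapred.Counters;

public class CountersCompactStringDemo {
  public static void main(String[] args) throws ParseException {
    Counters original = new Counters();
    original.incrCounter("FileSystemCounters", "HDFS_BYTES_READ", 1024L);
    original.incrCounter("FileSystemCounters", "HDFS_BYTES_WRITTEN", 2048L);

    // Escaped compact string: the same representation the examples below parse.
    String compact = original.makeEscapedCompactString();

    Counters parsed = Counters.fromEscapedCompactString(compact);
    for (Counters.Group group : parsed) {
      for (Counters.Counter counter : group) {
        System.out.println(group.getName() + "." + counter.getName()
            + " = " + counter.getCounter());
      }
    }
  }
}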

Example 1  Project: hiped2   File: JobHistoryHelper.java
public static String extractCounter(String counterFromHist,
                                    String... counterNames)
    throws ParseException {
  Counters counters =
      Counters.fromEscapedCompactString(counterFromHist);
  for (Counters.Group group : counters) {
    for (Counters.Counter counter : group) {
      for (String counterName : counterNames) {
        if (counterName.equals(counter.getName())) {
          return String.valueOf(counter.getCounter());
        }
      }
    }
  }
  return null;
}
 
Example 2  Project: hiped2   File: ExtractJobMetrics.java
public static String extractCounter(String counterFromHist,
                                    String... counterNames)
    throws ParseException {
  Counters counters =
      Counters.fromEscapedCompactString(counterFromHist);
  for (Counters.Group group : counters) {
    for (Counters.Counter counter : group) {
      for (String counterName : counterNames) {
        if (counterName.equals(counter.getName())) {
          return String.valueOf(counter.getCounter());
        }
      }
    }
  }
  return "";
}
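
The ExtractJobMetrics variant is identical to the JobHistoryHelper one except that it returns an empty string instead of null when no counter name matches. A hypothetical invocation follows, with the compact string built in-process purely for illustration (in hiped2 it would come from a parsed job-history record):

// Hypothetical usage of extractCounter(); the compact string is built here
// only for illustration.
Counters counters = new Counters();
counters.incrCounter("FileSystemCounters", "HDFS_BYTES_READ", 1024L);
String counterFromHist = counters.makeEscapedCompactString();

String hdfsBytesRead =
    JobHistoryHelper.extractCounter(counterFromHist, "HDFS_BYTES_READ");
// hdfsBytesRead is "1024"; an unmatched name yields null
// (or "" from the ExtractJobMetrics variant).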
 
Example 3  Project: spork   File: HadoopJobHistoryLoader.java
@SuppressWarnings("deprecation")
private static void parseAndAddJobCounters(Map<String, String> job, String counters) {
    try {
        Counters counterGroups = Counters.fromEscapedCompactString(counters);
        for (Group otherGroup : counterGroups) {
            Group group = counterGroups.getGroup(otherGroup.getName());
            for (Counter otherCounter : otherGroup) {
                Counter counter = group.getCounterForName(otherCounter.getName());
                job.put(otherCounter.getName(), String.valueOf(counter.getValue()));
            }
        }
    } catch (ParseException e) {
       LOG.warn("Failed to parse job counters", e);
    }
}
 
Example 4  Project: RDFS   File: JobStatistics.java
private void parseAndAddJobCounters(Hashtable<Enum, String> job, String counters) throws ParseException {
  Counters cnt = Counters.fromEscapedCompactString(counters);
  for (java.util.Iterator<Counters.Group> grps = cnt.iterator(); grps.hasNext(); ) {
    Counters.Group grp = grps.next();
    //String groupname = "<" + grp.getName() + ">::<" + grp.getDisplayName() + ">";
    for (java.util.Iterator<Counters.Counter> mycounters = grp.iterator(); mycounters.hasNext(); ) {
      Counters.Counter counter = mycounters.next();
      //String countername = "<"+counter.getName()+">::<"+counter.getDisplayName()+">::<"+counter.getValue()+">";
      //System.err.println("groupName:"+groupname+",countername: "+countername);
      String countername = grp.getDisplayName()+"."+counter.getDisplayName();
      String value = (new Long(counter.getValue())).toString();
      String[] parts = {countername,value};
      //System.err.println("part0:<"+parts[0]+">,:part1 <"+parts[1]+">");
      if (parts[0].equals("FileSystemCounters.FILE_BYTES_READ")) {
        job.put(JobKeys.FILE_BYTES_READ, parts[1]);
      } else if (parts[0].equals("FileSystemCounters.FILE_BYTES_WRITTEN")) {
        job.put(JobKeys.FILE_BYTES_WRITTEN, parts[1]);
      } else if (parts[0].equals("FileSystemCounters.HDFS_BYTES_READ")) {
        job.put(JobKeys.HDFS_BYTES_READ, parts[1]);
      } else if (parts[0].equals("FileSystemCounters.HDFS_BYTES_WRITTEN")) {
        job.put(JobKeys.HDFS_BYTES_WRITTEN, parts[1]);
      } else if (parts[0].equals("Job Counters .Launched map tasks")) {
        job.put(JobKeys.LAUNCHED_MAPS, parts[1]);
      } else if (parts[0].equals("Job Counters .Launched reduce tasks")) {
        job.put(JobKeys.LAUNCHED_REDUCES, parts[1]);
      } else if (parts[0].equals("Job Counters .Data-local map tasks")) {
        job.put(JobKeys.DATALOCAL_MAPS, parts[1]);
      } else if (parts[0].equals("Job Counters .Rack-local map tasks")) {
        job.put(JobKeys.RACKLOCAL_MAPS, parts[1]);
      } else if (parts[0].equals("Map-Reduce Framework.Map input records")) {
        job.put(JobKeys.MAP_INPUT_RECORDS, parts[1]);
      } else if (parts[0].equals("Map-Reduce Framework.Map output records")) {
        job.put(JobKeys.MAP_OUTPUT_RECORDS, parts[1]);
      } else if (parts[0].equals("Map-Reduce Framework.Map input bytes")) {
        job.put(JobKeys.MAP_INPUT_BYTES, parts[1]);
      } else if (parts[0].equals("Map-Reduce Framework.Map output bytes")) {
        job.put(JobKeys.MAP_OUTPUT_BYTES, parts[1]);
      } else if (parts[0].equals("Map-Reduce Framework.Combine input records")) {
        job.put(JobKeys.COMBINE_INPUT_RECORDS, parts[1]);
      } else if (parts[0].equals("Map-Reduce Framework.Combine output records")) {
        job.put(JobKeys.COMBINE_OUTPUT_RECORDS, parts[1]);
      } else if (parts[0].equals("Map-Reduce Framework.Reduce input groups")) {
        job.put(JobKeys.REDUCE_INPUT_GROUPS, parts[1]);
      } else if (parts[0].equals("Map-Reduce Framework.Reduce input records")) {
        job.put(JobKeys.REDUCE_INPUT_RECORDS, parts[1]);
      } else if (parts[0].equals("Map-Reduce Framework.Reduce output records")) {
        job.put(JobKeys.REDUCE_OUTPUT_RECORDS, parts[1]);
      } else if (parts[0].equals("Map-Reduce Framework.Spilled Records")) {
        job.put(JobKeys.SPILLED_RECORDS, parts[1]);
      } else if (parts[0].equals("Map-Reduce Framework.Reduce shuffle bytes")) {
        job.put(JobKeys.SHUFFLE_BYTES, parts[1]);
      } else {
        System.err.println("JobCounterKey:<"+parts[0]+"> ==> NOT INCLUDED IN PERFORMANCE ADVISOR");
      }
    }
  }  
}
 
Example 5  Project: RDFS   File: JobStatistics.java
private void parseAndAddMapTaskCounters(MapTaskStatistics mapTask, String counters) throws ParseException {
  Counters cnt = Counters.fromEscapedCompactString(counters);
  for (java.util.Iterator<Counters.Group> grps = cnt.iterator(); grps.hasNext(); ) {
    Counters.Group grp = grps.next();
    //String groupname = "<" + grp.getName() + ">::<" + grp.getDisplayName() + ">";
    for (java.util.Iterator<Counters.Counter> mycounters = grp.iterator(); mycounters.hasNext(); ) {
      Counters.Counter counter = mycounters.next();
      //String countername = "<"+counter.getName()+">::<"+counter.getDisplayName()+">::<"+counter.getValue()+">";
      //System.out.println("groupName:"+groupname+",countername: "+countername);
      String countername = grp.getDisplayName()+"."+counter.getDisplayName();
      String value = (new Long(counter.getValue())).toString();
      String[] parts = {countername,value};
      //System.out.println("part0:"+parts[0]+",:part1 "+parts[1]);
      if (parts[0].equals("FileSystemCounters.FILE_BYTES_READ")) {
        mapTask.setValue(MapTaskKeys.FILE_BYTES_READ, parts[1]);
      } else if (parts[0].equals("FileSystemCounters.FILE_BYTES_WRITTEN")) {
        mapTask.setValue(MapTaskKeys.FILE_BYTES_WRITTEN, parts[1]);
      } else if (parts[0].equals("FileSystemCounters.HDFS_BYTES_READ")) {
        mapTask.setValue(MapTaskKeys.HDFS_BYTES_READ, parts[1]);
      } else if (parts[0].equals("FileSystemCounters.HDFS_BYTES_WRITTEN")) {
        mapTask.setValue(MapTaskKeys.HDFS_BYTES_WRITTEN, parts[1]);
      } else if (parts[0].equals("Map-Reduce Framework.Map input records")) {
        mapTask.setValue(MapTaskKeys.INPUT_RECORDS, parts[1]);
      } else if (parts[0].equals("Map-Reduce Framework.Map output records")) {
        mapTask.setValue(MapTaskKeys.OUTPUT_RECORDS, parts[1]);
      } else if (parts[0].equals("Map-Reduce Framework.Map input bytes")) {
        mapTask.setValue(MapTaskKeys.INPUT_BYTES, parts[1]);
      } else if (parts[0].equals("Map-Reduce Framework.Map output bytes")) {
        mapTask.setValue(MapTaskKeys.OUTPUT_BYTES, parts[1]);
      } else if (parts[0].equals("Map-Reduce Framework.Combine input records")) {
        mapTask.setValue(MapTaskKeys.COMBINE_INPUT_RECORDS, parts[1]);
      } else if (parts[0].equals("Map-Reduce Framework.Combine output records")) {
        mapTask.setValue(MapTaskKeys.COMBINE_OUTPUT_RECORDS, parts[1]);
      } else if (parts[0].equals("Map-Reduce Framework.Spilled Records")) {
        mapTask.setValue(MapTaskKeys.SPILLED_RECORDS, parts[1]);
      } else {
        System.err.println("MapCounterKey:<"+parts[0]+"> ==> NOT INCLUDED IN PERFORMANCE ADVISOR MAP TASK");
      }
    }    
  }
}
 
Example 6  Project: RDFS   File: JobStatistics.java
private void parseAndAddReduceTaskCounters(ReduceTaskStatistics reduceTask, String counters) throws ParseException {
  Counters cnt = Counters.fromEscapedCompactString(counters);
  for (java.util.Iterator<Counters.Group> grps = cnt.iterator(); grps.hasNext(); ) {
    Counters.Group grp = grps.next();
    //String groupname = "<" + grp.getName() + ">::<" + grp.getDisplayName() + ">";
    for (java.util.Iterator<Counters.Counter> mycounters = grp.iterator(); mycounters.hasNext(); ) {
      Counters.Counter counter = mycounters.next();
      //String countername = "<"+counter.getName()+">::<"+counter.getDisplayName()+">::<"+counter.getValue()+">";
      //System.out.println("groupName:"+groupname+",countername: "+countername);
      String countername = grp.getDisplayName()+"."+counter.getDisplayName();
      String value = (new Long(counter.getValue())).toString();
      String[] parts = {countername,value};
      //System.out.println("part0:"+parts[0]+",:part1 "+parts[1]);
      if (parts[0].equals("FileSystemCounters.FILE_BYTES_READ")) {
        reduceTask.setValue(ReduceTaskKeys.FILE_BYTES_READ, parts[1]);
      } else if (parts[0].equals("FileSystemCounters.FILE_BYTES_WRITTEN")) {
        reduceTask.setValue(ReduceTaskKeys.FILE_BYTES_WRITTEN, parts[1]);
      } else if (parts[0].equals("FileSystemCounters.HDFS_BYTES_READ")) {
        reduceTask.setValue(ReduceTaskKeys.HDFS_BYTES_READ, parts[1]);
      } else if (parts[0].equals("FileSystemCounters.HDFS_BYTES_WRITTEN")) {
        reduceTask.setValue(ReduceTaskKeys.HDFS_BYTES_WRITTEN, parts[1]);
      } else if (parts[0].equals("Map-Reduce Framework.Reduce input records")) {
        reduceTask.setValue(ReduceTaskKeys.INPUT_RECORDS, parts[1]);
      } else if (parts[0].equals("Map-Reduce Framework.Reduce output records")) {
        reduceTask.setValue(ReduceTaskKeys.OUTPUT_RECORDS, parts[1]);
      } else if (parts[0].equals("Map-Reduce Framework.Combine input records")) {
        reduceTask.setValue(ReduceTaskKeys.COMBINE_INPUT_RECORDS, parts[1]);
      } else if (parts[0].equals("Map-Reduce Framework.Combine output records")) {
        reduceTask.setValue(ReduceTaskKeys.COMBINE_OUTPUT_RECORDS, parts[1]);
      } else if (parts[0].equals("Map-Reduce Framework.Reduce input groups")) {
        reduceTask.setValue(ReduceTaskKeys.INPUT_GROUPS, parts[1]);
      } else if (parts[0].equals("Map-Reduce Framework.Spilled Records")) {
        reduceTask.setValue(ReduceTaskKeys.SPILLED_RECORDS, parts[1]);
      } else if (parts[0].equals("Map-Reduce Framework.Reduce shuffle bytes")) {
        reduceTask.setValue(ReduceTaskKeys.SHUFFLE_BYTES, parts[1]);
      } else {
        System.err.println("ReduceCounterKey:<"+parts[0]+"> ==> NOT INCLUDED IN PERFORMANCE ADVISOR REDUCE TASK");
      }
    }
  }    
}
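
The three JobStatistics methods above map display-name keys such as "Map-Reduce Framework.Map input records" onto JobKeys, MapTaskKeys, and ReduceTaskKeys enums through long if/else chains. Below is an illustrative, table-driven alternative for the job-level case, not code from the RDFS project; it assumes the JobKeys enum and the surrounding JobStatistics class from the listing above, and only a few representative keys are shown.

// Illustrative alternative: resolve "<group display name>.<counter display name>"
// keys through a lookup table instead of an if/else chain.
private static final java.util.Map<String, JobKeys> JOB_COUNTER_KEYS =
    new java.util.HashMap<String, JobKeys>();
static {
  JOB_COUNTER_KEYS.put("FileSystemCounters.FILE_BYTES_READ", JobKeys.FILE_BYTES_READ);
  JOB_COUNTER_KEYS.put("FileSystemCounters.FILE_BYTES_WRITTEN", JobKeys.FILE_BYTES_WRITTEN);
  JOB_COUNTER_KEYS.put("FileSystemCounters.HDFS_BYTES_READ", JobKeys.HDFS_BYTES_READ);
  JOB_COUNTER_KEYS.put("FileSystemCounters.HDFS_BYTES_WRITTEN", JobKeys.HDFS_BYTES_WRITTEN);
  JOB_COUNTER_KEYS.put("Map-Reduce Framework.Map input records", JobKeys.MAP_INPUT_RECORDS);
  JOB_COUNTER_KEYS.put("Map-Reduce Framework.Reduce output records", JobKeys.REDUCE_OUTPUT_RECORDS);
  // ... remaining keys follow the same pattern as the if/else chain above
}

private void parseAndAddJobCounters(java.util.Hashtable<Enum, String> job, String counters)
    throws java.text.ParseException {
  Counters cnt = Counters.fromEscapedCompactString(counters);
  for (Counters.Group grp : cnt) {
    for (Counters.Counter counter : grp) {
      String key = grp.getDisplayName() + "." + counter.getDisplayName();
      JobKeys jobKey = JOB_COUNTER_KEYS.get(key);
      if (jobKey != null) {
        job.put(jobKey, String.valueOf(counter.getValue()));
      } else {
        System.err.println("JobCounterKey:<" + key + "> ==> NOT INCLUDED IN PERFORMANCE ADVISOR");
      }
    }
  }
}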