Source code examples for the class org.apache.hadoop.mapreduce.Counters

The examples below show how to use the org.apache.hadoop.mapreduce.Counters API, with sample code and usage patterns; you can also follow the links to view the full source on GitHub.
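
A minimal, self-contained sketch of the core Counters API that recurs throughout the examples below: creating a Counters object, incrementing a counter via findCounter, and iterating groups. The group and counter names here are illustrative only, not taken from any of the listed projects.

import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.CounterGroup;
import org.apache.hadoop.mapreduce.Counters;

public class CountersBasics {
  public static void main(String[] args) {
    Counters counters = new Counters();

    // findCounter(group, name) creates the counter on first access.
    Counter parsed = counters.findCounter("DEMO_GROUP", "RECORDS_PARSED");
    parsed.increment(42);

    // Counters is Iterable<CounterGroup>; each group is Iterable<Counter>.
    for (CounterGroup group : counters) {
      for (Counter counter : group) {
        System.out.println(group.getName() + "\t"
            + counter.getDisplayName() + "=" + counter.getValue());
      }
    }
  }
}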

Example 1  Project: big-c   File: TestEvents.java
/**
 * Test the getters of TaskAttemptFinishedEvent and TaskAttemptFinished.
 * 
 * @throws Exception
 */
@Test(timeout = 10000)
public void testTaskAttemptFinishedEvent() throws Exception {

  JobID jid = new JobID("001", 1);
  TaskID tid = new TaskID(jid, TaskType.REDUCE, 2);
  TaskAttemptID taskAttemptId = new TaskAttemptID(tid, 3);
  Counters counters = new Counters();
  TaskAttemptFinishedEvent test = new TaskAttemptFinishedEvent(taskAttemptId,
      TaskType.REDUCE, "TEST", 123L, "RAKNAME", "HOSTNAME", "STATUS",
      counters);
  assertEquals(test.getAttemptId().toString(), taskAttemptId.toString());

  assertEquals(test.getCounters(), counters);
  assertEquals(test.getFinishTime(), 123L);
  assertEquals(test.getHostname(), "HOSTNAME");
  assertEquals(test.getRackName(), "RAKNAME");
  assertEquals(test.getState(), "STATUS");
  assertEquals(test.getTaskId(), tid);
  assertEquals(test.getTaskStatus(), "TEST");
  assertEquals(test.getTaskType(), TaskType.REDUCE);

}
 
Example 2  Project: phoenix   File: IndexScrutinyToolIT.java
/**
 * Tests an index with the same number of rows as the data table, but one of the index rows is
 * incorrect. Scrutiny should report the invalid rows.
 */
@Test public void testEqualRowCountIndexIncorrect() throws Exception {
    // insert one valid row
    upsertRow(dataTableUpsertStmt, 1, "name-1", 94010);
    conn.commit();

    // disable the index and insert another row which is not indexed
    disableIndex();
    upsertRow(dataTableUpsertStmt, 2, "name-2", 95123);
    conn.commit();

    // insert a bad row into the index
    upsertIndexRow("badName", 2, 9999);
    conn.commit();

    // scrutiny should report the bad row
    List<Job> completedJobs = runScrutiny(schemaName, dataTableName, indexTableName);
    Job job = completedJobs.get(0);
    assertTrue(job.isSuccessful());
    Counters counters = job.getCounters();
    assertEquals(1, getCounterValue(counters, VALID_ROW_COUNT));
    assertEquals(1, getCounterValue(counters, INVALID_ROW_COUNT));
}
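
The getCounterValue helper used in these Phoenix scrutiny tests belongs to the test class and is not shown here. A minimal sketch of what such a helper might look like, assuming the counter keys (VALID_ROW_COUNT, INVALID_ROW_COUNT) are enum constants; this is an illustration, not the actual Phoenix implementation:

private static long getCounterValue(Counters counters, Enum<?> key) {
  // findCounter(Enum) derives the counter group from the enum's declaring class.
  return counters.findCounter(key).getValue();
}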
 
Example 3  Project: datawave   File: CounterDump.java
public String toString() {
    StringBuilder builder = new StringBuilder();
    
    while (source.hasNext()) {
        Entry<String,Counters> nextCntr = source.next();
        builder.append("\n").append(nextCntr.getKey()).append("\n----------------------\n");
        Counters counters = nextCntr.getValue();
        
        for (String groupName : counters.getGroupNames()) {
            
            CounterGroup group = counters.getGroup(groupName);
            Iterator<Counter> cntrItr = group.iterator();
            while (cntrItr.hasNext()) {
                Counter counter = cntrItr.next();
                builder.append(groupName).append("\t").append(counter.getDisplayName()).append("=").append(counter.getValue()).append("\n");
            }
            
        }
    }
    
    return builder.toString();
}
 
Example 4
@Override
protected boolean runJob(Job job) throws ClassNotFoundException, IOException,
    InterruptedException {

  PerfCounters perfCounters = new PerfCounters();
  perfCounters.startClock();

  boolean success = doSubmitJob(job);
  perfCounters.stopClock();

  Counters jobCounters = job.getCounters();
  // If the job has been retired, these may be unavailable.
  if (null == jobCounters) {
    displayRetiredJobNotice(LOG);
  } else {
    perfCounters.addBytes(jobCounters.getGroup("FileSystemCounters")
      .findCounter("HDFS_BYTES_READ").getValue());
    LOG.info("Transferred " + perfCounters.toString());
    long numRecords =  ConfigurationHelper.getNumMapInputRecords(job);
    LOG.info("Exported " + numRecords + " records.");
  }

  return success;
}
 
Example 5  Project: hadoop   File: MapAttemptFinishedEvent.java
/** 
 * Create an event for successful completion of map attempts
 * @param id Task Attempt ID
 * @param taskType Type of the task
 * @param taskStatus Status of the task
 * @param mapFinishTime Finish time of the map phase
 * @param finishTime Finish time of the attempt
 * @param hostname Name of the host where the map executed
 * @param port RPC port for the tracker host.
 * @param rackName Name of the rack where the map executed
 * @param state State string for the attempt
 * @param counters Counters for the attempt
 * @param allSplits the "splits", or a pixelated graph of various
 *        measurable worker node state variables against progress.
 *        Currently there are four: wallclock time, CPU time,
 *        virtual memory and physical memory.
 *
 *        If you have no splits data, pass {@code null} for this
 *        parameter.
 */
public MapAttemptFinishedEvent
    (TaskAttemptID id, TaskType taskType, String taskStatus, 
     long mapFinishTime, long finishTime, String hostname, int port, 
     String rackName, String state, Counters counters, int[][] allSplits) {
  this.attemptId = id;
  this.taskType = taskType;
  this.taskStatus = taskStatus;
  this.mapFinishTime = mapFinishTime;
  this.finishTime = finishTime;
  this.hostname = hostname;
  this.rackName = rackName;
  this.port = port;
  this.state = state;
  this.counters = counters;
  this.allSplits = allSplits;
  this.clockSplits = ProgressSplitsBlock.arrayGetWallclockTime(allSplits);
  this.cpuUsages = ProgressSplitsBlock.arrayGetCPUTime(allSplits);
  this.vMemKbytes = ProgressSplitsBlock.arrayGetVMemKbytes(allSplits);
  this.physMemKbytes = ProgressSplitsBlock.arrayGetPhysMemKbytes(allSplits);
}
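
As the javadoc above notes, callers with no splits data pass {@code null} for allSplits. A hedged usage sketch; every literal value below is illustrative, not taken from the Hadoop sources:

JobID jobId = new JobID("001", 1);
TaskAttemptID attemptId =
    new TaskAttemptID(new TaskID(jobId, TaskType.MAP, 0), 0);
Counters counters = new Counters();
MapAttemptFinishedEvent event = new MapAttemptFinishedEvent(
    attemptId, TaskType.MAP, "SUCCEEDED",
    100L,             // mapFinishTime
    200L,             // finishTime
    "localhost", -1,  // hostname, port
    "/default-rack",  // rackName
    "MAP", counters,  // state, counters
    null);            // allSplits: none available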
 
Example 6  Project: phoenix   File: IndexScrutinyToolIT.java
/**
 * Tests the case where there are more data table rows than index table rows. Scrutiny should
 * report the number of incorrect rows.
 */
@Test public void testMoreDataRows() throws Exception {
    upsertRow(dataTableUpsertStmt, 1, "name-1", 95123);
    conn.commit();
    disableIndex();
    // these rows won't have a corresponding index row
    upsertRow(dataTableUpsertStmt, 2, "name-2", 95124);
    upsertRow(dataTableUpsertStmt, 3, "name-3", 95125);
    conn.commit();

    List<Job> completedJobs = runScrutiny(schemaName, dataTableName, indexTableName);
    Job job = completedJobs.get(0);
    assertTrue(job.isSuccessful());
    Counters counters = job.getCounters();
    assertEquals(1, getCounterValue(counters, VALID_ROW_COUNT));
    assertEquals(2, getCounterValue(counters, INVALID_ROW_COUNT));
}
 
Example 7  Project: big-c   File: CompletedTask.java
private void constructTaskReport() {
  loadAllTaskAttempts();
  this.report = Records.newRecord(TaskReport.class);
  report.setTaskId(taskId);
  long minLaunchTime = Long.MAX_VALUE;
  for(TaskAttempt attempt: attempts.values()) {
    minLaunchTime = Math.min(minLaunchTime, attempt.getLaunchTime());
  }
  minLaunchTime = minLaunchTime == Long.MAX_VALUE ? -1 : minLaunchTime;
  report.setStartTime(minLaunchTime);
  report.setFinishTime(taskInfo.getFinishTime());
  report.setTaskState(getState());
  report.setProgress(getProgress());
  Counters counters = getCounters();
  if (counters == null) {
    counters = EMPTY_COUNTERS;
  }
  report.setCounters(TypeConverter.toYarn(counters));
  if (successfulAttempt != null) {
    report.setSuccessfulAttempt(successfulAttempt);
  }
  report.addAllDiagnostics(reportDiagnostics);
  report
      .addAllRunningAttempts(new ArrayList<TaskAttemptId>(attempts.keySet()));
}
 
Example 8  Project: big-c   File: TaskImpl.java
@Override
public Counters getCounters() {
  Counters counters = null;
  readLock.lock();
  try {
    TaskAttempt bestAttempt = selectBestAttempt();
    if (bestAttempt != null) {
      counters = bestAttempt.getCounters();
    } else {
      counters = TaskAttemptImpl.EMPTY_COUNTERS;
//      counters.groups = new HashMap<CharSequence, CounterGroup>();
    }
    return counters;
  } finally {
    readLock.unlock();
  }
}
 
Example 9
private void assertMapTask(int i, Counters counters) {
  for (CounterGroup counterGroup : counters) {
    String name = counterGroup.getName();
    boolean jobCounterGroup = false;
    if (name.equals("org.apache.hadoop.mapreduce.JobCounter")) {
      jobCounterGroup = true;
    } else if (name.equals("org.apache.hadoop.mapred.JobInProgress$Counter")) {
      jobCounterGroup = true;
    }
    if (jobCounterGroup) {
      for (Counter counter : counterGroup) {
        if (counter.getName().equals("TOTAL_LAUNCHED_MAPS")) {
          assertEquals(1, counter.getValue());
          return;
        }
      }
    }
  }
  fail();
}
 
Example 10  Project: big-c   File: JobImpl.java
@Override
public Counters getAllCounters() {

  readLock.lock();

  try {
    JobStateInternal state = getInternalState();
    if (state == JobStateInternal.ERROR || state == JobStateInternal.FAILED
        || state == JobStateInternal.KILLED || state == JobStateInternal.SUCCEEDED) {
      this.mayBeConstructFinalFullCounters();
      return fullCounters;
    }

    Counters counters = new Counters();
    counters.incrAllCounters(jobCounters);
    return incrTaskCounters(counters, tasks.values());

  } finally {
    readLock.unlock();
  }
}
 
Example 11  Project: big-c   File: TestFetchFailure.java
private void updateStatus(MRApp app, TaskAttempt attempt, Phase phase) {
  TaskAttemptStatusUpdateEvent.TaskAttemptStatus status = new TaskAttemptStatusUpdateEvent.TaskAttemptStatus();
  status.counters = new Counters();
  status.fetchFailedMaps = new ArrayList<TaskAttemptId>();
  status.id = attempt.getID();
  status.mapFinishTime = 0;
  status.phase = phase;
  status.progress = 0.5f;
  status.shuffleFinishTime = 0;
  status.sortFinishTime = 0;
  status.stateString = "OK";
  status.taskState = attempt.getState();
  TaskAttemptStatusUpdateEvent event = new TaskAttemptStatusUpdateEvent(attempt.getID(),
      status);
  app.getContext().getEventHandler().handle(event);
}
 
Example 12  Project: hbase   File: IntegrationTestBigLinkedList.java
/**
 * Verify the values in the Counters against the expected number of entries written.
 *
 * @param expectedReferenced
 *          Expected number of referenced entries
 * @param counters
 *          The Job's Counters object
 * @return True if the values match what's expected, false otherwise
 */
protected boolean verifyExpectedValues(long expectedReferenced, Counters counters) {
  final Counter referenced = counters.findCounter(Counts.REFERENCED);
  final Counter unreferenced = counters.findCounter(Counts.UNREFERENCED);
  boolean success = true;

  if (expectedReferenced != referenced.getValue()) {
    LOG.error("Expected referenced count does not match with actual referenced count. " +
        "expected referenced=" + expectedReferenced + " ,actual=" + referenced.getValue());
    success = false;
  }

  if (unreferenced.getValue() > 0) {
    final Counter multiref = counters.findCounter(Counts.EXTRAREFERENCES);
    boolean couldBeMultiRef = (multiref.getValue() == unreferenced.getValue());
    LOG.error("Unreferenced nodes were not expected. Unreferenced count=" + unreferenced.getValue()
        + (couldBeMultiRef ? "; could be due to duplicate random numbers" : ""));
    success = false;
  }

  return success;
}
 
Example 13  Project: hbase   File: TestTableMapReduce.java
/**
 * Verify scan counters are emitted from the job
 * @param job
 * @throws IOException
 */
private void verifyJobCountersAreEmitted(Job job) throws IOException {
  Counters counters = job.getCounters();
  Counter counter
    = counters.findCounter(TableRecordReaderImpl.HBASE_COUNTER_GROUP_NAME, "RPC_CALLS");
  assertNotNull("Unable to find Job counter for HBase scan metrics, RPC_CALLS", counter);
  assertTrue("Counter value for RPC_CALLS should be larger than 0", counter.getValue() > 0);
}
 
Example 14
private static long getRecordCount(Optional<Job> job) {

    if (!job.isPresent()) {
      return -1L;
    }

    Counters counters = null;
    try {
      counters = job.get().getCounters();
    } catch (IOException e) {
      LOG.debug("Failed to get job counters. Record count will not be set. ", e);
      return -1L;
    }

    Counter recordCounter = counters.findCounter(RecordKeyDedupReducerBase.EVENT_COUNTER.RECORD_COUNT);

    if (recordCounter != null && recordCounter.getValue() != 0) {
      return recordCounter.getValue();
    }

    recordCounter = counters.findCounter(RecordKeyMapperBase.EVENT_COUNTER.RECORD_COUNT);

    if (recordCounter != null && recordCounter.getValue() != 0) {
      return recordCounter.getValue();
    }

    LOG.debug("Non zero record count not found in both mapper and reducer counters");

    return -1l;
  }
 
Example 15  Project: geowave   File: GeoWaveAnalyticJobRunner.java
@SuppressWarnings("rawtypes")
@Override
public int run(final String[] args) throws Exception {
  final Job job = mapReduceIntegrater.getJob(this);

  configure(job);

  final ScopedJobConfiguration configWrapper =
      new ScopedJobConfiguration(job.getConfiguration(), getScope());

  final FormatConfiguration inputFormat =
      configWrapper.getInstance(
          InputParameters.Input.INPUT_FORMAT,
          FormatConfiguration.class,
          null);

  if (inputFormat != null) {
    job.setInputFormatClass((Class<? extends InputFormat>) inputFormat.getFormatClass());
  }

  final FormatConfiguration outputFormat =
      configWrapper.getInstance(
          OutputParameters.Output.OUTPUT_FORMAT,
          FormatConfiguration.class,
          null);

  if (outputFormat != null) {
    job.setOutputFormatClass((Class<? extends OutputFormat>) outputFormat.getFormatClass());
  }

  job.setNumReduceTasks(configWrapper.getInt(OutputParameters.Output.REDUCER_COUNT, 1));

  job.setJobName(getJobName());

  job.setJarByClass(this.getClass());
  final Counters counters = mapReduceIntegrater.waitForCompletion(job);
  lastCounterSet = counters;
  return (counters == null) ? 1 : 0;
}
 
Example 16  Project: datawave   File: CounterStatsDClient.java
public void sendFinalStats(Counters counters) {
    if (client != null) {
        for (CounterGroup group : counters) {
            for (Counter counter : group) {
                if (log.isTraceEnabled()) {
                    log.trace("Looking for aspect matching " + group.getName() + " / " + counter.getName());
                }
                CounterToStatsDConfiguration.StatsDAspect aspect = config.getAspect(CounterToStatsDConfiguration.StatsDOutputType.FINAL, group, counter);
                if (aspect != null) {
                    String fullName = aspect.getFullName(counter.getName());
                    if (log.isTraceEnabled()) {
                        log.trace("Sending " + aspect.getType() + '(' + fullName + " -> " + counter.getValue() + ')');
                    }
                    switch (aspect.getType()) {
                        case GAUGE:
                            client.gauge(fullName, counter.getValue());
                            break;
                        case COUNTER:
                            client.count(fullName, counter.getValue());
                            break;
                        default:
                            client.time(fullName, counter.getValue());
                    }
                }
            }
        }
    }
}
 
Example 17  Project: big-c   File: TestUberAM.java
@Override
protected void verifyRandomWriterCounters(Job job)
    throws InterruptedException, IOException {
  super.verifyRandomWriterCounters(job);
  Counters counters = job.getCounters();
  Assert.assertEquals(3, counters.findCounter(JobCounter.NUM_UBER_SUBMAPS)
      .getValue());
  Assert.assertEquals(3,
      counters.findCounter(JobCounter.TOTAL_LAUNCHED_UBERTASKS).getValue());
}
 
Example 18  Project: incubator-tez   File: ClientServiceDelegate.java
public Counters getJobCounters(JobID jobId)
    throws IOException, InterruptedException {
  // FIXME needs counters support from DAG
  // with a translation layer on client side
  Counters empty = new Counters();
  return empty;
}
 
Example 19  Project: incubator-retired-blur   File: IndexerJobDriver.java
private boolean runMrWithLookup(String uuid, TableDescriptor descriptor, List<Path> inprogressPathList, String table,
    Path fileCache, Path outputPath, int reducerMultipler, Path tmpPath, TableStats tableStats, String snapshot)
    throws ClassNotFoundException, IOException, InterruptedException {
  PartitionedInputResult result = buildPartitionedInputData(uuid, tmpPath, descriptor, inprogressPathList, snapshot,
      fileCache);

  Job job = Job.getInstance(getConf(), "Blur Row Updater for table [" + table + "]");

  ExistingDataIndexLookupMapper.setSnapshot(job, MRUPDATE_SNAPSHOT);
  FileInputFormat.addInputPath(job, result._partitionedInputData);
  MultipleInputs.addInputPath(job, result._partitionedInputData, SequenceFileInputFormat.class,
      ExistingDataIndexLookupMapper.class);

  for (Path p : inprogressPathList) {
    FileInputFormat.addInputPath(job, p);
    MultipleInputs.addInputPath(job, p, SequenceFileInputFormat.class, NewDataMapper.class);
  }

  BlurOutputFormat.setOutputPath(job, outputPath);
  BlurOutputFormat.setupJob(job, descriptor);

  job.setReducerClass(UpdateReducer.class);
  job.setMapOutputKeyClass(IndexKey.class);
  job.setMapOutputValueClass(IndexValue.class);
  job.setPartitionerClass(IndexKeyPartitioner.class);
  job.setGroupingComparatorClass(IndexKeyWritableComparator.class);

  BlurOutputFormat.setReducerMultiplier(job, reducerMultipler);

  boolean success = job.waitForCompletion(true);
  Counters counters = job.getCounters();
  LOG.info("Counters [" + counters + "]");
  return success;
}
 
Example 20  Project: hadoop   File: TaskFinishedEvent.java
/**
 * Create an event to record the successful completion of a task
 * @param id Task ID
 * @param attemptId Task Attempt ID of the successful attempt for this task
 * @param finishTime Finish time of the task
 * @param taskType Type of the task
 * @param status Status string
 * @param counters Counters for the task
 */
public TaskFinishedEvent(TaskID id, TaskAttemptID attemptId, long finishTime,
                         TaskType taskType,
                         String status, Counters counters) {
  this.taskid = id;
  this.successfulAttemptId = attemptId;
  this.finishTime = finishTime;
  this.taskType = taskType;
  this.status = status;
  this.counters = counters;
}
 
Example 21  Project: big-c   File: TestJobHistoryEventHandler.java
private AppContext mockAppContext(ApplicationId appId, boolean isLastAMRetry) {
  JobId jobId = TypeConverter.toYarn(TypeConverter.fromYarn(appId));
  AppContext mockContext = mock(AppContext.class);
  Job mockJob = mock(Job.class);
  when(mockJob.getAllCounters()).thenReturn(new Counters());
  when(mockJob.getTotalMaps()).thenReturn(10);
  when(mockJob.getTotalReduces()).thenReturn(10);
  when(mockJob.getName()).thenReturn("mockjob");
  when(mockContext.getJob(jobId)).thenReturn(mockJob);
  when(mockContext.getApplicationID()).thenReturn(appId);
  when(mockContext.isLastAMRetry()).thenReturn(isLastAMRetry);
  return mockContext;
}
 
Example 22  Project: Cubert   File: ScriptStats.java
public ScriptStats()
{
    conf = new Configuration();
    try
    {
        jobClient = new JobClient(new JobConf(conf));
    }
    catch (IOException e)
    {
        throw new RuntimeException(e);
    }
    aggregate.startTime = System.currentTimeMillis();
    aggregate.counters = new Counters();
}
 
Example 23  Project: big-c   File: TaskAttemptFinishedEvent.java
/**
 * Create an event to record successful finishes for setup and cleanup 
 * attempts
 * @param id Attempt ID
 * @param taskType Type of task
 * @param taskStatus Status of task
 * @param finishTime Finish time of attempt
 * @param rackName Rack name of the host where the attempt executed
 * @param hostname Host where the attempt executed
 * @param state State string
 * @param counters Counters for the attempt
 */
public TaskAttemptFinishedEvent(TaskAttemptID id, 
    TaskType taskType, String taskStatus, 
    long finishTime, String rackName,
    String hostname, String state, Counters counters) {
  this.attemptId = id;
  this.taskType = taskType;
  this.taskStatus = taskStatus;
  this.finishTime = finishTime;
  this.rackName = rackName;
  this.hostname = hostname;
  this.state = state;
  this.counters = counters;
}
 
Example 24  Project: big-c   File: HistoryEventEmitter.java
protected static Counters maybeParseCounters(String counters) {
  try {
    return parseCounters(counters);
  } catch (ParseException e) {
    LOG.warn("The counter string, \"" + counters + "\" is badly formatted.");
    return null;
  }
}
 
Example 25  Project: big-c   File: TestJobHistoryEventHandler.java
@Test (timeout=50000)
public void testCountersToJSON() throws Exception {
  JobHistoryEventHandler jheh = new JobHistoryEventHandler(null, 0);
  Counters counters = new Counters();
  CounterGroup group1 = counters.addGroup("DOCTORS",
          "Incarnations of the Doctor");
  group1.addCounter("PETER_CAPALDI", "Peter Capaldi", 12);
  group1.addCounter("MATT_SMITH", "Matt Smith", 11);
  group1.addCounter("DAVID_TENNANT", "David Tennant", 10);
  CounterGroup group2 = counters.addGroup("COMPANIONS",
          "Companions of the Doctor");
  group2.addCounter("CLARA_OSWALD", "Clara Oswald", 6);
  group2.addCounter("RORY_WILLIAMS", "Rory Williams", 5);
  group2.addCounter("AMY_POND", "Amy Pond", 4);
  group2.addCounter("MARTHA_JONES", "Martha Jones", 3);
  group2.addCounter("DONNA_NOBLE", "Donna Noble", 2);
  group2.addCounter("ROSE_TYLER", "Rose Tyler", 1);
  JsonNode jsonNode = jheh.countersToJSON(counters);
  String jsonStr = new ObjectMapper().writeValueAsString(jsonNode);
  String expected = "[{\"NAME\":\"COMPANIONS\",\"DISPLAY_NAME\":\"Companions "
      + "of the Doctor\",\"COUNTERS\":[{\"NAME\":\"AMY_POND\",\"DISPLAY_NAME\""
      + ":\"Amy Pond\",\"VALUE\":4},{\"NAME\":\"CLARA_OSWALD\","
      + "\"DISPLAY_NAME\":\"Clara Oswald\",\"VALUE\":6},{\"NAME\":"
      + "\"DONNA_NOBLE\",\"DISPLAY_NAME\":\"Donna Noble\",\"VALUE\":2},"
      + "{\"NAME\":\"MARTHA_JONES\",\"DISPLAY_NAME\":\"Martha Jones\","
      + "\"VALUE\":3},{\"NAME\":\"RORY_WILLIAMS\",\"DISPLAY_NAME\":\"Rory "
      + "Williams\",\"VALUE\":5},{\"NAME\":\"ROSE_TYLER\",\"DISPLAY_NAME\":"
      + "\"Rose Tyler\",\"VALUE\":1}]},{\"NAME\":\"DOCTORS\",\"DISPLAY_NAME\""
      + ":\"Incarnations of the Doctor\",\"COUNTERS\":[{\"NAME\":"
      + "\"DAVID_TENNANT\",\"DISPLAY_NAME\":\"David Tennant\",\"VALUE\":10},"
      + "{\"NAME\":\"MATT_SMITH\",\"DISPLAY_NAME\":\"Matt Smith\",\"VALUE\":"
      + "11},{\"NAME\":\"PETER_CAPALDI\",\"DISPLAY_NAME\":\"Peter Capaldi\","
      + "\"VALUE\":12}]}]";
  Assert.assertEquals(expected, jsonStr);
}
 
Example 26  Project: big-c   File: TestUberAM.java
@Override
protected void verifyFailingMapperCounters(Job job)
    throws InterruptedException, IOException {
  Counters counters = job.getCounters();
  super.verifyFailingMapperCounters(job);
  Assert.assertEquals(2,
      counters.findCounter(JobCounter.TOTAL_LAUNCHED_UBERTASKS).getValue());
  Assert.assertEquals(2, counters.findCounter(JobCounter.NUM_UBER_SUBMAPS)
      .getValue());
  Assert.assertEquals(2, counters
      .findCounter(JobCounter.NUM_FAILED_UBERTASKS).getValue());
}
 
Example 27  Project: hadoop   File: TestJobHistoryEventHandler.java
@Test (timeout=50000)
public void testCountersToJSON() throws Exception {
  JobHistoryEventHandler jheh = new JobHistoryEventHandler(null, 0);
  Counters counters = new Counters();
  CounterGroup group1 = counters.addGroup("DOCTORS",
          "Incarnations of the Doctor");
  group1.addCounter("PETER_CAPALDI", "Peter Capaldi", 12);
  group1.addCounter("MATT_SMITH", "Matt Smith", 11);
  group1.addCounter("DAVID_TENNANT", "David Tennant", 10);
  CounterGroup group2 = counters.addGroup("COMPANIONS",
          "Companions of the Doctor");
  group2.addCounter("CLARA_OSWALD", "Clara Oswald", 6);
  group2.addCounter("RORY_WILLIAMS", "Rory Williams", 5);
  group2.addCounter("AMY_POND", "Amy Pond", 4);
  group2.addCounter("MARTHA_JONES", "Martha Jones", 3);
  group2.addCounter("DONNA_NOBLE", "Donna Noble", 2);
  group2.addCounter("ROSE_TYLER", "Rose Tyler", 1);
  JsonNode jsonNode = jheh.countersToJSON(counters);
  String jsonStr = new ObjectMapper().writeValueAsString(jsonNode);
  String expected = "[{\"NAME\":\"COMPANIONS\",\"DISPLAY_NAME\":\"Companions "
      + "of the Doctor\",\"COUNTERS\":[{\"NAME\":\"AMY_POND\",\"DISPLAY_NAME\""
      + ":\"Amy Pond\",\"VALUE\":4},{\"NAME\":\"CLARA_OSWALD\","
      + "\"DISPLAY_NAME\":\"Clara Oswald\",\"VALUE\":6},{\"NAME\":"
      + "\"DONNA_NOBLE\",\"DISPLAY_NAME\":\"Donna Noble\",\"VALUE\":2},"
      + "{\"NAME\":\"MARTHA_JONES\",\"DISPLAY_NAME\":\"Martha Jones\","
      + "\"VALUE\":3},{\"NAME\":\"RORY_WILLIAMS\",\"DISPLAY_NAME\":\"Rory "
      + "Williams\",\"VALUE\":5},{\"NAME\":\"ROSE_TYLER\",\"DISPLAY_NAME\":"
      + "\"Rose Tyler\",\"VALUE\":1}]},{\"NAME\":\"DOCTORS\",\"DISPLAY_NAME\""
      + ":\"Incarnations of the Doctor\",\"COUNTERS\":[{\"NAME\":"
      + "\"DAVID_TENNANT\",\"DISPLAY_NAME\":\"David Tennant\",\"VALUE\":10},"
      + "{\"NAME\":\"MATT_SMITH\",\"DISPLAY_NAME\":\"Matt Smith\",\"VALUE\":"
      + "11},{\"NAME\":\"PETER_CAPALDI\",\"DISPLAY_NAME\":\"Peter Capaldi\","
      + "\"VALUE\":12}]}]";
  Assert.assertEquals(expected, jsonStr);
}
 
Example 28  Project: hadoop   File: TestMiniMRClientCluster.java
private void validateCounters(Counters counters, long mapInputRecords,
    long mapOutputRecords, long reduceInputGroups, long reduceOutputRecords) {
  assertEquals("MapInputRecords", mapInputRecords, counters.findCounter(
      "MyCounterGroup", "MAP_INPUT_RECORDS").getValue());
  assertEquals("MapOutputRecords", mapOutputRecords, counters.findCounter(
      "MyCounterGroup", "MAP_OUTPUT_RECORDS").getValue());
  assertEquals("ReduceInputGroups", reduceInputGroups, counters.findCounter(
      "MyCounterGroup", "REDUCE_INPUT_GROUPS").getValue());
  assertEquals("ReduceOutputRecords", reduceOutputRecords, counters
      .findCounter("MyCounterGroup", "REDUCE_OUTPUT_RECORDS").getValue());
}
 
Example 29  Project: big-c   File: TaskFailedEvent.java
/**
 * Create an event to record task failure
 * @param id Task ID
 * @param finishTime Finish time of the task
 * @param taskType Type of the task
 * @param error Error String
 * @param status Status
 * @param failedDueToAttempt The attempt id due to which the task failed
 * @param counters Counters for the task
 */
public TaskFailedEvent(TaskID id, long finishTime, 
    TaskType taskType, String error, String status,
    TaskAttemptID failedDueToAttempt, Counters counters) {
  this.id = id;
  this.finishTime = finishTime;
  this.taskType = taskType;
  this.error = error;
  this.status = status;
  this.failedDueToAttempt = failedDueToAttempt;
  this.counters = counters;
}
 
Example 30  Project: hadoop   File: TestMRJobs.java
protected void verifyFailingMapperCounters(Job job)
    throws InterruptedException, IOException {
  Counters counters = job.getCounters();
  Assert.assertEquals(2, counters.findCounter(JobCounter.OTHER_LOCAL_MAPS)
      .getValue());
  Assert.assertEquals(2, counters.findCounter(JobCounter.TOTAL_LAUNCHED_MAPS)
      .getValue());
  Assert.assertEquals(2, counters.findCounter(JobCounter.NUM_FAILED_MAPS)
      .getValue());
  Assert
      .assertTrue(counters.findCounter(JobCounter.SLOTS_MILLIS_MAPS) != null
          && counters.findCounter(JobCounter.SLOTS_MILLIS_MAPS).getValue() != 0);
}
 