Source-code examples for the class org.apache.hadoop.mapreduce.counters.GenericCounter

The examples below show how the org.apache.hadoop.mapreduce.counters.GenericCounter API is used in real open-source projects; you can also follow each project's link to GitHub to read the full source.
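
Before the project examples, here is a minimal self-contained sketch of the GenericCounter API they all rely on. Every constructor and method used here also appears in the project code below; GenericCounter implements the org.apache.hadoop.mapreduce.Counter interface. The class name GenericCounterDemo and the counter names are illustrative only.

import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.counters.GenericCounter;

public class GenericCounterDemo {
  public static void main(String[] args) {
    // Three-argument form: internal name, display name, initial value.
    Counter rows = new GenericCounter("ROWS_WRITTEN", "Rows written", 0L);
    rows.increment(3L);
    rows.increment(2L);
    System.out.println(rows.getName() + " = " + rows.getValue()); // ROWS_WRITTEN = 5

    // No-argument form: a throwaway counter, handy as constructor plumbing
    // for framework classes such as ReduceContextImpl (see Examples 1, 7, 8).
    Counter scratch = new GenericCounter();
    scratch.setValue(42L);
    System.out.println(scratch.getValue()); // 42
  }
}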

Example 1 · Project: kylin · File: HadoopMultipleOutputFormat.java
@Override
public void open(int taskNumber, int numTasks) throws IOException {
    super.open(taskNumber, numTasks);

    synchronized (OPEN_MULTIPLE_MUTEX) {
        try {
            TaskInputOutputContext taskInputOutputContext = new ReduceContextImpl(configuration,
                    context.getTaskAttemptID(), new InputIterator(), new GenericCounter(), new GenericCounter(),
                    recordWriter, outputCommitter, new DummyReporter(), null,
                    BytesWritable.class, BytesWritable.class);
            this.writer = new MultipleOutputs(taskInputOutputContext);
        } catch (InterruptedException e) {
            throw new IOException("Could not create MultipleOutputs.", e);
        }
    }
}
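
In this example the two no-argument GenericCounter instances serve as the ReduceContextImpl constructor's input key counter and input value counter. Kylin only needs the resulting context as a vehicle for MultipleOutputs, so throwaway counters whose values are never read are sufficient.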
 
Example 2 · Project: sqoop-on-spark · File: TestSqoopLoader.java
@BeforeMethod(alwaysRun = true)
public void setUp() {
  conf = new Configuration();
  conf.setIfUnset(MRJobConstants.TO_INTERMEDIATE_DATA_FORMAT,
      CSVIntermediateDataFormat.class.getName());
  jobContextMock = mock(TaskAttemptContext.class);
  GenericCounter counter = new GenericCounter("test", "test-me");
  when(((TaskAttemptContext) jobContextMock).getCounter(SqoopCounters.ROWS_WRITTEN)).thenReturn(counter);
  org.apache.hadoop.mapred.JobConf testConf = new org.apache.hadoop.mapred.JobConf();
  when(jobContextMock.getConfiguration()).thenReturn(testConf);
}
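
The pattern above, stubbing a mocked context to hand back a real GenericCounter, means the code under test increments a live counter that the test can assert on afterwards. A minimal sketch of the same pattern follows; the class name and the group/counter strings are made up for illustration.

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.counters.GenericCounter;

public class CounterStubSketch {
  public static void main(String[] args) {
    TaskAttemptContext context = mock(TaskAttemptContext.class);
    Counter rowsWritten = new GenericCounter("test", "test-me");
    when(context.getCounter("example-group", "rows-written")).thenReturn(rowsWritten);

    // Production code increments through the (mocked) context...
    context.getCounter("example-group", "rows-written").increment(5L);

    // ...and the test asserts against the real counter instance.
    if (rowsWritten.getValue() != 5L) {
      throw new AssertionError("expected 5, got " + rowsWritten.getValue());
    }
  }
}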
 
Example 3 · Project: hadoop · File: Counters.java
public Counter() {
  this(new GenericCounter());
}
 
Example 4 · Project: hadoop · File: Counters.java
@Override
protected Counter newCounter(String counterName, String displayName,
                             long value) {
  return new Counter(new GenericCounter(counterName, displayName, value));
}
 
Example 5 · Project: hadoop · File: Counters.java
@Override
protected Counter newCounter(String name, String displayName, long value) {
  return new GenericCounter(name, displayName, value);
}
 
Example 6 · Project: hadoop · File: Counters.java
@Override
protected Counter newCounter() {
  return new GenericCounter();
}
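
Examples 3 through 6 show where GenericCounter sits inside Hadoop itself: both the legacy org.apache.hadoop.mapred.Counters and the newer org.apache.hadoop.mapreduce.Counters implement their newCounter(...) factory hooks by returning a GenericCounter, either wrapped in the legacy Counter adapter or directly. User-defined counters in generic counter groups are therefore GenericCounter instances underneath.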
 
Example 7 · Project: hadoop · File: TestGridMixClasses.java
@Test (timeout=3000)
public void testLoadJobLoadReducer() throws Exception {
  LoadJob.LoadReducer test = new LoadJob.LoadReducer();

  Configuration conf = new Configuration();
  conf.setInt(JobContext.NUM_REDUCES, 2);
  CompressionEmulationUtil.setCompressionEmulationEnabled(conf, true);
  conf.setBoolean(FileOutputFormat.COMPRESS, true);

  CompressionEmulationUtil.setCompressionEmulationEnabled(conf, true);
  conf.setBoolean(MRJobConfig.MAP_OUTPUT_COMPRESS, true);
  TaskAttemptID taskid = new TaskAttemptID();

  RawKeyValueIterator input = new FakeRawKeyValueIterator();

  Counter counter = new GenericCounter();
  Counter inputValueCounter = new GenericCounter();
  LoadRecordWriter output = new LoadRecordWriter();

  OutputCommitter committer = new CustomOutputCommitter();

  StatusReporter reporter = new DummyReporter();
  RawComparator<GridmixKey> comparator = new FakeRawComparator();

  ReduceContext<GridmixKey, GridmixRecord, NullWritable, GridmixRecord> reduceContext = new ReduceContextImpl<GridmixKey, GridmixRecord, NullWritable, GridmixRecord>(
          conf, taskid, input, counter, inputValueCounter, output, committer,
          reporter, comparator, GridmixKey.class, GridmixRecord.class);
  // read the first record before handing the context to the reducer
  reduceContext.nextKeyValue();
  org.apache.hadoop.mapreduce.Reducer<GridmixKey, GridmixRecord, NullWritable, GridmixRecord>.Context context = new WrappedReducer<GridmixKey, GridmixRecord, NullWritable, GridmixRecord>()
          .getReducerContext(reduceContext);

  // test.setup(context);
  test.run(context);
  // 9 records counted here (the one consumed above is excluded)
  assertEquals(9, counter.getValue());
  assertEquals(10, inputValueCounter.getValue());
  assertEquals(1, output.getData().size());
  GridmixRecord record = output.getData().values().iterator()
          .next();

  assertEquals(1593, record.getSize());
}
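
Unlike in Example 1, the counters here are live: ReduceContextImpl increments the input key counter and the input value counter as it consumes records, which is exactly what the assertEquals(9, ...) and assertEquals(10, ...) checks at the end verify.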
 
Example 8 · Project: hadoop · File: TestGridMixClasses.java
@Test (timeout=3000)
public void testSleepReducer() throws Exception {
  Configuration conf = new Configuration();
  conf.setInt(JobContext.NUM_REDUCES, 2);
  CompressionEmulationUtil.setCompressionEmulationEnabled(conf, true);
  conf.setBoolean(FileOutputFormat.COMPRESS, true);

  CompressionEmulationUtil.setCompressionEmulationEnabled(conf, true);
  conf.setBoolean(MRJobConfig.MAP_OUTPUT_COMPRESS, true);
  TaskAttemptID taskId = new TaskAttemptID();

  RawKeyValueIterator input = new FakeRawKeyValueReducerIterator();

  Counter counter = new GenericCounter();
  Counter inputValueCounter = new GenericCounter();
  RecordWriter<NullWritable, NullWritable> output = new LoadRecordReduceWriter();

  OutputCommitter committer = new CustomOutputCommitter();

  StatusReporter reporter = new DummyReporter();
  RawComparator<GridmixKey> comparator = new FakeRawComparator();

  ReduceContext<GridmixKey, NullWritable, NullWritable, NullWritable> reducecontext = new ReduceContextImpl<GridmixKey, NullWritable, NullWritable, NullWritable>(
          conf, taskId, input, counter, inputValueCounter, output, committer,
          reporter, comparator, GridmixKey.class, NullWritable.class);
  org.apache.hadoop.mapreduce.Reducer<GridmixKey, NullWritable, NullWritable, NullWritable>.Context context = new WrappedReducer<GridmixKey, NullWritable, NullWritable, NullWritable>()
          .getReducerContext(reducecontext);

  SleepReducer test = new SleepReducer();
  long start = System.currentTimeMillis();
  test.setup(context);
  long sleeper = context.getCurrentKey().getReduceOutputBytes();
  // status has been changed
  assertEquals("Sleeping... " + sleeper + " ms left", context.getStatus());
  // should sleep 0.9 sec

  assertTrue(System.currentTimeMillis() >= (start + sleeper));
  test.cleanup(context);
  // status has been changed again

  assertEquals("Slept for " + sleeper, context.getStatus());

}
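
In this sleep-emulation test the two GenericCounter instances are again just required ReduceContextImpl constructor arguments; the assertions target SleepReducer's status strings, and the counter values are never read.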
 
Examples 9-14 · Project: big-c · Files: Counters.java, TestGridMixClasses.java

The big-c project contains the same six snippets as Examples 3 through 8 above, character for character, so the duplicated code is not repeated here.
 
Example 15 · Project: incubator-gobblin · File: KeyDedupReducerTest.java
@Test
public void testAvroReduce()
    throws IOException, InterruptedException {
  Schema keySchema = new Schema.Parser().parse(AVRO_KEY_SCHEMA);
  GenericRecordBuilder keyRecordBuilder = new GenericRecordBuilder(keySchema.getField("key").schema());
  keyRecordBuilder.set("partitionKey", 1);
  keyRecordBuilder.set("environment", "test");
  keyRecordBuilder.set("subKey", "2");
  GenericRecord record = keyRecordBuilder.build();
  keyRecordBuilder = new GenericRecordBuilder(keySchema);
  keyRecordBuilder.set("key", record);
  GenericRecord keyRecord = keyRecordBuilder.build();

  // Test reducer with delta field "scn"
  Schema fullSchema = new Schema.Parser().parse(AVRO_FULL_SCHEMA);
  AvroValue<GenericRecord> fullRecord1 = new AvroValue<>();
  AvroValue<GenericRecord> fullRecord2 = new AvroValue<>();
  AvroValue<GenericRecord> fullRecord3 = new AvroValue<>();
  AvroValue<GenericRecord> fullRecord4 = new AvroValue<>();

  GenericRecordBuilder fullRecordBuilder1 = new GenericRecordBuilder(fullSchema);
  fullRecordBuilder1.set("key", record);
  fullRecordBuilder1.set("scn", 123);
  fullRecordBuilder1.set("scn2", 100);
  fullRecord1.datum(fullRecordBuilder1.build());
  fullRecordBuilder1.set("scn", 125);
  fullRecordBuilder1.set("scn2", 1);
  fullRecord2.datum(fullRecordBuilder1.build());
  fullRecordBuilder1.set("scn", 124);
  fullRecordBuilder1.set("scn2", 10);
  fullRecord3.datum(fullRecordBuilder1.build());
  fullRecordBuilder1.set("scn", 122);
  fullRecordBuilder1.set("scn2", 1000);
  fullRecord4.datum(fullRecordBuilder1.build());

  Configuration conf = mock(Configuration.class);
  when(conf.get(AvroKeyDedupReducer.DELTA_SCHEMA_PROVIDER))
      .thenReturn(FieldAttributeBasedDeltaFieldsProvider.class.getName());
  when(conf.get(FieldAttributeBasedDeltaFieldsProvider.ATTRIBUTE_FIELD)).thenReturn("attributes_json");

  when(conf.get(FieldAttributeBasedDeltaFieldsProvider.DELTA_PROP_NAME,
      FieldAttributeBasedDeltaFieldsProvider.DEFAULT_DELTA_PROP_NAME))
      .thenReturn(FieldAttributeBasedDeltaFieldsProvider.DEFAULT_DELTA_PROP_NAME);
  RecordKeyDedupReducerBase<AvroKey<GenericRecord>, AvroValue<GenericRecord>,
      AvroKey<GenericRecord>, NullWritable> reducer = new AvroKeyDedupReducer();

  WrappedReducer.Context reducerContext = mock(WrappedReducer.Context.class);
  when(reducerContext.getConfiguration()).thenReturn(conf);
  Counter moreThan1Counter = new GenericCounter();
  when(reducerContext.getCounter(RecordKeyDedupReducerBase.EVENT_COUNTER.MORE_THAN_1)).thenReturn(moreThan1Counter);

  Counter dedupedCounter = new GenericCounter();
  when(reducerContext.getCounter(RecordKeyDedupReducerBase.EVENT_COUNTER.DEDUPED)).thenReturn(dedupedCounter);

  Counter recordCounter = new GenericCounter();
  when(reducerContext.getCounter(RecordKeyDedupReducerBase.EVENT_COUNTER.RECORD_COUNT)).thenReturn(recordCounter);
  reducer.setup(reducerContext);

  doNothing().when(reducerContext).write(any(AvroKey.class), any(NullWritable.class));
  List<AvroValue<GenericRecord>> valueIterable =
      Lists.newArrayList(fullRecord1, fullRecord2, fullRecord3, fullRecord4);

  AvroKey<GenericRecord> key = new AvroKey<>();
  key.datum(keyRecord);
  reducer.reduce(key, valueIterable, reducerContext);
  Assert.assertEquals(reducer.getOutKey().datum(), fullRecord2.datum());

  // Test reducer without delta field
  Configuration conf2 = mock(Configuration.class);
  when(conf2.get(AvroKeyDedupReducer.DELTA_SCHEMA_PROVIDER)).thenReturn(null);
  when(reducerContext.getConfiguration()).thenReturn(conf2);
  RecordKeyDedupReducerBase<AvroKey<GenericRecord>, AvroValue<GenericRecord>,
      AvroKey<GenericRecord>, NullWritable> reducer2 = new AvroKeyDedupReducer();
  reducer2.setup(reducerContext);
  reducer2.reduce(key, valueIterable, reducerContext);
  Assert.assertEquals(reducer2.getOutKey().datum(), fullRecord1.datum());

  // Test reducer with compound delta key.
  Schema fullSchema2 = new Schema.Parser().parse(AVRO_FULL_SCHEMA_WITH_TWO_DELTA_FIELDS);
  GenericRecordBuilder fullRecordBuilder2 = new GenericRecordBuilder(fullSchema2);
  fullRecordBuilder2.set("key", record);
  fullRecordBuilder2.set("scn", 123);
  fullRecordBuilder2.set("scn2", 100);
  fullRecord1.datum(fullRecordBuilder2.build());
  fullRecordBuilder2.set("scn", 125);
  fullRecordBuilder2.set("scn2", 1000);
  fullRecord2.datum(fullRecordBuilder2.build());
  fullRecordBuilder2.set("scn", 126);
  fullRecordBuilder2.set("scn2", 1000);
  fullRecord3.datum(fullRecordBuilder2.build());
  fullRecordBuilder2.set("scn", 130);
  fullRecordBuilder2.set("scn2", 100);
  fullRecord4.datum(fullRecordBuilder2.build());
  List<AvroValue<GenericRecord>> valueIterable2 =
      Lists.newArrayList(fullRecord1, fullRecord2, fullRecord3, fullRecord4);
  reducer.reduce(key, valueIterable2, reducerContext);
  Assert.assertEquals(reducer.getOutKey().datum(), fullRecord3.datum());

}
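
As in the sqoop-on-spark test (Example 2), returning real GenericCounter instances from a mocked reducer context lets the reducer's event counters (RECORD_COUNT, MORE_THAN_1, DEDUPED) accumulate normally even though the context itself is a Mockito mock.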
 