Source Code Examples of org.apache.hadoop.mapreduce.lib.reduce.WrappedReducer

The examples below show how the org.apache.hadoop.mapreduce.lib.reduce.WrappedReducer API is used in real projects; you can also follow the project links to browse the full source code on GitHub.
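
All of the snippets share one pattern: build a low-level ReduceContext (usually a ReduceContextImpl), pass it to WrappedReducer.getReducerContext() to obtain the Reducer.Context view that user reducer code is written against, and drive the reducer with it. Here is a minimal, self-contained sketch of that pattern; the WrappedReducerSketch class and runReducer method are illustrative names, not taken from any project below.

import java.io.IOException;

import org.apache.hadoop.mapreduce.ReduceContext;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.reduce.WrappedReducer;

// Illustrative helper: adapt an existing ReduceContext to the Reducer.Context
// view that user reducer code is written against, then run the reducer with it.
public final class WrappedReducerSketch {
  public static <KIN, VIN, KOUT, VOUT> void runReducer(
      Reducer<KIN, VIN, KOUT, VOUT> reducer,
      ReduceContext<KIN, VIN, KOUT, VOUT> reduceContext)
      throws IOException, InterruptedException {
    Reducer<KIN, VIN, KOUT, VOUT>.Context reducerContext =
        new WrappedReducer<KIN, VIN, KOUT, VOUT>().getReducerContext(reduceContext);
    reducer.run(reducerContext); // invokes setup(), reduce() per key, then cleanup()
  }
}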

Example 1   Project: datawave   File: MockReduceDriver.java
List<MRPair<KEYOUT,VALUEOUT>> run() throws IOException, InterruptedException {
    TaskAttemptID id = new TaskAttemptID("testJob", 0, TaskType.REDUCE, 0, 0);
    final MockReduceContext context = new MockReduceContext(this.conf, id, BulkIngestKey.class, Value.class);
    context.reduceInput = this.input.iterator();
    
    // Adapt the mock ReduceContext to the Reducer.Context view that Reducer.run() expects.
    Reducer<KEYIN,VALUEIN,KEYOUT,VALUEOUT>.Context con = new WrappedReducer<KEYIN,VALUEIN,KEYOUT,VALUEOUT>().getReducerContext(context);
    this.reducer.run(con);
    return context.output;
}
 
Example 2   Project: hadoop   File: Task.java
@SuppressWarnings("unchecked")
protected static <INKEY,INVALUE,OUTKEY,OUTVALUE> 
org.apache.hadoop.mapreduce.Reducer<INKEY,INVALUE,OUTKEY,OUTVALUE>.Context
createReduceContext(org.apache.hadoop.mapreduce.Reducer
                      <INKEY,INVALUE,OUTKEY,OUTVALUE> reducer,
                    Configuration job,
                    org.apache.hadoop.mapreduce.TaskAttemptID taskId, 
                    RawKeyValueIterator rIter,
                    org.apache.hadoop.mapreduce.Counter inputKeyCounter,
                    org.apache.hadoop.mapreduce.Counter inputValueCounter,
                    org.apache.hadoop.mapreduce.RecordWriter<OUTKEY,OUTVALUE> output, 
                    org.apache.hadoop.mapreduce.OutputCommitter committer,
                    org.apache.hadoop.mapreduce.StatusReporter reporter,
                    RawComparator<INKEY> comparator,
                    Class<INKEY> keyClass, Class<INVALUE> valueClass
) throws IOException, InterruptedException {
  org.apache.hadoop.mapreduce.ReduceContext<INKEY, INVALUE, OUTKEY, OUTVALUE> 
  reduceContext = 
    new ReduceContextImpl<INKEY, INVALUE, OUTKEY, OUTVALUE>(job, taskId, 
                                                            rIter, 
                                                            inputKeyCounter, 
                                                            inputValueCounter, 
                                                            output, 
                                                            committer, 
                                                            reporter, 
                                                            comparator, 
                                                            keyClass, 
                                                            valueClass);

  org.apache.hadoop.mapreduce.Reducer<INKEY,INVALUE,OUTKEY,OUTVALUE>.Context 
      reducerContext = 
        new WrappedReducer<INKEY, INVALUE, OUTKEY, OUTVALUE>().getReducerContext(
            reduceContext);

  return reducerContext;
}
 
Example 3   Project: hadoop   File: Chain.java
/**
 * Create a reduce context that is based on ChainReduceContextImpl and the
 * given record writer
 */
private <KEYIN, VALUEIN, KEYOUT, VALUEOUT> 
Reducer<KEYIN, VALUEIN, KEYOUT, VALUEOUT>.Context createReduceContext(
    RecordWriter<KEYOUT, VALUEOUT> rw,
    ReduceContext<KEYIN, VALUEIN, KEYOUT, VALUEOUT> context,
    Configuration conf) {
  ReduceContext<KEYIN, VALUEIN, KEYOUT, VALUEOUT> reduceContext = 
    new ChainReduceContextImpl<KEYIN, VALUEIN, KEYOUT, VALUEOUT>(
        context, rw, conf);
  Reducer<KEYIN, VALUEIN, KEYOUT, VALUEOUT>.Context reducerContext = 
    new WrappedReducer<KEYIN, VALUEIN, KEYOUT, VALUEOUT>()
      .getReducerContext(reduceContext);
  return reducerContext;
}
 
Example 4   Project: big-c   File: Task.java
@SuppressWarnings("unchecked")
protected static <INKEY,INVALUE,OUTKEY,OUTVALUE> 
org.apache.hadoop.mapreduce.Reducer<INKEY,INVALUE,OUTKEY,OUTVALUE>.Context
createReduceContext(org.apache.hadoop.mapreduce.Reducer
                      <INKEY,INVALUE,OUTKEY,OUTVALUE> reducer,
                    Configuration job,
                    org.apache.hadoop.mapreduce.TaskAttemptID taskId, 
                    RawKeyValueIterator rIter,
                    org.apache.hadoop.mapreduce.Counter inputKeyCounter,
                    org.apache.hadoop.mapreduce.Counter inputValueCounter,
                    org.apache.hadoop.mapreduce.RecordWriter<OUTKEY,OUTVALUE> output, 
                    org.apache.hadoop.mapreduce.OutputCommitter committer,
                    org.apache.hadoop.mapreduce.StatusReporter reporter,
                    RawComparator<INKEY> comparator,
                    Class<INKEY> keyClass, Class<INVALUE> valueClass
) throws IOException, InterruptedException {
  org.apache.hadoop.mapreduce.ReduceContext<INKEY, INVALUE, OUTKEY, OUTVALUE> 
  reduceContext = 
    new ReduceContextImpl<INKEY, INVALUE, OUTKEY, OUTVALUE>(job, taskId, 
                                                            rIter, 
                                                            inputKeyCounter, 
                                                            inputValueCounter, 
                                                            output, 
                                                            committer, 
                                                            reporter, 
                                                            comparator, 
                                                            keyClass, 
                                                            valueClass);

  org.apache.hadoop.mapreduce.Reducer<INKEY,INVALUE,OUTKEY,OUTVALUE>.Context 
      reducerContext = 
        new WrappedReducer<INKEY, INVALUE, OUTKEY, OUTVALUE>().getReducerContext(
            reduceContext);

  return reducerContext;
}
 
Example 5   Project: big-c   File: Chain.java
/**
 * Create a reduce context that is based on ChainReduceContextImpl and the
 * given record writer
 */
private <KEYIN, VALUEIN, KEYOUT, VALUEOUT> 
Reducer<KEYIN, VALUEIN, KEYOUT, VALUEOUT>.Context createReduceContext(
    RecordWriter<KEYOUT, VALUEOUT> rw,
    ReduceContext<KEYIN, VALUEIN, KEYOUT, VALUEOUT> context,
    Configuration conf) {
  ReduceContext<KEYIN, VALUEIN, KEYOUT, VALUEOUT> reduceContext = 
    new ChainReduceContextImpl<KEYIN, VALUEIN, KEYOUT, VALUEOUT>(
        context, rw, conf);
  Reducer<KEYIN, VALUEIN, KEYOUT, VALUEOUT>.Context reducerContext = 
    new WrappedReducer<KEYIN, VALUEIN, KEYOUT, VALUEOUT>()
      .getReducerContext(reduceContext);
  return reducerContext;
}
 
Example 6   Project: hadoop   File: TestGridMixClasses.java
@Test (timeout=3000)
public void testLoadJobLoadReducer() throws Exception {
  LoadJob.LoadReducer test = new LoadJob.LoadReducer();

  Configuration conf = new Configuration();
  conf.setInt(JobContext.NUM_REDUCES, 2);
  CompressionEmulationUtil.setCompressionEmulationEnabled(conf, true);
  conf.setBoolean(FileOutputFormat.COMPRESS, true);

  CompressionEmulationUtil.setCompressionEmulationEnabled(conf, true);
  conf.setBoolean(MRJobConfig.MAP_OUTPUT_COMPRESS, true);
  TaskAttemptID taskid = new TaskAttemptID();

  RawKeyValueIterator input = new FakeRawKeyValueIterator();

  Counter counter = new GenericCounter();
  Counter inputValueCounter = new GenericCounter();
  LoadRecordWriter output = new LoadRecordWriter();

  OutputCommitter committer = new CustomOutputCommitter();

  StatusReporter reporter = new DummyReporter();
  RawComparator<GridmixKey> comparator = new FakeRawComparator();

  ReduceContext<GridmixKey, GridmixRecord, NullWritable, GridmixRecord> reduceContext = new ReduceContextImpl<GridmixKey, GridmixRecord, NullWritable, GridmixRecord>(
          conf, taskid, input, counter, inputValueCounter, output, committer,
          reporter, comparator, GridmixKey.class, GridmixRecord.class);
  // consume one record up front before running the reducer
  reduceContext.nextKeyValue();
  org.apache.hadoop.mapreduce.Reducer<GridmixKey, GridmixRecord, NullWritable, GridmixRecord>.Context context = new WrappedReducer<GridmixKey, GridmixRecord, NullWritable, GridmixRecord>()
          .getReducerContext(reduceContext);

  // test.setup(context);
  test.run(context);
  // 9 records were read during run() (10 total, minus the one consumed up front)
  assertEquals(9, counter.getValue());
  assertEquals(10, inputValueCounter.getValue());
  assertEquals(1, output.getData().size());
  GridmixRecord record = output.getData().values().iterator()
          .next();

  assertEquals(1593, record.getSize());
}
 
Example 7   Project: hadoop   File: TestGridMixClasses.java
@Test (timeout=3000)
public void testSleepReducer() throws Exception {
  Configuration conf = new Configuration();
  conf.setInt(JobContext.NUM_REDUCES, 2);
  CompressionEmulationUtil.setCompressionEmulationEnabled(conf, true);
  conf.setBoolean(FileOutputFormat.COMPRESS, true);

  CompressionEmulationUtil.setCompressionEmulationEnabled(conf, true);
  conf.setBoolean(MRJobConfig.MAP_OUTPUT_COMPRESS, true);
  TaskAttemptID taskId = new TaskAttemptID();

  RawKeyValueIterator input = new FakeRawKeyValueReducerIterator();

  Counter counter = new GenericCounter();
  Counter inputValueCounter = new GenericCounter();
  RecordWriter<NullWritable, NullWritable> output = new LoadRecordReduceWriter();

  OutputCommitter committer = new CustomOutputCommitter();

  StatusReporter reporter = new DummyReporter();
  RawComparator<GridmixKey> comparator = new FakeRawComparator();

  ReduceContext<GridmixKey, NullWritable, NullWritable, NullWritable> reducecontext = new ReduceContextImpl<GridmixKey, NullWritable, NullWritable, NullWritable>(
          conf, taskId, input, counter, inputValueCounter, output, committer,
          reporter, comparator, GridmixKey.class, NullWritable.class);
  org.apache.hadoop.mapreduce.Reducer<GridmixKey, NullWritable, NullWritable, NullWritable>.Context context = new WrappedReducer<GridmixKey, NullWritable, NullWritable, NullWritable>()
          .getReducerContext(reducecontext);

  SleepReducer test = new SleepReducer();
  long start = System.currentTimeMillis();
  test.setup(context);
  long sleeper = context.getCurrentKey().getReduceOutputBytes();
  // status has been changed
  assertEquals("Sleeping... " + sleeper + " ms left", context.getStatus());
  // should sleep 0.9 sec

  assertTrue(System.currentTimeMillis() >= (start + sleeper));
  test.cleanup(context);
  // status has been changed again

  assertEquals("Slept for " + sleeper, context.getStatus());

}
 
Example 8   Project: big-c   File: TestGridMixClasses.java
@Test (timeout=3000)
public void testLoadJobLoadReducer() throws Exception {
  LoadJob.LoadReducer test = new LoadJob.LoadReducer();

  Configuration conf = new Configuration();
  conf.setInt(JobContext.NUM_REDUCES, 2);
  CompressionEmulationUtil.setCompressionEmulationEnabled(conf, true);
  conf.setBoolean(FileOutputFormat.COMPRESS, true);

  CompressionEmulationUtil.setCompressionEmulationEnabled(conf, true);
  conf.setBoolean(MRJobConfig.MAP_OUTPUT_COMPRESS, true);
  TaskAttemptID taskid = new TaskAttemptID();

  RawKeyValueIterator input = new FakeRawKeyValueIterator();

  Counter counter = new GenericCounter();
  Counter inputValueCounter = new GenericCounter();
  LoadRecordWriter output = new LoadRecordWriter();

  OutputCommitter committer = new CustomOutputCommitter();

  StatusReporter reporter = new DummyReporter();
  RawComparator<GridmixKey> comparator = new FakeRawComparator();

  ReduceContext<GridmixKey, GridmixRecord, NullWritable, GridmixRecord> reduceContext = new ReduceContextImpl<GridmixKey, GridmixRecord, NullWritable, GridmixRecord>(
          conf, taskid, input, counter, inputValueCounter, output, committer,
          reporter, comparator, GridmixKey.class, GridmixRecord.class);
  // consume one record up front before running the reducer
  reduceContext.nextKeyValue();
  org.apache.hadoop.mapreduce.Reducer<GridmixKey, GridmixRecord, NullWritable, GridmixRecord>.Context context = new WrappedReducer<GridmixKey, GridmixRecord, NullWritable, GridmixRecord>()
          .getReducerContext(reduceContext);

  // test.setup(context);
  test.run(context);
  // 9 records were read during run() (10 total, minus the one consumed up front)
  assertEquals(9, counter.getValue());
  assertEquals(10, inputValueCounter.getValue());
  assertEquals(1, output.getData().size());
  GridmixRecord record = output.getData().values().iterator()
          .next();

  assertEquals(1593, record.getSize());
}
 
Example 9   Project: big-c   File: TestGridMixClasses.java
@Test (timeout=3000)
public void testSleepReducer() throws Exception {
  Configuration conf = new Configuration();
  conf.setInt(JobContext.NUM_REDUCES, 2);
  CompressionEmulationUtil.setCompressionEmulationEnabled(conf, true);
  conf.setBoolean(FileOutputFormat.COMPRESS, true);

  CompressionEmulationUtil.setCompressionEmulationEnabled(conf, true);
  conf.setBoolean(MRJobConfig.MAP_OUTPUT_COMPRESS, true);
  TaskAttemptID taskId = new TaskAttemptID();

  RawKeyValueIterator input = new FakeRawKeyValueReducerIterator();

  Counter counter = new GenericCounter();
  Counter inputValueCounter = new GenericCounter();
  RecordWriter<NullWritable, NullWritable> output = new LoadRecordReduceWriter();

  OutputCommitter committer = new CustomOutputCommitter();

  StatusReporter reporter = new DummyReporter();
  RawComparator<GridmixKey> comparator = new FakeRawComparator();

  ReduceContext<GridmixKey, NullWritable, NullWritable, NullWritable> reducecontext = new ReduceContextImpl<GridmixKey, NullWritable, NullWritable, NullWritable>(
          conf, taskId, input, counter, inputValueCounter, output, committer,
          reporter, comparator, GridmixKey.class, NullWritable.class);
  org.apache.hadoop.mapreduce.Reducer<GridmixKey, NullWritable, NullWritable, NullWritable>.Context context = new WrappedReducer<GridmixKey, NullWritable, NullWritable, NullWritable>()
          .getReducerContext(reducecontext);

  SleepReducer test = new SleepReducer();
  long start = System.currentTimeMillis();
  test.setup(context);
  long sleeper = context.getCurrentKey().getReduceOutputBytes();
  // status has been changed
  assertEquals("Sleeping... " + sleeper + " ms left", context.getStatus());
  // should sleep 0.9 sec

  assertTrue(System.currentTimeMillis() >= (start + sleeper));
  test.cleanup(context);
  // status has been changed again

  assertEquals("Slept for " + sleeper, context.getStatus());

}
 
Example 10   Project: incubator-gobblin   File: KeyDedupReducerTest.java
@Test
public void testAvroReduce()
    throws IOException, InterruptedException {
  Schema keySchema = new Schema.Parser().parse(AVRO_KEY_SCHEMA);
  GenericRecordBuilder keyRecordBuilder = new GenericRecordBuilder(keySchema.getField("key").schema());
  keyRecordBuilder.set("partitionKey", 1);
  keyRecordBuilder.set("environment", "test");
  keyRecordBuilder.set("subKey", "2");
  GenericRecord record = keyRecordBuilder.build();
  keyRecordBuilder = new GenericRecordBuilder(keySchema);
  keyRecordBuilder.set("key", record);
  GenericRecord keyRecord = keyRecordBuilder.build();

  // Test reducer with delta field "scn"
  Schema fullSchema = new Schema.Parser().parse(AVRO_FULL_SCHEMA);
  AvroValue<GenericRecord> fullRecord1 = new AvroValue<>();
  AvroValue<GenericRecord> fullRecord2 = new AvroValue<>();
  AvroValue<GenericRecord> fullRecord3 = new AvroValue<>();
  AvroValue<GenericRecord> fullRecord4 = new AvroValue<>();

  GenericRecordBuilder fullRecordBuilder1 = new GenericRecordBuilder(fullSchema);
  fullRecordBuilder1.set("key", record);
  fullRecordBuilder1.set("scn", 123);
  fullRecordBuilder1.set("scn2", 100);
  fullRecord1.datum(fullRecordBuilder1.build());
  fullRecordBuilder1.set("scn", 125);
  fullRecordBuilder1.set("scn2", 1);
  fullRecord2.datum(fullRecordBuilder1.build());
  fullRecordBuilder1.set("scn", 124);
  fullRecordBuilder1.set("scn2", 10);
  fullRecord3.datum(fullRecordBuilder1.build());
  fullRecordBuilder1.set("scn", 122);
  fullRecordBuilder1.set("scn2", 1000);
  fullRecord4.datum(fullRecordBuilder1.build());

  Configuration conf = mock(Configuration.class);
  when(conf.get(AvroKeyDedupReducer.DELTA_SCHEMA_PROVIDER))
      .thenReturn(FieldAttributeBasedDeltaFieldsProvider.class.getName());
  when(conf.get(FieldAttributeBasedDeltaFieldsProvider.ATTRIBUTE_FIELD)).thenReturn("attributes_json");

  when(conf.get(FieldAttributeBasedDeltaFieldsProvider.DELTA_PROP_NAME,
      FieldAttributeBasedDeltaFieldsProvider.DEFAULT_DELTA_PROP_NAME))
      .thenReturn(FieldAttributeBasedDeltaFieldsProvider.DEFAULT_DELTA_PROP_NAME);
  RecordKeyDedupReducerBase<AvroKey<GenericRecord>, AvroValue<GenericRecord>,
      AvroKey<GenericRecord>, NullWritable> reducer = new AvroKeyDedupReducer();

WrappedReducer.Context reducerContext = mock(WrappedReducer.Context.class); // mock the reducer context directly instead of constructing a ReduceContextImpl
  when(reducerContext.getConfiguration()).thenReturn(conf);
  Counter moreThan1Counter = new GenericCounter();
  when(reducerContext.getCounter(RecordKeyDedupReducerBase.EVENT_COUNTER.MORE_THAN_1)).thenReturn(moreThan1Counter);

  Counter dedupedCounter = new GenericCounter();
  when(reducerContext.getCounter(RecordKeyDedupReducerBase.EVENT_COUNTER.DEDUPED)).thenReturn(dedupedCounter);

  Counter recordCounter = new GenericCounter();
  when(reducerContext.getCounter(RecordKeyDedupReducerBase.EVENT_COUNTER.RECORD_COUNT)).thenReturn(recordCounter);
  reducer.setup(reducerContext);

  doNothing().when(reducerContext).write(any(AvroKey.class), any(NullWritable.class));
  List<AvroValue<GenericRecord>> valueIterable =
      Lists.newArrayList(fullRecord1, fullRecord2, fullRecord3, fullRecord4);

  AvroKey<GenericRecord> key = new AvroKey<>();
  key.datum(keyRecord);
  reducer.reduce(key, valueIterable, reducerContext);
  Assert.assertEquals(reducer.getOutKey().datum(), fullRecord2.datum());

  // Test reducer without delta field
  Configuration conf2 = mock(Configuration.class);
  when(conf2.get(AvroKeyDedupReducer.DELTA_SCHEMA_PROVIDER)).thenReturn(null);
  when(reducerContext.getConfiguration()).thenReturn(conf2);
  RecordKeyDedupReducerBase<AvroKey<GenericRecord>, AvroValue<GenericRecord>,
      AvroKey<GenericRecord>, NullWritable> reducer2 = new AvroKeyDedupReducer();
  reducer2.setup(reducerContext);
  reducer2.reduce(key, valueIterable, reducerContext);
  Assert.assertEquals(reducer2.getOutKey().datum(), fullRecord1.datum());

  // Test reducer with compound delta key.
  Schema fullSchema2 = new Schema.Parser().parse(AVRO_FULL_SCHEMA_WITH_TWO_DELTA_FIELDS);
  GenericRecordBuilder fullRecordBuilder2 = new GenericRecordBuilder(fullSchema2);
  fullRecordBuilder2.set("key", record);
  fullRecordBuilder2.set("scn", 123);
  fullRecordBuilder2.set("scn2", 100);
  fullRecord1.datum(fullRecordBuilder2.build());
  fullRecordBuilder2.set("scn", 125);
  fullRecordBuilder2.set("scn2", 1000);
  fullRecord2.datum(fullRecordBuilder2.build());
  fullRecordBuilder2.set("scn", 126);
  fullRecordBuilder2.set("scn2", 1000);
  fullRecord3.datum(fullRecordBuilder2.build());
  fullRecordBuilder2.set("scn", 130);
  fullRecordBuilder2.set("scn2", 100);
  fullRecord4.datum(fullRecordBuilder2.build());
  List<AvroValue<GenericRecord>> valueIterable2 =
      Lists.newArrayList(fullRecord1, fullRecord2, fullRecord3, fullRecord4);
  reducer.reduce(key, valueIterable2, reducerContext);
  Assert.assertEquals(reducer.getOutKey().datum(), fullRecord3.datum());

}
 
Example 11   Project: incubator-tez   File: MRTask.java
protected static <INKEY,INVALUE,OUTKEY,OUTVALUE>
org.apache.hadoop.mapreduce.Reducer<INKEY,INVALUE,OUTKEY,OUTVALUE>.Context
createReduceContext(org.apache.hadoop.mapreduce.Reducer
                      <INKEY,INVALUE,OUTKEY,OUTVALUE> reducer,
                    Configuration job,
                    TaskAttemptID taskId,
                    final TezRawKeyValueIterator rIter,
                    org.apache.hadoop.mapreduce.Counter inputKeyCounter,
                    org.apache.hadoop.mapreduce.Counter inputValueCounter,
                    org.apache.hadoop.mapreduce.RecordWriter<OUTKEY,OUTVALUE> output,
                    org.apache.hadoop.mapreduce.OutputCommitter committer,
                    org.apache.hadoop.mapreduce.StatusReporter reporter,
                    RawComparator<INKEY> comparator,
                    Class<INKEY> keyClass, Class<INVALUE> valueClass
) throws IOException, InterruptedException {
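  // Bridge the Tez-side TezRawKeyValueIterator to the MapReduce RawKeyValueIterator that ReduceContextImpl expects.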
  RawKeyValueIterator r =
      new RawKeyValueIterator() {

        @Override
        public boolean next() throws IOException {
          return rIter.next();
        }

        @Override
        public DataInputBuffer getValue() throws IOException {
          return rIter.getValue();
        }

        @Override
        public Progress getProgress() {
          return rIter.getProgress();
        }

        @Override
        public DataInputBuffer getKey() throws IOException {
          return rIter.getKey();
        }

        @Override
        public void close() throws IOException {
          rIter.close();
        }
      };
  org.apache.hadoop.mapreduce.ReduceContext<INKEY, INVALUE, OUTKEY, OUTVALUE>
  reduceContext =
    new ReduceContextImpl<INKEY, INVALUE, OUTKEY, OUTVALUE>(
        job,
        taskId,
        r,
        inputKeyCounter,
        inputValueCounter,
        output,
        committer,
        reporter,
        comparator,
        keyClass,
        valueClass);
  if (LOG.isDebugEnabled()) {
    LOG.debug("Using key class: " + keyClass
        + ", valueClass: " + valueClass);
  }

  org.apache.hadoop.mapreduce.Reducer<INKEY,INVALUE,OUTKEY,OUTVALUE>.Context
      reducerContext =
        new WrappedReducer<INKEY, INVALUE, OUTKEY, OUTVALUE>().getReducerContext(
            reduceContext);

  return reducerContext;
}
 
Example 12   Project: incubator-tez   File: MRCombiner.java
private static <KEYIN, VALUEIN, KEYOUT, VALUEOUT> org.apache.hadoop.mapreduce.Reducer<KEYIN, VALUEIN, KEYOUT, VALUEOUT>.Context createReduceContext(
    Configuration conf,
    TaskAttemptID mrTaskAttemptID,
    final TezRawKeyValueIterator rawIter,
    Counter combineInputKeyCounter,
    Counter combineInputValueCounter,
    RecordWriter<KEYOUT, VALUEOUT> recordWriter,
    MRTaskReporter reporter,
    RawComparator<KEYIN> comparator,
    Class<KEYIN> keyClass,
    Class<VALUEIN> valClass) throws InterruptedException, IOException {

  RawKeyValueIterator r = new RawKeyValueIterator() {
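    // Delegates every call to the wrapped TezRawKeyValueIterator, bridging it to the MapReduce RawKeyValueIterator API.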

    @Override
    public boolean next() throws IOException {
      return rawIter.next();
    }

    @Override
    public DataInputBuffer getValue() throws IOException {
      return rawIter.getValue();
    }

    @Override
    public Progress getProgress() {
      return rawIter.getProgress();
    }

    @Override
    public DataInputBuffer getKey() throws IOException {
      return rawIter.getKey();
    }

    @Override
    public void close() throws IOException {
      rawIter.close();
    }
  };

  ReduceContext<KEYIN, VALUEIN, KEYOUT, VALUEOUT> rContext = new ReduceContextImpl<KEYIN, VALUEIN, KEYOUT, VALUEOUT>(
      conf, mrTaskAttemptID, r, combineInputKeyCounter,
      combineInputValueCounter, recordWriter, null /* no OutputCommitter for intermediate combiner output */, reporter, comparator,
      keyClass, valClass);

  org.apache.hadoop.mapreduce.Reducer<KEYIN, VALUEIN, KEYOUT, VALUEOUT>.Context reducerContext = new WrappedReducer<KEYIN, VALUEIN, KEYOUT, VALUEOUT>()
      .getReducerContext(rContext);
  return reducerContext;
}
 
Example 13   Project: tez   File: MRTask.java
protected static <INKEY,INVALUE,OUTKEY,OUTVALUE>
org.apache.hadoop.mapreduce.Reducer<INKEY,INVALUE,OUTKEY,OUTVALUE>.Context
createReduceContext(org.apache.hadoop.mapreduce.Reducer
                      <INKEY,INVALUE,OUTKEY,OUTVALUE> reducer,
                    Configuration job,
                    TaskAttemptID taskId,
                    final TezRawKeyValueIterator rIter,
                    org.apache.hadoop.mapreduce.Counter inputKeyCounter,
                    org.apache.hadoop.mapreduce.Counter inputValueCounter,
                    org.apache.hadoop.mapreduce.RecordWriter<OUTKEY,OUTVALUE> output,
                    org.apache.hadoop.mapreduce.OutputCommitter committer,
                    org.apache.hadoop.mapreduce.StatusReporter reporter,
                    RawComparator<INKEY> comparator,
                    Class<INKEY> keyClass, Class<INVALUE> valueClass
) throws IOException, InterruptedException {
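  // Bridge the Tez iterator to the MapReduce RawKeyValueIterator that ReduceContextImpl expects.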
  RawKeyValueIterator r =
      new RawKeyValueIterator() {

        @Override
        public boolean next() throws IOException {
          return rIter.next();
        }

        @Override
        public DataInputBuffer getValue() throws IOException {
          return rIter.getValue();
        }

        @Override
        public Progress getProgress() {
          return rIter.getProgress();
        }

        @Override
        public DataInputBuffer getKey() throws IOException {
          return rIter.getKey();
        }

        @Override
        public void close() throws IOException {
          rIter.close();
        }
      };
  org.apache.hadoop.mapreduce.ReduceContext<INKEY, INVALUE, OUTKEY, OUTVALUE>
  reduceContext =
    new ReduceContextImpl<INKEY, INVALUE, OUTKEY, OUTVALUE>(
        job,
        taskId,
        r,
        inputKeyCounter,
        inputValueCounter,
        output,
        committer,
        reporter,
        comparator,
        keyClass,
        valueClass);
  if (LOG.isDebugEnabled()) {
    LOG.debug("Using key class: " + keyClass
        + ", valueClass: " + valueClass);
  }

  org.apache.hadoop.mapreduce.Reducer<INKEY,INVALUE,OUTKEY,OUTVALUE>.Context
      reducerContext =
        new WrappedReducer<INKEY, INVALUE, OUTKEY, OUTVALUE>().getReducerContext(
            reduceContext);

  return reducerContext;
}
 
Example 14   Project: tez   File: MRCombiner.java
private static <KEYIN, VALUEIN, KEYOUT, VALUEOUT> org.apache.hadoop.mapreduce.Reducer<KEYIN, VALUEIN, KEYOUT, VALUEOUT>.Context createReduceContext(
    Configuration conf,
    TaskAttemptID mrTaskAttemptID,
    final TezRawKeyValueIterator rawIter,
    Counter combineInputRecordsCounter,
    Counter combineOutputRecordsCounter,
    RecordWriter<KEYOUT, VALUEOUT> recordWriter,
    MRTaskReporter reporter,
    RawComparator<KEYIN> comparator,
    Class<KEYIN> keyClass,
    Class<VALUEIN> valClass) throws InterruptedException, IOException {

  RawKeyValueIterator r = new RawKeyValueIterator() {
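    // Same bridge as in MRTask: delegate each call to the underlying TezRawKeyValueIterator.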

    @Override
    public boolean next() throws IOException {
      return rawIter.next();
    }

    @Override
    public DataInputBuffer getValue() throws IOException {
      return rawIter.getValue();
    }

    @Override
    public Progress getProgress() {
      return rawIter.getProgress();
    }

    @Override
    public DataInputBuffer getKey() throws IOException {
      return rawIter.getKey();
    }

    @Override
    public void close() throws IOException {
      rawIter.close();
    }
  };

  ReduceContext<KEYIN, VALUEIN, KEYOUT, VALUEOUT> rContext = new ReduceContextImpl<KEYIN, VALUEIN, KEYOUT, VALUEOUT>(
      conf, mrTaskAttemptID, r, null /* no input key counter */,
      combineInputRecordsCounter, recordWriter, null /* no OutputCommitter */, reporter, comparator,
      keyClass, valClass);

  org.apache.hadoop.mapreduce.Reducer<KEYIN, VALUEIN, KEYOUT, VALUEOUT>.Context reducerContext = new WrappedReducer<KEYIN, VALUEIN, KEYOUT, VALUEOUT>()
      .getReducerContext(rContext);
  return reducerContext;
}
 