The following lists example usages of org.apache.hadoop.mapred.Reporter#NULL. You can follow the link to view the source code on GitHub, or leave a comment on the right.
private void combineAndSpill(
    RawKeyValueIterator kvIter,
    Counters.Counter inCounter) throws IOException {
  // Runs the job's combiner over the sorted records in kvIter and emits the
  // combined output through combineCollector. Reporter.NULL is passed because
  // no progress reporting is needed during an in-memory combine/spill.
  JobConf conf = jobConf;
  Reducer combiner = ReflectionUtils.newInstance(combinerClass, conf);
  Class<K> keyCls = (Class<K>) conf.getMapOutputKeyClass();
  Class<V> valCls = (Class<V>) conf.getMapOutputValueClass();
  RawComparator<K> groupComparator =
      (RawComparator<K>) conf.getCombinerKeyGroupingComparator();
  try {
    // Groups the raw records by key according to the grouping comparator.
    CombineValuesIterator valueIter = new CombineValuesIterator(
        kvIter, groupComparator, keyCls, valCls, conf, Reporter.NULL,
        inCounter);
    for (; valueIter.more(); valueIter.nextKey()) {
      combiner.reduce(valueIter.getKey(), valueIter, combineCollector,
          Reporter.NULL);
    }
  } finally {
    // Release combiner resources even if reduce() throws.
    combiner.close();
  }
}
private void combineAndSpill(
    RawKeyValueIterator kvIter,
    Counters.Counter inCounter) throws IOException {
  // Feeds the sorted in-memory records through the configured combiner before
  // spilling. Progress reporting is disabled by supplying Reporter.NULL.
  final JobConf localConf = jobConf;
  final Reducer combinerInstance =
      ReflectionUtils.newInstance(combinerClass, localConf);
  final Class<K> mapOutKey = (Class<K>) localConf.getMapOutputKeyClass();
  final Class<V> mapOutVal = (Class<V>) localConf.getMapOutputValueClass();
  final RawComparator<K> grouping =
      (RawComparator<K>) localConf.getCombinerKeyGroupingComparator();
  try {
    // The iterator presents one key group at a time, per the grouping
    // comparator; inCounter tracks how many input records were combined.
    CombineValuesIterator grouped = new CombineValuesIterator(
        kvIter, grouping, mapOutKey, mapOutVal, localConf, Reporter.NULL,
        inCounter);
    while (grouped.more()) {
      combinerInstance.reduce(grouped.getKey(), grouped, combineCollector,
          Reporter.NULL);
      grouped.nextKey();
    }
  } finally {
    // Always close the combiner, even on failure, to free its resources.
    combinerInstance.close();
  }
}
private static RecordWriter createParquetWriter(Path target, JobConf conf, Properties properties)
    throws IOException
{
  // Installs the Parquet schema (derived from the Hive column names/types in
  // the table properties) on the job configuration if it is not set yet.
  if (conf.get(DataWritableWriteSupport.PARQUET_HIVE_SCHEMA) == null) {
    String columnNameList = properties.getProperty(IOConstants.COLUMNS);
    String columnTypeList = properties.getProperty(IOConstants.COLUMNS_TYPES);
    List<String> names = Splitter.on(',').splitToList(columnNameList);
    List<TypeInfo> types = getTypeInfosFromTypeString(columnTypeList);
    setParquetSchema(conf, HiveSchemaConverter.convert(names, types));
  }
  // Wrap Hive's Parquet output format; Reporter.NULL since no progress
  // reporting is needed when creating the writer.
  return new ParquetRecordWriterWrapper(
      new ParquetOutputFormat<>(new DataWritableWriteSupport()),
      conf, target.toString(), Reporter.NULL, properties);
}
public void reduce(K key, Iterator<V> values, OutputCollector<K, V> output,
    Reporter reporter) throws IOException {
  // Test guard: the framework must hand us a real Reporter, never the
  // no-op Reporter.NULL placeholder.
  if (reporter == Reporter.NULL) {
    Assert.fail("A valid Reporter should have been used but, Reporter.NULL is used");
  }
}
void waitOutputThreads() throws IOException {
  // Waits for the pipe subprocess to exit and for the stdout/stderr pump
  // threads to drain (bounded by joinDelay_), then surfaces any failure from
  // the subprocess or the pump threads as a RuntimeException.
  try {
    if (outThread_ == null) {
      // This happens only when reducer has empty input(So reduce() is not
      // called at all in this task). If reducer still generates output,
      // which is very uncommon and we may not have to support this case.
      // So we don't write this output to HDFS, but we consume/collect
      // this output just to avoid reducer hanging forever.
      OutputCollector collector = new OutputCollector() {
        public void collect(Object key, Object value)
          throws IOException {
          //just consume it, no need to write the record anywhere
        }
      };
      Reporter reporter = Reporter.NULL;//dummy reporter
      startOutputThreads(collector, reporter);
    }
    int exitVal = sim.waitFor();
    // how'd it go?
    if (exitVal != 0) {
      if (nonZeroExitIsFailure_) {
        throw new RuntimeException("PipeMapRed.waitOutputThreads(): subprocess failed with code "
            + exitVal);
      } else {
        LOG.info("PipeMapRed.waitOutputThreads(): subprocess exited with " +
            "code " + exitVal + " in " + PipeMapRed.class.getName());
      }
    }
    if (outThread_ != null) {
      outThread_.join(joinDelay_);
    }
    if (errThread_ != null) {
      errThread_.join(joinDelay_);
    }
    if (outerrThreadsThrowable != null) {
      throw new RuntimeException(outerrThreadsThrowable);
    }
  } catch (InterruptedException e) {
    // FIX: do not swallow the interrupt — restore the thread's interrupt
    // status so callers and the framework can still observe it.
    Thread.currentThread().interrupt();
  }
}
// Test reducer: asserts that the framework supplied a real Reporter and not
// the no-op Reporter.NULL placeholder. Produces no output.
public void reduce(K key, Iterator<V> values, OutputCollector<K, V> output,
Reporter reporter) throws IOException {
// Identity comparison is intentional: Reporter.NULL is a singleton.
if (Reporter.NULL == reporter) {
Assert.fail("A valid Reporter should have been used but, Reporter.NULL is used");
}
}
void waitOutputThreads() throws IOException {
  // Waits for the pipe subprocess to finish and the output/error pump threads
  // to drain (bounded by joinDelay_); rethrows subprocess/pump failures.
  try {
    if (outThread_ == null) {
      // This happens only when reducer has empty input(So reduce() is not
      // called at all in this task). If reducer still generates output,
      // which is very uncommon and we may not have to support this case.
      // So we don't write this output to HDFS, but we consume/collect
      // this output just to avoid reducer hanging forever.
      OutputCollector collector = new OutputCollector() {
        public void collect(Object key, Object value)
          throws IOException {
          //just consume it, no need to write the record anywhere
        }
      };
      Reporter reporter = Reporter.NULL;//dummy reporter
      startOutputThreads(collector, reporter);
    }
    int exitVal = sim.waitFor();
    // how'd it go?
    if (exitVal != 0) {
      if (nonZeroExitIsFailure_) {
        throw new RuntimeException("PipeMapRed.waitOutputThreads(): subprocess failed with code "
            + exitVal);
      } else {
        LOG.info("PipeMapRed.waitOutputThreads(): subprocess exited with " +
            "code " + exitVal + " in " + PipeMapRed.class.getName());
      }
    }
    if (outThread_ != null) {
      outThread_.join(joinDelay_);
    }
    if (errThread_ != null) {
      errThread_.join(joinDelay_);
    }
    if (outerrThreadsThrowable != null) {
      throw new RuntimeException(outerrThreadsThrowable);
    }
  } catch (InterruptedException e) {
    // FIX: re-assert the interrupt flag instead of silently ignoring the
    // interruption, per standard InterruptedException handling practice.
    Thread.currentThread().interrupt();
  }
}
void waitOutputThreads() {
  // Waits for the pipe subprocess to exit and the output/error pump threads
  // to drain (bounded by joinDelay_); rethrows subprocess/pump failures.
  try {
    if (outThread_ == null) {
      // This happens only when reducer has empty input(So reduce() is not
      // called at all in this task). If reducer still generates output,
      // which is very uncommon and we may not have to support this case.
      // So we don't write this output to HDFS, but we consume/collect
      // this output just to avoid reducer hanging forever.
      OutputCollector collector = new OutputCollector() {
        public void collect(Object key, Object value)
          throws IOException {
          //just consume it, no need to write the record anywhere
        }
      };
      Reporter reporter = Reporter.NULL;//dummy reporter
      startOutputThreads(collector, reporter);
    }
    int exitVal = sim.waitFor();
    // how'd it go?
    if (exitVal != 0) {
      if (nonZeroExitIsFailure_) {
        throw new RuntimeException("PipeMapRed.waitOutputThreads(): subprocess failed with code "
            + exitVal);
      } else {
        logprintln("PipeMapRed.waitOutputThreads(): subprocess exited with code " + exitVal
            + " in " + PipeMapRed.class.getName());
      }
    }
    if (outThread_ != null) {
      outThread_.join(joinDelay_);
    }
    if (errThread_ != null) {
      errThread_.join(joinDelay_);
    }
    if (outerrThreadsThrowable != null) {
      throw new RuntimeException(outerrThreadsThrowable);
    }
  } catch (InterruptedException e) {
    // FIX: restore the interrupt status rather than swallowing it, so the
    // interruption remains visible to callers and the framework.
    Thread.currentThread().interrupt();
  }
}
// Convenience constructor: delegates to the full constructor, passing the
// no-op Reporter.NULL since no progress reporting is needed here.
public JobContextImpl(JobConf conf, TezDAGID dagId) {
this(conf, dagId, Reporter.NULL);
}
// Convenience constructor: forwards to the three-argument constructor with
// the singleton no-op reporter (Reporter.NULL) as the reporter.
public JobContextImpl(JobConf conf, TezDAGID dagId) {
this(conf, dagId, Reporter.NULL);
}
void waitOutputThreads() {
  // Blocks until the pipe subprocess exits, then joins the stdout/stderr pump
  // threads (bounded by joinDelay_) and surfaces any recorded failure.
  try {
    if (outThread_ == null) {
      // This happens only when reducer has empty input(So reduce() is not
      // called at all in this task). If reducer still generates output,
      // which is very uncommon and we may not have to support this case.
      // So we don't write this output to HDFS, but we consume/collect
      // this output just to avoid reducer hanging forever.
      OutputCollector collector = new OutputCollector() {
        public void collect(Object key, Object value)
          throws IOException {
          //just consume it, no need to write the record anywhere
        }
      };
      Reporter reporter = Reporter.NULL;//dummy reporter
      startOutputThreads(collector, reporter);
    }
    int exitVal = sim.waitFor();
    // how'd it go?
    if (exitVal != 0) {
      if (nonZeroExitIsFailure_) {
        throw new RuntimeException("PipeMapRed.waitOutputThreads(): subprocess failed with code "
            + exitVal);
      } else {
        logprintln("PipeMapRed.waitOutputThreads(): subprocess exited with code " + exitVal
            + " in " + PipeMapRed.class.getName());
      }
    }
    if (outThread_ != null) {
      outThread_.join(joinDelay_);
    }
    if (errThread_ != null) {
      errThread_.join(joinDelay_);
    }
    if (outerrThreadsThrowable != null) {
      throw new RuntimeException(outerrThreadsThrowable);
    }
  } catch (InterruptedException e) {
    // FIX: re-interrupt the current thread instead of discarding the
    // InterruptedException, preserving the interruption for callers.
    Thread.currentThread().interrupt();
  }
}