org.apache.hadoop.mapred.Reporter#NULL Source Code Examples

Listed below are source code examples that use org.apache.hadoop.mapred.Reporter#NULL, collected from open-source projects on GitHub.
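Reporter.NULL is the null-object implementation of the old-API org.apache.hadoop.mapred.Reporter interface: its progress and counter callbacks are no-ops, so it can be passed wherever the API requires a Reporter but no progress reporting is wanted, typically when old-API components are driven outside a running task. A minimal sketch of the pattern, assuming a local file input.txt and an invented class name:

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.TextInputFormat;

public class ReporterNullDemo {
  public static void main(String[] args) throws Exception {
    JobConf conf = new JobConf();
    FileInputFormat.setInputPaths(conf, new Path("input.txt")); // hypothetical input
    TextInputFormat format = new TextInputFormat();
    format.configure(conf);
    for (InputSplit split : format.getSplits(conf, 1)) {
      // No task is running here, so there is no framework Reporter to pass;
      // Reporter.NULL fills the parameter with a do-nothing implementation.
      RecordReader<LongWritable, Text> reader =
          format.getRecordReader(split, conf, Reporter.NULL);
      LongWritable key = reader.createKey();
      Text value = reader.createValue();
      while (reader.next(key, value)) {
        System.out.println(value);
      }
      reader.close();
    }
  }
}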

Example 1   Project: hadoop   File: MergeManagerImpl.java
private void combineAndSpill(
    RawKeyValueIterator kvIter,
    Counters.Counter inCounter) throws IOException {
  JobConf job = jobConf;
  Reducer combiner = ReflectionUtils.newInstance(combinerClass, job);
  Class<K> keyClass = (Class<K>) job.getMapOutputKeyClass();
  Class<V> valClass = (Class<V>) job.getMapOutputValueClass();
  RawComparator<K> comparator = 
    (RawComparator<K>)job.getCombinerKeyGroupingComparator();
  try {
    CombineValuesIterator values = new CombineValuesIterator(
        kvIter, comparator, keyClass, valClass, job, Reporter.NULL,
        inCounter);
    while (values.more()) {
      combiner.reduce(values.getKey(), values, combineCollector,
                      Reporter.NULL);
      values.nextKey();
    }
  } finally {
    combiner.close();
  }
}
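Note that the combiner is handed Reporter.NULL at both call sites, in the CombineValuesIterator constructor and in each reduce() call: the combiner runs inside the reduce-side merge rather than as its own task, and the records it consumes are already accounted for through inCounter.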
 
Example 2   Project: big-c   File: MergeManagerImpl.java
The snippet is identical to Example 1 (big-c carries the same Apache Hadoop source).
 
Example 3   Project: presto   File: ParquetRecordWriterUtil.java
private static RecordWriter createParquetWriter(Path target, JobConf conf, Properties properties)
        throws IOException
{
    if (conf.get(DataWritableWriteSupport.PARQUET_HIVE_SCHEMA) == null) {
        List<String> columnNames = Splitter.on(',').splitToList(properties.getProperty(IOConstants.COLUMNS));
        List<TypeInfo> columnTypes = getTypeInfosFromTypeString(properties.getProperty(IOConstants.COLUMNS_TYPES));
        MessageType schema = HiveSchemaConverter.convert(columnNames, columnTypes);
        setParquetSchema(conf, schema);
    }

    ParquetOutputFormat<ParquetHiveRecord> outputFormat = new ParquetOutputFormat<>(new DataWritableWriteSupport());

    return new ParquetRecordWriterWrapper(outputFormat, conf, target.toString(), Reporter.NULL, properties);
}
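ParquetRecordWriterWrapper comes from the Hive Parquet bindings and requires a Reporter in its constructor; since Presto invokes it outside any MapReduce task, Reporter.NULL satisfies that parameter.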
 
Example 4   Project: hadoop   File: TestMRAppWithCombiner.java
public void reduce(K key, Iterator<V> values, OutputCollector<K, V> output,
    Reporter reporter) throws IOException {
  if (Reporter.NULL == reporter) {
    Assert.fail("A valid Reporter should have been used but, Reporter.NULL is used");
  }
}
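This test asserts by identity that the framework handed the combiner a real Reporter rather than the null object. Going the other way, driving old-API code directly and supplying Reporter.NULL yourself, might look like the following sketch (ReporterNullUnitDemo is an invented name; IdentityMapper is the stock org.apache.hadoop.mapred.lib implementation):

import java.io.IOException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.lib.IdentityMapper;

public class ReporterNullUnitDemo {
  public static void main(String[] args) throws IOException {
    IdentityMapper<LongWritable, Text> mapper = new IdentityMapper<>();
    mapper.configure(new JobConf());
    // Collect output in-process instead of writing it anywhere.
    OutputCollector<LongWritable, Text> out =
        (key, value) -> System.out.println(key + "\t" + value);
    // No task framework is running, so Reporter.NULL stands in for the
    // Reporter that the framework would normally supply.
    mapper.map(new LongWritable(0), new Text("hello"), out, Reporter.NULL);
    mapper.close();
  }
}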
 
Example 5   Project: hadoop   File: PipeMapRed.java
void waitOutputThreads() throws IOException {
  try {
    if (outThread_ == null) {
      // This happens only when the reducer had empty input, so reduce()
      // was never called in this task. If the reducer still generates
      // output (which is very uncommon), we don't write it to HDFS; we
      // merely consume it so the reducer cannot hang forever.

      OutputCollector collector = new OutputCollector() {
        public void collect(Object key, Object value)
          throws IOException {
          //just consume it, no need to write the record anywhere
        }
      };
      Reporter reporter = Reporter.NULL; // dummy reporter
      startOutputThreads(collector, reporter);
    }
    int exitVal = sim.waitFor();
    // how'd it go?
    if (exitVal != 0) {
      if (nonZeroExitIsFailure_) {
        throw new RuntimeException("PipeMapRed.waitOutputThreads(): subprocess failed with code "
                                   + exitVal);
      } else {
        LOG.info("PipeMapRed.waitOutputThreads(): subprocess exited with " +
        		"code " + exitVal + " in " + PipeMapRed.class.getName());
      }
    }
    if (outThread_ != null) {
      outThread_.join(joinDelay_);
    }
    if (errThread_ != null) {
      errThread_.join(joinDelay_);
    }
    if (outerrThreadsThrowable != null) {
      throw new RuntimeException(outerrThreadsThrowable);
    }
  } catch (InterruptedException e) {
    //ignore
  }
}
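The anonymous OutputCollector and Reporter.NULL play the same role here: both are do-nothing stand-ins that let startOutputThreads() run with its normal signature while the unexpected reducer output is drained and discarded.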
 
Example 6   Project: big-c   File: TestMRAppWithCombiner.java
The snippet is identical to Example 4.
 
Example 7   Project: big-c   File: PipeMapRed.java
The snippet is identical to Example 5.
 
Example 8   Project: RDFS   File: PipeMapRed.java
void waitOutputThreads() {
  try {
    if (outThread_ == null) {
      // This happens only when the reducer had empty input, so reduce()
      // was never called in this task. If the reducer still generates
      // output (which is very uncommon), we don't write it to HDFS; we
      // merely consume it so the reducer cannot hang forever.

      OutputCollector collector = new OutputCollector() {
        public void collect(Object key, Object value)
          throws IOException {
          //just consume it, no need to write the record anywhere
        }
      };
      Reporter reporter = Reporter.NULL; // dummy reporter
      startOutputThreads(collector, reporter);
    }
    int exitVal = sim.waitFor();
    // how'd it go?
    if (exitVal != 0) {
      if (nonZeroExitIsFailure_) {
        throw new RuntimeException("PipeMapRed.waitOutputThreads(): subprocess failed with code "
                                   + exitVal);
      } else {
        logprintln("PipeMapRed.waitOutputThreads(): subprocess exited with code " + exitVal
                   + " in " + PipeMapRed.class.getName());
      }
    }
    if (outThread_ != null) {
      outThread_.join(joinDelay_);
    }
    if (errThread_ != null) {
      errThread_.join(joinDelay_);
    }
    if (outerrThreadsThrowable != null) {
      throw new RuntimeException(outerrThreadsThrowable);
    }
  } catch (InterruptedException e) {
    //ignore
  }
}
 
Example 9   Project: incubator-tez   File: JobContextImpl.java
public JobContextImpl(JobConf conf, TezDAGID dagId) {
  this(conf, dagId, Reporter.NULL);
}
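This convenience constructor simply defaults the reporter argument to the null object, letting Tez build a MapReduce-compatible job context when the caller has no Reporter to supply.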
 
Example 10   Project: tez   File: JobContextImpl.java
The snippet is identical to Example 9.
 
Example 11   Project: hadoop-gpu   File: PipeMapRed.java
The snippet is identical to Example 8.