org.apache.flink.api.java.hadoop.mapred.wrapper.HadoopDummyReporter Code Examples

Listed below are code examples of org.apache.flink.api.java.hadoop.mapred.wrapper.HadoopDummyReporter, collected from open-source projects. Each listing notes the project and source file it was taken from.
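
HadoopDummyReporter is a no-op implementation of org.apache.hadoop.mapred.Reporter. Flink's Hadoop compatibility wrappers hand it to mapred APIs that require a Reporter argument when no progress reporting is wanted. A minimal sketch of that behavior (the demo class below is hypothetical, not taken from any of the projects that follow):

import org.apache.flink.api.java.hadoop.mapred.wrapper.HadoopDummyReporter;
import org.apache.hadoop.mapred.Reporter;

// Hypothetical demo class illustrating the no-op behavior.
public class DummyReporterDemo {
	public static void main(String[] args) {
		Reporter reporter = new HadoopDummyReporter();
		reporter.progress();           // no-op
		reporter.setStatus("running"); // no-op
	}
}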

Source 1, Project: Flink-CEPplus, File: HadoopReduceCombineFunction.java
@SuppressWarnings("unchecked")
@Override
public void open(Configuration parameters) throws Exception {
	super.open(parameters);
	this.reducer.configure(jobConf);
	this.combiner.configure(jobConf);

	this.reporter = new HadoopDummyReporter();
	Class<KEYIN> inKeyClass = (Class<KEYIN>) TypeExtractor.getParameterType(Reducer.class, reducer.getClass(), 0);
	TypeSerializer<KEYIN> keySerializer = TypeExtractor.getForClass(inKeyClass).createSerializer(getRuntimeContext().getExecutionConfig());
	this.valueIterator = new HadoopTupleUnwrappingIterator<>(keySerializer);
	this.combineCollector = new HadoopOutputCollector<>();
	this.reduceCollector = new HadoopOutputCollector<>();
}
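
For context, a hedged usage sketch of HadoopReduceCombineFunction, following the pattern from Flink's Hadoop compatibility documentation (Counter is an assumed mapred Reducer implementation; words is an assumed DataSet<Tuple2<Text, LongWritable>>):

// Sketch: wrap a mapred Reducer as both reducer and combiner.
DataSet<Tuple2<Text, LongWritable>> counts = words
	.groupBy(0)
	.reduceGroup(new HadoopReduceCombineFunction<Text, LongWritable, Text, LongWritable>(
		new Counter(), new Counter()));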
 
Source 2, Project: Flink-CEPplus, File: HadoopReduceFunction.java
@SuppressWarnings("unchecked")
@Override
public void open(Configuration parameters) throws Exception {
	super.open(parameters);
	this.reducer.configure(jobConf);

	this.reporter = new HadoopDummyReporter();
	this.reduceCollector = new HadoopOutputCollector<KEYOUT, VALUEOUT>();
	Class<KEYIN> inKeyClass = (Class<KEYIN>) TypeExtractor.getParameterType(Reducer.class, reducer.getClass(), 0);
	TypeSerializer<KEYIN> keySerializer = TypeExtractor.getForClass(inKeyClass).createSerializer(getRuntimeContext().getExecutionConfig());
	this.valueIterator = new HadoopTupleUnwrappingIterator<KEYIN, VALUEIN>(keySerializer);
}
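
A similar hedged sketch for HadoopReduceFunction, which wraps a mapred Reducer without a combiner (Counter and words are assumed as above):

// Sketch: wrap a mapred Reducer, no combiner.
DataSet<Tuple2<Text, LongWritable>> counts = words
	.groupBy(0)
	.reduceGroup(new HadoopReduceFunction<Text, LongWritable, Text, LongWritable>(new Counter()));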
 
Source 3, Project: Flink-CEPplus, File: HadoopMapFunction.java
@Override
public void open(Configuration parameters) throws Exception {
	super.open(parameters);
	this.mapper.configure(jobConf);

	this.reporter = new HadoopDummyReporter();
	this.outputCollector = new HadoopOutputCollector<KEYOUT, VALUEOUT>();
}
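
A hedged usage sketch for HadoopMapFunction (Tokenizer is an assumed implementation of org.apache.hadoop.mapred.Mapper<LongWritable, Text, Text, LongWritable>; text is an assumed DataSet<Tuple2<LongWritable, Text>>):

// Sketch: wrap a mapred Mapper as a Flink FlatMapFunction.
DataSet<Tuple2<Text, LongWritable>> words = text
	.flatMap(new HadoopMapFunction<LongWritable, Text, Text, LongWritable>(new Tokenizer()));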
 
Source 4, Project: Flink-CEPplus, File: HadoopInputFormatBase.java
@Override
public void open(HadoopInputSplit split) throws IOException {

	// enforce sequential open() calls
	synchronized (OPEN_MUTEX) {

		this.recordReader = this.mapredInputFormat.getRecordReader(split.getHadoopInputSplit(), jobConf, new HadoopDummyReporter());
		if (this.recordReader instanceof Configurable) {
			((Configurable) this.recordReader).setConf(jobConf);
		}
		key = this.recordReader.createKey();
		value = this.recordReader.createValue();
		this.fetched = false;
	}
}
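
The open() method above runs when a source built on this input format starts reading a split. A hedged sketch of how such a source is typically created via flink-hadoop-compatibility (the HDFS path is a placeholder):

// Sketch: build a DataSet from a mapred TextInputFormat.
ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
DataSet<Tuple2<LongWritable, Text>> input = env.createInput(
	HadoopInputs.readHadoopFile(new TextInputFormat(), LongWritable.class, Text.class,
		"hdfs:///path/to/input"));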
 
Source 5, Project: Flink-CEPplus, File: HadoopOutputFormatBase.java
/**
 * Commits the task by moving the output file out of the temporary directory.
 * @throws java.io.IOException
 */
@Override
public void close() throws IOException {

	// enforce sequential close() calls
	synchronized (CLOSE_MUTEX) {
		this.recordWriter.close(new HadoopDummyReporter());

		if (this.outputCommitter.needsTaskCommit(this.context)) {
			this.outputCommitter.commitTask(this.context);
		}
	}
}
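
A hedged sketch of the writing side, where the close() above eventually commits the task (counts is an assumed DataSet<Tuple2<Text, LongWritable>>; the output path is a placeholder):

// Sketch: emit a DataSet through a mapred TextOutputFormat.
JobConf jobConf = new JobConf();
HadoopOutputFormat<Text, LongWritable> hadoopOF =
	new HadoopOutputFormat<Text, LongWritable>(new TextOutputFormat<Text, LongWritable>(), jobConf);
FileOutputFormat.setOutputPath(jobConf, new Path("hdfs:///path/to/output"));
counts.output(hadoopOF);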
 
Source 6, Project: flink, File: HadoopReduceCombineFunction.java
@SuppressWarnings("unchecked")
@Override
public void open(Configuration parameters) throws Exception {
	super.open(parameters);
	this.reducer.configure(jobConf);
	this.combiner.configure(jobConf);

	this.reporter = new HadoopDummyReporter();
	Class<KEYIN> inKeyClass = (Class<KEYIN>) TypeExtractor.getParameterType(Reducer.class, reducer.getClass(), 0);
	TypeSerializer<KEYIN> keySerializer = TypeExtractor.getForClass(inKeyClass).createSerializer(getRuntimeContext().getExecutionConfig());
	this.valueIterator = new HadoopTupleUnwrappingIterator<>(keySerializer);
	this.combineCollector = new HadoopOutputCollector<>();
	this.reduceCollector = new HadoopOutputCollector<>();
}
 
Source 7, Project: flink, File: HadoopReduceFunction.java
@SuppressWarnings("unchecked")
@Override
public void open(Configuration parameters) throws Exception {
	super.open(parameters);
	this.reducer.configure(jobConf);

	this.reporter = new HadoopDummyReporter();
	this.reduceCollector = new HadoopOutputCollector<KEYOUT, VALUEOUT>();
	Class<KEYIN> inKeyClass = (Class<KEYIN>) TypeExtractor.getParameterType(Reducer.class, reducer.getClass(), 0);
	TypeSerializer<KEYIN> keySerializer = TypeExtractor.getForClass(inKeyClass).createSerializer(getRuntimeContext().getExecutionConfig());
	this.valueIterator = new HadoopTupleUnwrappingIterator<KEYIN, VALUEIN>(keySerializer);
}
 
Source 8, Project: flink, File: HadoopMapFunction.java
@Override
public void open(Configuration parameters) throws Exception {
	super.open(parameters);
	this.mapper.configure(jobConf);

	this.reporter = new HadoopDummyReporter();
	this.outputCollector = new HadoopOutputCollector<KEYOUT, VALUEOUT>();
}
 
Source 9, Project: flink, File: HadoopInputFormatBase.java
@Override
public void open(HadoopInputSplit split) throws IOException {

	// enforce sequential open() calls
	synchronized (OPEN_MUTEX) {

		this.recordReader = this.mapredInputFormat.getRecordReader(split.getHadoopInputSplit(), jobConf, new HadoopDummyReporter());
		if (this.recordReader instanceof Configurable) {
			((Configurable) this.recordReader).setConf(jobConf);
		}
		key = this.recordReader.createKey();
		value = this.recordReader.createValue();
		this.fetched = false;
	}
}
 
Source 10, Project: flink, File: HadoopOutputFormatBase.java
/**
 * Commits the task by moving the output file out of the temporary directory.
 * @throws java.io.IOException
 */
@Override
public void close() throws IOException {

	// enforce sequential close() calls
	synchronized (CLOSE_MUTEX) {
		this.recordWriter.close(new HadoopDummyReporter());

		if (this.outputCommitter.needsTaskCommit(this.context)) {
			this.outputCommitter.commitTask(this.context);
		}
	}
}
 
Source 11, Project: cascading-flink, File: TapInputFormat.java
@Override
public void open(HadoopInputSplit split) throws IOException {

	this.jobConf = split.getJobConf();
	this.flowProcess = new FlinkFlowProcess(this.jobConf, this.getRuntimeContext(), flowNode.getID());

	processBeginTime = System.currentTimeMillis();
	flowProcess.increment( SliceCounters.Process_Begin_Time, processBeginTime );

	try {

		Set<FlowElement> sources = flowNode.getSourceElements();
		if(sources.size() != 1) {
			throw new RuntimeException("FlowNode for TapInputFormat may only have a single source");
		}
		FlowElement sourceElement = sources.iterator().next();
		if(!(sourceElement instanceof Tap)) {
			throw new RuntimeException("Source of TapInputFormat must be a Tap");
		}
		Tap source = (Tap)sourceElement;

		streamGraph = new SourceStreamGraph( flowProcess, flowNode, source );

		sourceStage = this.streamGraph.getSourceStage();
		sinkStage = this.streamGraph.getSinkStage();

		for( Duct head : streamGraph.getHeads() ) {
			LOG.info("sourcing from: " + ((ElementDuct) head).getFlowElement());
		}

		for( Duct tail : streamGraph.getTails() ) {
			LOG.info("sinking to: " + ((ElementDuct) tail).getFlowElement());
		}

	}
	catch( Throwable throwable ) {

		if( throwable instanceof CascadingException) {
			throw (CascadingException) throwable;
		}

		throw new FlowException( "internal error during TapInputFormat configuration", throwable );
	}

	RecordReader<?, ?> recordReader = this.mapredInputFormat.getRecordReader(split.getHadoopInputSplit(), jobConf, new HadoopDummyReporter());

	if (recordReader instanceof Configurable) {
		((Configurable) recordReader).setConf(jobConf);
	}
	else if (recordReader instanceof JobConfigurable) {
		((JobConfigurable) recordReader).configure(jobConf);
	}

	try {
		this.sourceStage.setRecordReader(recordReader);
	} catch(Throwable t) {
		if(t instanceof IOException) {
			throw (IOException)t;
		}
		else {
			throw new RuntimeException(t);
		}
	}

}
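
For context, a hedged sketch of how a Cascading flow is usually submitted to Flink with cascading-flink (properties and flowDef are assumed to be configured elsewhere; FlinkConnector is the connector class the project provides):

// Sketch: run a Cascading FlowDef on Flink.
FlowConnector flowConnector = new FlinkConnector(properties);
flowConnector.connect(flowDef).complete();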