Listed below are example usages of org.apache.hadoop.mapreduce.JobSubmissionFiles#getJobSplitFile(), collected from open-source code. The enclosing helper methods appear to come from Apache Tez's MapReduce input-split helpers, with two variants of each method reflecting different releases of that source.
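For orientation, JobSubmissionFiles.getJobSplitFile and its companion getJobSubmissionFiles-style accessor getJobSplitMetaFile simply resolve well-known file names inside the job submission directory. A minimal sketch of that behavior follows; the file names "job.split" and "job.splitmetainfo" match the Hadoop source as best I can tell, but treat them as assumptions:

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.JobSubmissionFiles;

public class SplitFileNames {
  public static void main(String[] args) {
    // Hypothetical job submission (staging) directory.
    Path submitDir = new Path("/tmp/hadoop-staging/job_0001");

    // Resolves the file holding the serialized splits; in the Hadoop
    // source this is <submitDir>/job.split.
    Path splitFile = JobSubmissionFiles.getJobSplitFile(submitDir);

    // Resolves the companion index file, <submitDir>/job.splitmetainfo,
    // which records offsets into job.split plus location hints.
    Path splitMetaFile = JobSubmissionFiles.getJobSplitMetaFile(submitDir);

    System.out.println(splitFile);
    System.out.println(splitMetaFile);
  }
}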
/**
 * Generate new-api mapreduce InputFormat splits
 * @param jobContext JobContext required by InputFormat
 * @param inputSplitDir Directory in which to generate splits information
 *
 * @return InputSplitInfo containing the split files' information and the
 * location hints for each generated split, used to determine the
 * parallelism of the map stage.
 *
 * @throws IOException
 * @throws InterruptedException
 * @throws ClassNotFoundException
 */
private static InputSplitInfoDisk writeNewSplits(JobContext jobContext,
    Path inputSplitDir) throws IOException, InterruptedException,
    ClassNotFoundException {
  org.apache.hadoop.mapreduce.InputSplit[] splits =
      generateNewSplits(jobContext, null, 0);
  Configuration conf = jobContext.getConfiguration();
  JobSplitWriter.createSplitFiles(inputSplitDir, conf,
      inputSplitDir.getFileSystem(conf), splits);
  List<TaskLocationHint> locationHints =
      new ArrayList<TaskLocationHint>(splits.length);
  for (int i = 0; i < splits.length; ++i) {
    locationHints.add(
        new TaskLocationHint(new HashSet<String>(
            Arrays.asList(splits[i].getLocations())), null));
  }
  return new InputSplitInfoDisk(
      JobSubmissionFiles.getJobSplitFile(inputSplitDir),
      JobSubmissionFiles.getJobSplitMetaFile(inputSplitDir),
      splits.length, locationHints, jobContext.getCredentials());
}
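The InputSplitInfoDisk returned above packages the split file paths, the split count, and the location hints for the caller. As a hedged sketch of how it might be consumed from within the same class (the accessor names follow Tez's InputSplitInfo interface as I recall it, and should be verified against your Tez version):

// Hedged sketch; accessor names are assumptions based on Tez's
// InputSplitInfo interface.
InputSplitInfoDisk splitInfo = writeNewSplits(jobContext, inputSplitDir);

Path splitsFile = splitInfo.getSplitsFile();        // <dir>/job.split
Path metaFile = splitInfo.getSplitsMetaInfoFile();  // <dir>/job.splitmetainfo
int numTasks = splitInfo.getNumTasks();             // one map task per split
List<TaskLocationHint> hints = splitInfo.getTaskLocationHints();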
/**
 * Generate old-api mapred InputFormat splits
 * @param jobConf JobConf required by InputFormat class
 * @param inputSplitDir Directory in which to generate splits information
 *
 * @return InputSplitInfo containing the split files' information and the
 * number of splits generated, used to determine the parallelism of
 * the map stage.
 *
 * @throws IOException
 */
private static InputSplitInfoDisk writeOldSplits(JobConf jobConf,
    Path inputSplitDir) throws IOException {
  org.apache.hadoop.mapred.InputSplit[] splits =
      generateOldSplits(jobConf, null, 0);
  JobSplitWriter.createSplitFiles(inputSplitDir, jobConf,
      inputSplitDir.getFileSystem(jobConf), splits);
  List<TaskLocationHint> locationHints =
      new ArrayList<TaskLocationHint>(splits.length);
  for (int i = 0; i < splits.length; ++i) {
    locationHints.add(
        new TaskLocationHint(new HashSet<String>(
            Arrays.asList(splits[i].getLocations())), null));
  }
  return new InputSplitInfoDisk(
      JobSubmissionFiles.getJobSplitFile(inputSplitDir),
      JobSubmissionFiles.getJobSplitMetaFile(inputSplitDir),
      splits.length, locationHints, jobConf.getCredentials());
}
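The old-api variant drives everything off the JobConf, so the InputFormat class and input paths must already be configured on it before the call. A minimal, hypothetical setup might look like this (TextInputFormat and the input path are illustrative only):

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.TextInputFormat;

JobConf jobConf = new JobConf();
// Old-api InputFormat whose getSplits() generateOldSplits will invoke.
jobConf.setInputFormat(TextInputFormat.class);
// Input paths the format enumerates when computing splits (hypothetical).
FileInputFormat.setInputPaths(jobConf, new Path("/data/input"));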
/**
 * Generate old-api mapred InputFormat splits
 * @param jobConf JobConf required by InputFormat class
 * @param inputSplitDir Directory in which to generate splits information
 *
 * @return InputSplitInfo containing the split files' information and the
 * number of splits generated, used to determine the parallelism of
 * the map stage.
 *
 * @throws IOException
 */
private static InputSplitInfoDisk writeOldSplits(JobConf jobConf,
    Path inputSplitDir) throws IOException {
  org.apache.hadoop.mapred.InputSplit[] splits =
      generateOldSplits(jobConf, false, true, 0);
  JobSplitWriter.createSplitFiles(inputSplitDir, jobConf,
      inputSplitDir.getFileSystem(jobConf), splits);
  List<TaskLocationHint> locationHints =
      new ArrayList<TaskLocationHint>(splits.length);
  for (int i = 0; i < splits.length; ++i) {
    locationHints.add(
        TaskLocationHint.createTaskLocationHint(new HashSet<String>(
            Arrays.asList(splits[i].getLocations())), null));
  }
  return new InputSplitInfoDisk(
      JobSubmissionFiles.getJobSplitFile(inputSplitDir),
      JobSubmissionFiles.getJobSplitMetaFile(inputSplitDir),
      splits.length, locationHints, jobConf.getCredentials());
}
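Note how this version differs from the earlier writeOldSplits listing: newer Tez releases replaced the public TaskLocationHint constructor with a static factory method. A hedged one-liner showing the factory form (the host names are hypothetical; the second argument is the rack set, null here as in the listing):

TaskLocationHint hint = TaskLocationHint.createTaskLocationHint(
    new HashSet<String>(Arrays.asList("host1", "host2")), null);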
/**
 * Generate new-api mapreduce InputFormat splits
 * @param jobContext JobContext required by InputFormat
 * @param inputSplitDir Directory in which to generate splits information
 *
 * @return InputSplitInfo containing the split files' information and the
 * location hints for each generated split, used to determine the
 * parallelism of the map stage.
 *
 * @throws IOException
 * @throws InterruptedException
 * @throws ClassNotFoundException
 */
private static InputSplitInfoDisk writeNewSplits(JobContext jobContext,
    Path inputSplitDir) throws IOException, InterruptedException,
    ClassNotFoundException {
  org.apache.hadoop.mapreduce.InputSplit[] splits =
      generateNewSplits(jobContext, false, true, 0);
  Configuration conf = jobContext.getConfiguration();
  JobSplitWriter.createSplitFiles(inputSplitDir, conf,
      inputSplitDir.getFileSystem(conf), splits);
  List<TaskLocationHint> locationHints =
      new ArrayList<TaskLocationHint>(splits.length);
  for (int i = 0; i < splits.length; ++i) {
    locationHints.add(
        TaskLocationHint.createTaskLocationHint(new HashSet<String>(
            Arrays.asList(splits[i].getLocations())), null));
  }
  return new InputSplitInfoDisk(
      JobSubmissionFiles.getJobSplitFile(inputSplitDir),
      JobSubmissionFiles.getJobSplitMetaFile(inputSplitDir),
      splits.length, locationHints, jobContext.getCredentials());
}
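Once writeNewSplits or writeOldSplits has run, a consumer reads the meta file first to locate each serialized split inside job.split. Hadoop ships a reader for exactly this; below is a hedged sketch using SplitMetaInfoReader, whose signature matches the Hadoop source as best I can tell but is worth verifying:

import java.util.Arrays;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.split.JobSplit;
import org.apache.hadoop.mapreduce.split.SplitMetaInfoReader;

Configuration conf = new Configuration();
Path inputSplitDir = new Path("/tmp/hadoop-staging/job_0001");  // hypothetical
FileSystem fs = inputSplitDir.getFileSystem(conf);

// Reads <dir>/job.splitmetainfo and returns, for each split, its offset
// into <dir>/job.split plus the locations recorded at write time.
JobSplit.TaskSplitMetaInfo[] metaInfo = SplitMetaInfoReader.readSplitMetaInfo(
    JobID.forName("job_1400000000000_0001"),  // hypothetical job id
    fs, conf, inputSplitDir);

for (JobSplit.TaskSplitMetaInfo info : metaInfo) {
  System.out.println(Arrays.toString(info.getLocations()));
}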