Listed below are code examples of org.apache.hadoop.mapreduce.Job#isComplete(); follow the GitHub links to view each example's full source.
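Before the individual examples, here is a minimal sketch of the pattern most of them share: submit the job without blocking, poll Job#isComplete() at a fixed interval, then check Job#isSuccessful(). The class name, poll interval, and the choice to wrap failure in an IOException are illustrative placeholders, not taken from any of the examples below.

import java.io.IOException;
import org.apache.hadoop.mapreduce.Job;

public final class JobCompletionWaiter {

  private static final long POLL_INTERVAL_MS = 1000L; // illustrative interval, not from the examples

  private JobCompletionWaiter() {
  }

  public static void submitAndWait(Job job)
      throws IOException, InterruptedException, ClassNotFoundException {
    job.submit(); // non-blocking, unlike Job#waitForCompletion(boolean)
    while (!job.isComplete()) { // true once the job has finished, whether it succeeded, failed, or was killed
      Thread.sleep(POLL_INTERVAL_MS);
    }
    if (!job.isSuccessful()) { // distinguish a successful run from a failed or killed one
      throw new IOException("Job failed: " + job.getJobName());
    }
  }
}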
@Override
public void cancel(String jobId) throws IOException {
  JobID id = JobID.forName(jobId);
  Cluster cluster = new Cluster(this.getConf());
  try {
    Job job = cluster.getJob(id);
    if (job == null) {
      LOG.error("No job found for " + id);
      // should we throw exception
      return;
    }
    if (job.isComplete() || job.isRetired()) {
      return;
    }
    job.killJob();
    LOG.debug("Killed copy job " + id);
  } catch (InterruptedException e) {
    throw new IOException(e);
  }
}
private void submitAndWait(Job job) throws ClassNotFoundException, IOException, InterruptedException {
  job.submit();
  MRCompactor.addRunningHadoopJob(this.dataset, job);
  LOG.info(String.format("MR job submitted for dataset %s, input %s, url: %s", this.dataset, getInputPaths(),
      job.getTrackingURL()));
  while (!job.isComplete()) {
    if (this.policy == Policy.ABORT_ASAP) {
      LOG.info(String.format(
          "MR job for dataset %s, input %s killed due to input data incompleteness." + " Will try again later",
          this.dataset, getInputPaths()));
      job.killJob();
      return;
    }
    Thread.sleep(MR_JOB_CHECK_COMPLETE_INTERVAL_MS);
  }
  if (!job.isSuccessful()) {
    throw new RuntimeException(String.format("MR job failed for topic %s, input %s, url: %s", this.dataset,
        getInputPaths(), job.getTrackingURL()));
  }
}
@Override
public void cancel() throws IOException {
  try {
    for (Map.Entry<Dataset, Job> entry : MRCompactor.RUNNING_MR_JOBS.entrySet()) {
      Job hadoopJob = entry.getValue();
      if (!hadoopJob.isComplete()) {
        LOG.info(String.format("Killing hadoop job %s for dataset %s", hadoopJob.getJobID(), entry.getKey()));
        hadoopJob.killJob();
      }
    }
  } finally {
    try {
      ExecutorsUtils.shutdownExecutorService(this.jobExecutor, Optional.of(LOG), 0, TimeUnit.NANOSECONDS);
    } finally {
      if (this.verifier.isPresent()) {
        this.verifier.get().closeNow();
      }
    }
  }
}
private void waitForJob(Job job) throws InterruptedException, IOException {
  while (!job.isComplete()) {
    LOG.debug("waiting for job {}", job.getJobName());
    sleep(100);
  }
  LOG.info("status for job {}: {}", job.getJobName(), (job.isSuccessful() ? "SUCCESS" : "FAILURE"));
  if (!job.isSuccessful()) {
    throw new RuntimeException("job failed " + job.getJobName());
  }
}
private void waitForJob(Job job) throws Exception {
  job.submit();
  while (!job.isComplete()) {
    sleep(100);
  }
  if (!job.isSuccessful()) {
    throw new RuntimeException("job failed " + job.getJobName());
  }
}
private void waitForJob(Job job) throws InterruptedException, IOException {
  while (!job.isComplete()) {
    System.out.println("waiting for job " + job.getJobName());
    sleep(100);
  }
  System.out.println("status for job " + job.getJobName() + ": " + (job.isSuccessful() ? "SUCCESS" : "FAILURE"));
  if (!job.isSuccessful()) {
    throw new RuntimeException("job failed " + job.getJobName());
  }
}
public static void waitForJob(Job job) throws Exception {
  job.submit();
  while (!job.isComplete()) {
    LOG.debug("waiting for job {}", job.getJobName());
    sleep(100);
  }
  LOG.info("status for job {}: {}", job.getJobName(), (job.isSuccessful() ? "SUCCESS" : "FAILURE"));
  if (!job.isSuccessful()) {
    throw new RuntimeException("job failed " + job.getJobName());
  }
}
static void waitForJob(Job job) throws Exception {
  job.submit();
  while (!job.isComplete()) {
    LOG.debug("waiting for job {}", job.getJobName());
    sleep(50);
  }
  LOG.debug("status for job " + job.getJobName() + ": " + (job.isSuccessful() ? "SUCCESS" : "FAILURE"));
  if (!job.isSuccessful()) {
    throw new RuntimeException("job failed " + job.getJobName());
  }
}
private void waitForJob(Job job) throws Exception {
  job.submit();
  while (!job.isComplete()) {
    LOG.debug("waiting for job {}", job.getJobName());
    sleep(100);
  }
  LOG.info("status for job {}: {}", job.getJobName(), (job.isSuccessful() ? "SUCCESS" : "FAILURE"));
  if (!job.isSuccessful()) {
    throw new RuntimeException("job failed " + job.getJobName());
  }
}
@Override
public TaskRegionObserver.TaskResult checkCurrentResult(Task.TaskRecord taskRecord)
    throws Exception {
  String jobID = getJobID(taskRecord.getData());
  if (jobID != null) {
    Configuration conf = HBaseConfiguration.create(env.getConfiguration());
    Configuration configuration = HBaseConfiguration.addHbaseResources(conf);
    Cluster cluster = new Cluster(configuration);
    Job job = cluster.getJob(org.apache.hadoop.mapreduce.JobID.forName(jobID));
    if (job == null) {
      return new TaskRegionObserver.TaskResult(TaskRegionObserver.TaskResultCode.SKIPPED, "");
    }
    if (job != null && job.isComplete()) {
      if (job.isSuccessful()) {
        LOGGER.warn("IndexRebuildTask checkCurrentResult job is successful "
            + taskRecord.getTableName());
        return new TaskRegionObserver.TaskResult(TaskRegionObserver.TaskResultCode.SUCCESS, "");
      } else {
        return new TaskRegionObserver.TaskResult(TaskRegionObserver.TaskResultCode.FAIL,
            "Index is DISABLED");
      }
    }
  }
  return null;
}
public void testBlurOutputFormatCleanupDuringJobKillTest() throws IOException, InterruptedException,
    ClassNotFoundException {
  Path input = getInDir();
  Path output = getOutDir();
  _fileSystem.delete(input, true);
  _fileSystem.delete(output, true);
  // 1500 * 50 = 75,000
  writeRecordsFile(new Path(input, "part1"), 1, 50, 1, 1500, "cf1");
  // 100 * 5000 = 500,000
  writeRecordsFile(new Path(input, "part2"), 1, 5000, 2000, 100, "cf1");

  Job job = Job.getInstance(_conf, "blur index");
  job.setJarByClass(BlurOutputFormatTest.class);
  job.setMapperClass(CsvBlurMapper.class);
  job.setInputFormatClass(TextInputFormat.class);
  FileInputFormat.addInputPath(job, input);
  CsvBlurMapper.addColumns(job, "cf1", "col");

  Path tablePath = new Path(new Path(_root, "table"), "test");
  TableDescriptor tableDescriptor = new TableDescriptor();
  tableDescriptor.setShardCount(2);
  tableDescriptor.setTableUri(tablePath.toString());
  tableDescriptor.setName("test");
  createShardDirectories(getOutDir(), 2);
  BlurOutputFormat.setupJob(job, tableDescriptor);
  BlurOutputFormat.setOutputPath(job, output);
  BlurOutputFormat.setIndexLocally(job, false);

  job.submit();
  boolean killCalled = false;
  // Poll until the job finishes, killing it once the reduce phase is mostly done.
  while (!job.isComplete()) {
    Thread.sleep(1000);
    System.out.printf("Killed [" + killCalled + "] Map [%f] Reduce [%f]%n", job.mapProgress() * 100,
        job.reduceProgress() * 100);
    if (job.reduceProgress() > 0.7 && !killCalled) {
      job.killJob();
      killCalled = true;
    }
  }

  assertFalse(job.isSuccessful());
  // The killed job should have cleaned up its output: every shard directory must be empty.
  for (int i = 0; i < tableDescriptor.getShardCount(); i++) {
    Path path = new Path(output, ShardUtil.getShardName(i));
    FileSystem fileSystem = path.getFileSystem(job.getConfiguration());
    FileStatus[] listStatus = fileSystem.listStatus(path);
    assertEquals(toString(listStatus), 0, listStatus.length);
  }
}