org.apache.hadoop.mapred.RunningJob#killJob() Source Code Examples

Listed below are example usages of org.apache.hadoop.mapred.RunningJob#killJob(), collected from open-source projects on GitHub.
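All of the examples share the same core pattern: obtain a RunningJob handle from a JobClient, check its state, and call killJob(). The sketch below distills that pattern; the method name killIfRunning and the jobIdStr parameter are our own placeholders, not taken from any of the projects listed.

import java.io.IOException;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.JobID;
import org.apache.hadoop.mapred.RunningJob;

// killIfRunning is an illustrative helper, not part of any project below.
public static void killIfRunning(JobConf conf, String jobIdStr) throws IOException {
  JobClient jc = new JobClient(conf);
  try {
    // getJob returns null when the cluster no longer tracks the job.
    RunningJob job = jc.getJob(JobID.forName(jobIdStr));
    if (job != null && !job.isComplete()) {
      job.killJob();
    }
  } finally {
    jc.close();
  }
}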

Example 1 (Project: hadoop, File: DataJoinJob.java)
/**
 * Submit/run a map/reduce job.
 *
 * @param job the job configuration to submit
 * @return true for success
 * @throws IOException
 */
public static boolean runJob(JobConf job) throws IOException {
  JobClient jc = new JobClient(job);
  boolean success = true;
  RunningJob running = null;
  try {
    running = jc.submitJob(job);
    JobID jobId = running.getID();
    System.out.println("Job " + jobId + " is submitted");
    // Poll the job status once a minute until it completes.
    while (!running.isComplete()) {
      System.out.println("Job " + jobId + " is still running.");
      try {
        Thread.sleep(60000);
      } catch (InterruptedException e) {
        // Ignore the interruption and re-check the job status.
      }
      running = jc.getJob(jobId);
    }
    success = running.isSuccessful();
  } finally {
    // Kill the job if it completed without success.
    if (!success && (running != null)) {
      running.killJob();
    }
    jc.close();
  }
  return success;
}
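The loop polls once a minute and refreshes the RunningJob handle through JobClient.getJob on every pass. Note that the kill in the finally block fires only when the job completed unsuccessfully: success starts out true, so an exception thrown mid-loop leaves the job running.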
 
Example 2 (Project: big-c, File: DataJoinJob.java)
The runJob method here is identical to the one in Example 1 and is omitted.
 
源代码3 项目: spork   文件: MapReduceLauncher.java
@Override
public void killJob(String jobID, Configuration conf) throws BackendException {
    try {
        if (conf != null) {
            JobConf jobConf = new JobConf(conf);
            JobClient jc = new JobClient(jobConf);
            JobID id = JobID.forName(jobID);
            RunningJob job = jc.getJob(id);
            if (job == null)
                System.out.println("Job with id " + jobID + " is not active");
            else
            {
                job.killJob();
                log.info("Kill " + id + " submitted.");
            }
        }
    } catch (IOException e) {
        throw new BackendException(e);
    }
}
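Unlike the DataJoinJob helper above, the launcher receives only a job ID string and a Configuration, so it rebuilds a JobClient on the spot, parses the ID with JobID.forName, and treats a null result from getJob as a job that is no longer active.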
 
Example 4 (Project: RDFS, File: DataJoinJob.java)
The runJob method here is identical to the one in Example 1 and is omitted.
 
Example 5 (Project: hadoop-gpu, File: DataJoinJob.java)
The runJob method here is identical to the one in Example 1 and is omitted.
 
Example 6 (Project: sqoop)
/**
 * {@inheritDoc}
 */
@Override
public void stop(String externalJobId) {
  try {
    RunningJob runningJob = jobClient.getJob(JobID.forName(externalJobId));
    if (runningJob == null) {
      // The job is no longer known to the cluster; nothing to stop.
      return;
    }

    runningJob.killJob();
  } catch (IOException e) {
    throw new SqoopException(MapreduceSubmissionError.MAPREDUCE_0003, e);
  }
}
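This stop() implementation (evidently Sqoop's, given the SqoopException) treats an unknown job as already stopped and returns quietly, and it wraps any IOException from the cluster in a SqoopException error code rather than declaring a checked exception.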
 
Example 7 (Project: oink, File: PigJobServerImpl.java)
@Override
public boolean cancelRequest(String requestId) throws IOException {
    PigRequestStats stats = this.getRequestStats(requestId);

    if (stats.getStatus().equals(Status.SUBMITTED.toString())) {
        List<String> jobs = stats.getJobs();
        for (String job : jobs) {
            // Strip the JobTracker UI prefix to recover the bare job ID.
            job = job.substring(JT_UI.length());
            JobConf jobConf = new JobConf();
            jobConf.set("fs.default.name", PropertyLoader.getInstance().getProperty("fs.default.name"));
            jobConf.set("mapred.job.tracker", PropertyLoader.getInstance().getProperty("jobtracker"));
            try {
                JobClient jobClient = new JobClient(jobConf);
                RunningJob rJob = jobClient.getJob(JobID.forName(job));

                // A null handle means the cluster no longer tracks the job.
                if (rJob != null && !rJob.isComplete()) {
                    rJob.killJob();
                }
            } catch (Exception e) {
                // Preserve the underlying failure as the cause.
                throw new IOException("Unable to kill job " + job, e);
            }
        }
        PigRequestStats requestStats = new PigRequestStats(0, 0, null, jobs.size());
        requestStats.setJobs(jobs);
        requestStats.setStatus(Status.KILLED.toString());
        Path statsPath = new Path(PropertyLoader.getInstance().getProperty(Constants.REQUEST_PATH) + requestId + "/stats");
        PigUtils.writeStatsFile(statsPath, requestStats);
        return true;
    } else {
        return false;
    }
}
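Here a single kill failure aborts the entire cancellation: any exception while contacting the cluster surfaces as an IOException, and the request's stats are rewritten with KILLED status only after every submitted job has been processed.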
 
Example 8 (Project: spork, File: HadoopShims.java)
public static void killJob(Job job) throws IOException {
    RunningJob runningJob = job.getJobClient().getJob(job.getAssignedJobID());
    // getJob returns null once the cluster has retired the job.
    if (runningJob != null)
        runningJob.killJob();
}
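This shim resolves the underlying RunningJob through the Job wrapper's getJobClient() and getAssignedJobID() accessors, so callers holding only the higher-level handle can still issue a kill, and it does nothing when the cluster no longer tracks the job.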