org.apache.hadoop.mapreduce.security.TokenCache#getShuffleSecretKey() 源码实例 Demo

下面列出了 org.apache.hadoop.mapreduce.security.TokenCache#getShuffleSecretKey() 的实例代码，或者点击链接到 GitHub 查看源代码，也可以在右侧发表评论。

源代码1 项目: hadoop   文件: JobImpl.java
protected void setup(JobImpl job) throws IOException {

      // Resolve the job's submit directory under the submitting user's
      // staging area.
      String jobIdText = job.oldJobId.toString();
      String shortUserName =
          UserGroupInformation.getCurrentUser().getShortUserName();
      Path stagingDir = MRApps.getStagingAreaDir(job.conf, shortUserName);
      if (LOG.isDebugEnabled()) {
        LOG.debug("startJobs: parent=" + stagingDir + " child=" + jobIdText);
      }

      job.remoteJobSubmitDir =
          FileSystem.get(job.conf).makeQualified(
              new Path(stagingDir, jobIdText));
      job.remoteJobConfFile =
          new Path(job.remoteJobSubmitDir, MRJobConfig.JOB_CONF_FILE);

      // Build the job token whose password comes from the secret manager;
      // the TaskAttemptListener uses it to authenticate containers.
      JobTokenIdentifier tokenIdentifier =
          new JobTokenIdentifier(new Text(jobIdText));
      job.jobToken =
          new Token<JobTokenIdentifier>(tokenIdentifier,
              job.jobTokenSecretManager);
      job.jobToken.setService(tokenIdentifier.getJobId());
      // Register the token so the TaskAttemptListener server can verify
      // incoming connections from this job's tasks.
      job.jobTokenSecretManager.addTokenForJob(jobIdText, job.jobToken);
      LOG.info("Adding job token for " + jobIdText
          + " to jobTokenSecretManager");

      // When the client supplied no dedicated shuffle secret, fall back to
      // the job token's secret for shuffle authentication.
      if (TokenCache.getShuffleSecretKey(job.jobCredentials) == null) {
        LOG.warn("Shuffle secret key missing from job credentials."
            + " Using job token secret as shuffle secret.");
        TokenCache.setShuffleSecretKey(job.jobToken.getPassword(),
            job.jobCredentials);
      }
    }
 
源代码2 项目: hadoop   文件: MRAppMaster.java
private void processRecovery() throws IOException{
  // The first attempt has no previous life to recover from.
  if (appAttemptID.getAttemptId() == 1) {
    return;  // no need to recover on the first attempt
  }

  boolean isRecoveryEnabled = getConfig().getBoolean(
      MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE,
      MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE_DEFAULT);
  boolean committerSupportsRecovery = isRecoverySupported();

  // A shuffle secret generated by this attempt (instead of supplied by the
  // job client) is attempt-specific, so recovery is only safe with reducers
  // when the credentials already carry a shuffle key.
  int reduceCount = getConfig().getInt(MRJobConfig.NUM_REDUCES, 0);
  boolean shuffleKeyUsableForRecovery =
      TokenCache.getShuffleSecretKey(jobCredentials) != null;

  boolean attemptRecovery = isRecoveryEnabled && committerSupportsRecovery
      && (reduceCount <= 0 || shuffleKeyUsableForRecovery);
  if (attemptRecovery) {
    LOG.info("Recovery is enabled. "
        + "Will try to recover from previous life on best effort basis.");
    try {
      parsePreviousJobHistory();
    } catch (IOException e) {
      LOG.warn("Unable to parse prior job history, aborting recovery", e);
      // History is unusable; fall back to collecting only the AM infos.
      amInfos.addAll(readJustAMInfos());
    }
  } else {
    LOG.info("Will not try to recover. recoveryEnabled: "
          + isRecoveryEnabled + " recoverySupportedByCommitter: "
          + committerSupportsRecovery + " numReduceTasks: "
          + reduceCount + " shuffleKeyValidForRecovery: "
          + shuffleKeyUsableForRecovery + " ApplicationAttemptID: "
          + appAttemptID.getAttemptId());
    // Collect the AM infos even though recovery itself is skipped.
    amInfos.addAll(readJustAMInfos());
  }
}
 
源代码3 项目: big-c   文件: JobImpl.java
protected void setup(JobImpl job) throws IOException {

      // Locate this job's submit directory inside the user's staging area.
      String jobIdStr = job.oldJobId.toString();
      String currentUser =
          UserGroupInformation.getCurrentUser().getShortUserName();
      Path stagingArea = MRApps.getStagingAreaDir(job.conf, currentUser);
      if (LOG.isDebugEnabled()) {
        LOG.debug("startJobs: parent=" + stagingArea + " child=" + jobIdStr);
      }

      job.remoteJobSubmitDir =
          FileSystem.get(job.conf).makeQualified(
              new Path(stagingArea, jobIdStr));
      job.remoteJobConfFile =
          new Path(job.remoteJobSubmitDir, MRJobConfig.JOB_CONF_FILE);

      // Create the job token (password provided by the secret manager) that
      // the TaskAttemptListener server uses to authenticate containers.
      JobTokenIdentifier jobTokenId =
          new JobTokenIdentifier(new Text(jobIdStr));
      job.jobToken =
          new Token<JobTokenIdentifier>(jobTokenId, job.jobTokenSecretManager);
      job.jobToken.setService(jobTokenId.getJobId());
      // Make the token known to the secret manager so task (container)
      // connections for this job can be verified.
      job.jobTokenSecretManager.addTokenForJob(jobIdStr, job.jobToken);
      LOG.info("Adding job token for " + jobIdStr
          + " to jobTokenSecretManager");

      // Reuse the job token secret for shuffle if the job client did not
      // set up a dedicated shuffle secret.
      if (TokenCache.getShuffleSecretKey(job.jobCredentials) == null) {
        LOG.warn("Shuffle secret key missing from job credentials."
            + " Using job token secret as shuffle secret.");
        TokenCache.setShuffleSecretKey(job.jobToken.getPassword(),
            job.jobCredentials);
      }
    }
 
源代码4 项目: big-c   文件: MRAppMaster.java
private void processRecovery() throws IOException{
  // Nothing to recover when this is the very first attempt.
  if (appAttemptID.getAttemptId() == 1) {
    return;  // no need to recover on the first attempt
  }

  boolean recoveryFlag = getConfig().getBoolean(
      MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE,
      MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE_DEFAULT);
  boolean committerCanRecover = isRecoverySupported();

  // Without a client-provided shuffle secret this attempt generates its own,
  // which is attempt-specific — that rules out recovery whenever reducers
  // are present.
  int reducerCount = getConfig().getInt(MRJobConfig.NUM_REDUCES, 0);
  boolean haveShuffleKey =
      TokenCache.getShuffleSecretKey(jobCredentials) != null;

  boolean shouldRecover = recoveryFlag && committerCanRecover
      && (reducerCount <= 0 || haveShuffleKey);
  if (shouldRecover) {
    LOG.info("Recovery is enabled. "
        + "Will try to recover from previous life on best effort basis.");
    try {
      parsePreviousJobHistory();
    } catch (IOException e) {
      LOG.warn("Unable to parse prior job history, aborting recovery", e);
      // History parse failed — degrade to reading just the AM infos.
      amInfos.addAll(readJustAMInfos());
    }
  } else {
    LOG.info("Will not try to recover. recoveryEnabled: "
          + recoveryFlag + " recoverySupportedByCommitter: "
          + committerCanRecover + " numReduceTasks: "
          + reducerCount + " shuffleKeyValidForRecovery: "
          + haveShuffleKey + " ApplicationAttemptID: "
          + appAttemptID.getAttemptId());
    // The AM infos are gathered regardless of whether recovery runs.
    amInfos.addAll(readJustAMInfos());
  }
}
 
源代码5 项目: hadoop   文件: YarnChild.java
private static void configureTask(JobConf job, Task task,
    Credentials credentials, Token<JobTokenIdentifier> jt) throws IOException {
  job.setCredentials(credentials);

  // Derive the application attempt id from the container id that YARN
  // exports in the environment.
  ApplicationAttemptId appAttemptId =
      ConverterUtils.toContainerId(
          System.getenv(Environment.CONTAINER_ID.name()))
          .getApplicationAttemptId();
  LOG.debug("APPLICATION_ATTEMPT_ID: " + appAttemptId);
  // Expose the attempt id in the conf so the OutputCommitter can use it.
  job.setInt(MRJobConfig.APPLICATION_ATTEMPT_ID,
      appAttemptId.getAttemptId());

  // Enable TCP_NODELAY on IPC and route local task output through YARN.
  job.setBoolean("ipc.client.tcpnodelay", true);
  job.setClass(MRConfig.TASK_LOCAL_OUTPUT_CLASS,
      YarnOutputFiles.class, MapOutputFile.class);

  // Install the job token secret; when no dedicated shuffle secret exists
  // in the credentials, reuse the job token password for shuffle.
  task.setJobTokenSecret(
      JobTokenSecretManager.createSecretKey(jt.getPassword()));
  byte[] shuffleKeyBytes = TokenCache.getShuffleSecretKey(credentials);
  if (shuffleKeyBytes == null) {
    LOG.warn("Shuffle secret missing from task credentials."
        + " Using job token secret as shuffle secret.");
    shuffleKeyBytes = jt.getPassword();
  }
  task.setShuffleSecret(
      JobTokenSecretManager.createSecretKey(shuffleKeyBytes));

  // Point the child at its MRConfig.LOCAL_DIR directories.
  configureLocalDirs(task, job);

  // Task-type specific localization of the configuration.
  task.localizeConfiguration(job);

  // Wire up the localized DistributedCache entries.
  MRApps.setupDistributedCacheLocal(job);

  // Rewrite the localized task jobconf linked into the current work dir so
  // the task reads the fully configured version.
  Path localTaskFile = new Path(MRJobConfig.JOB_CONF_FILE);
  writeLocalJobFile(localTaskFile, job);
  task.setJobFile(localTaskFile.toString());
  task.setConf(job);
}
 
源代码6 项目: big-c   文件: YarnChild.java
private static void configureTask(JobConf job, Task task,
    Credentials credentials, Token<JobTokenIdentifier> jt) throws IOException {
  job.setCredentials(credentials);

  // Recover the application attempt from the container id found in the
  // process environment.
  ApplicationAttemptId attemptId =
      ConverterUtils.toContainerId(
          System.getenv(Environment.CONTAINER_ID.name()))
          .getApplicationAttemptId();
  LOG.debug("APPLICATION_ATTEMPT_ID: " + attemptId);
  // Stored in the conf so the OutputCommitter can read the attempt number.
  job.setInt(MRJobConfig.APPLICATION_ATTEMPT_ID,
      attemptId.getAttemptId());

  // Turn on TCP_NODELAY for IPC and select the YARN local-output layout.
  job.setBoolean("ipc.client.tcpnodelay", true);
  job.setClass(MRConfig.TASK_LOCAL_OUTPUT_CLASS,
      YarnOutputFiles.class, MapOutputFile.class);

  // Hand the job token secret to the task; fall back to the job token
  // password as the shuffle secret when none was placed in the credentials.
  task.setJobTokenSecret(
      JobTokenSecretManager.createSecretKey(jt.getPassword()));
  byte[] shuffleKey = TokenCache.getShuffleSecretKey(credentials);
  if (shuffleKey == null) {
    LOG.warn("Shuffle secret missing from task credentials."
        + " Using job token secret as shuffle secret.");
    shuffleKey = jt.getPassword();
  }
  task.setShuffleSecret(
      JobTokenSecretManager.createSecretKey(shuffleKey));

  // Set up the child's MRConfig.LOCAL_DIR directories.
  configureLocalDirs(task, job);

  // Perform the task-type specific localization.
  task.localizeConfiguration(job);

  // Configure the localized DistributedCache entries.
  MRApps.setupDistributedCacheLocal(job);

  // Overwrite the localized task jobconf that is linked into the current
  // work dir so the task picks up the finished configuration.
  Path localTaskFile = new Path(MRJobConfig.JOB_CONF_FILE);
  writeLocalJobFile(localTaskFile, job);
  task.setJobFile(localTaskFile.toString());
  task.setConf(job);
}