类org.apache.hadoop.mapred.TaskStatus.State源码实例Demo

下面列出了如何使用 org.apache.hadoop.mapred.TaskStatus.State API 的类实例代码及写法,也可以点击链接到 GitHub 查看源代码。

源代码1 项目: hadoop   文件: ZombieJob.java
@SuppressWarnings("hiding") 
private TaskAttemptInfo scaleInfo(LoggedTask loggedTask,
    LoggedTaskAttempt loggedAttempt, int locality, int loggedLocality,
    double rackLocalOverNodeLocal, double rackRemoteOverNodeLocal) {
  TaskInfo taskInfo = getTaskInfo(loggedTask);
  // Relative runtime cost per locality level: index 0 = node-local,
  // 1 = rack-local, 2 = off-rack.
  double[] localityCost = new double[] { 1.0, rackLocalOverNodeLocal,
      rackRemoteOverNodeLocal };
  // Rescale the logged runtime from its recorded locality to the requested one.
  double scale = localityCost[locality] / localityCost[loggedLocality];
  State state = convertState(loggedAttempt.getResult());
  if (loggedTask.getTaskType() != Values.MAP) {
    throw new IllegalArgumentException("taskType can only be MAP: "
        + loggedTask.getTaskType());
  }
  // A zero start time means the log carries no usable runtime; synthesize one
  // from the job's CDFs instead.
  long runtime = (loggedAttempt.getStartTime() == 0)
      ? makeUpMapRuntime(state, locality)
      : loggedAttempt.getFinishTime() - loggedAttempt.getStartTime();
  runtime = sanitizeTaskRuntime(runtime, loggedAttempt.getAttemptID());
  runtime *= scale;
  return new MapTaskAttemptInfo(state, taskInfo, runtime,
      loggedAttempt.allSplitVectors());
}
 
源代码2 项目: hadoop   文件: ZombieJob.java
private long doMakeUpReduceRuntime(State state) {
  try {
    // Sample a runtime from the CDF matching the attempt's outcome.
    if (state == State.SUCCEEDED) {
      return makeUpRuntime(job.getSuccessfulReduceAttemptCDF());
    }
    if (state == State.FAILED) {
      return makeUpRuntime(job.getFailedReduceAttemptCDF());
    }
    // Propagates past the catch below: only NoValueToMakeUpRuntime is caught.
    throw new IllegalArgumentException(
        "state is neither SUCCEEDED nor FAILED: " + state);
  } catch (NoValueToMakeUpRuntime e) {
    // No data to sample from; report a zero runtime.
    return 0;
  }
}
 
源代码3 项目: hadoop   文件: ZombieJob.java
private State makeUpState(int taskAttemptNumber, double[] numAttempts) {
  // No attempt-count distribution at all: treat the attempt as failed.
  if (numAttempts == null) {
    return State.FAILED;
  }
  // Attempts at or beyond the end of the distribution always succeed.
  if (taskAttemptNumber >= numAttempts.length - 1) {
    return State.SUCCEEDED;
  }
  // Otherwise succeed with probability proportional to this attempt's mass
  // relative to all remaining attempts.
  double pSucceed = numAttempts[taskAttemptNumber];
  double pFail = 0;
  for (int i = taskAttemptNumber + 1; i < numAttempts.length; i++) {
    pFail += numAttempts[i];
  }
  boolean succeeded = random.nextDouble() < pSucceed / (pSucceed + pFail);
  return succeeded ? State.SUCCEEDED : State.FAILED;
}
 
源代码4 项目: hadoop   文件: DebugJobProducer.java
@SuppressWarnings({ "deprecation", "incomplete-switch" })
@Override
public TaskAttemptInfo getTaskAttemptInfo(
  TaskType taskType, int taskNumber, int taskAttemptNumber) {
  // Every synthetic attempt succeeds, with fixed phase runtimes of 100.
  switch (taskType) {
    case MAP: {
      TaskInfo mapInfo = new TaskInfo(
          m_bytesIn[taskNumber], m_recsIn[taskNumber],
          m_bytesOut[taskNumber], m_recsOut[taskNumber], -1);
      return new MapTaskAttemptInfo(State.SUCCEEDED, mapInfo, 100);
    }
    case REDUCE: {
      TaskInfo reduceInfo = new TaskInfo(
          r_bytesIn[taskNumber], r_recsIn[taskNumber],
          r_bytesOut[taskNumber], r_recsOut[taskNumber], -1);
      return new ReduceTaskAttemptInfo(
          State.SUCCEEDED, reduceInfo, 100, 100, 100);
    }
  }
  // Any other task type (e.g. setup/cleanup) is not simulated.
  throw new UnsupportedOperationException();
}
 
源代码5 项目: big-c   文件: ZombieJob.java
@SuppressWarnings("hiding") 
private TaskAttemptInfo scaleInfo(LoggedTask loggedTask,
    LoggedTaskAttempt loggedAttempt, int locality, int loggedLocality,
    double rackLocalOverNodeLocal, double rackRemoteOverNodeLocal) {
  TaskInfo taskInfo = getTaskInfo(loggedTask);
  // Relative runtime cost per locality level: index 0 = node-local,
  // 1 = rack-local, 2 = off-rack.
  double[] localityCost = new double[] { 1.0, rackLocalOverNodeLocal,
      rackRemoteOverNodeLocal };
  // Rescale the logged runtime from its recorded locality to the requested one.
  double scale = localityCost[locality] / localityCost[loggedLocality];
  State state = convertState(loggedAttempt.getResult());
  if (loggedTask.getTaskType() != Values.MAP) {
    throw new IllegalArgumentException("taskType can only be MAP: "
        + loggedTask.getTaskType());
  }
  // A zero start time means the log carries no usable runtime; synthesize one
  // from the job's CDFs instead.
  long runtime = (loggedAttempt.getStartTime() == 0)
      ? makeUpMapRuntime(state, locality)
      : loggedAttempt.getFinishTime() - loggedAttempt.getStartTime();
  runtime = sanitizeTaskRuntime(runtime, loggedAttempt.getAttemptID());
  runtime *= scale;
  return new MapTaskAttemptInfo(state, taskInfo, runtime,
      loggedAttempt.allSplitVectors());
}
 
源代码6 项目: big-c   文件: ZombieJob.java
private long doMakeUpReduceRuntime(State state) {
  try {
    // Sample a runtime from the CDF matching the attempt's outcome.
    if (state == State.SUCCEEDED) {
      return makeUpRuntime(job.getSuccessfulReduceAttemptCDF());
    }
    if (state == State.FAILED) {
      return makeUpRuntime(job.getFailedReduceAttemptCDF());
    }
    // Propagates past the catch below: only NoValueToMakeUpRuntime is caught.
    throw new IllegalArgumentException(
        "state is neither SUCCEEDED nor FAILED: " + state);
  } catch (NoValueToMakeUpRuntime e) {
    // No data to sample from; report a zero runtime.
    return 0;
  }
}
 
源代码7 项目: big-c   文件: ZombieJob.java
private State makeUpState(int taskAttemptNumber, double[] numAttempts) {
  // No attempt-count distribution at all: treat the attempt as failed.
  if (numAttempts == null) {
    return State.FAILED;
  }
  // Attempts at or beyond the end of the distribution always succeed.
  if (taskAttemptNumber >= numAttempts.length - 1) {
    return State.SUCCEEDED;
  }
  // Otherwise succeed with probability proportional to this attempt's mass
  // relative to all remaining attempts.
  double pSucceed = numAttempts[taskAttemptNumber];
  double pFail = 0;
  for (int i = taskAttemptNumber + 1; i < numAttempts.length; i++) {
    pFail += numAttempts[i];
  }
  boolean succeeded = random.nextDouble() < pSucceed / (pSucceed + pFail);
  return succeeded ? State.SUCCEEDED : State.FAILED;
}
 
源代码8 项目: big-c   文件: DebugJobProducer.java
@SuppressWarnings({ "deprecation", "incomplete-switch" })
@Override
public TaskAttemptInfo getTaskAttemptInfo(
  TaskType taskType, int taskNumber, int taskAttemptNumber) {
  // Every synthetic attempt succeeds, with fixed phase runtimes of 100.
  switch (taskType) {
    case MAP: {
      TaskInfo mapInfo = new TaskInfo(
          m_bytesIn[taskNumber], m_recsIn[taskNumber],
          m_bytesOut[taskNumber], m_recsOut[taskNumber], -1);
      return new MapTaskAttemptInfo(State.SUCCEEDED, mapInfo, 100);
    }
    case REDUCE: {
      TaskInfo reduceInfo = new TaskInfo(
          r_bytesIn[taskNumber], r_recsIn[taskNumber],
          r_bytesOut[taskNumber], r_recsOut[taskNumber], -1);
      return new ReduceTaskAttemptInfo(
          State.SUCCEEDED, reduceInfo, 100, 100, 100);
    }
  }
  // Any other task type (e.g. setup/cleanup) is not simulated.
  throw new UnsupportedOperationException();
}
 
源代码9 项目: RDFS   文件: ZombieJob.java
@SuppressWarnings("hiding") 
private TaskAttemptInfo scaleInfo(LoggedTask loggedTask,
    LoggedTaskAttempt loggedAttempt, int locality, int loggedLocality,
    double rackLocalOverNodeLocal, double rackRemoteOverNodeLocal) {
  TaskInfo taskInfo = getTaskInfo(loggedTask);
  // Relative runtime cost per locality level: index 0 = node-local,
  // 1 = rack-local, 2 = off-rack.
  double[] localityCost = new double[] { 1.0, rackLocalOverNodeLocal,
      rackRemoteOverNodeLocal };
  // Rescale the logged runtime from its recorded locality to the requested one.
  double scale = localityCost[locality] / localityCost[loggedLocality];
  State state = convertState(loggedAttempt.getResult());
  if (loggedTask.getTaskType() != Values.MAP) {
    throw new IllegalArgumentException("taskType can only be MAP: "
        + loggedTask.getTaskType());
  }
  // A zero start time means the log carries no usable runtime; synthesize one
  // from the job's CDFs instead.
  long runtime = (loggedAttempt.getStartTime() == 0)
      ? makeUpMapRuntime(state, locality)
      : loggedAttempt.getFinishTime() - loggedAttempt.getStartTime();
  runtime = sanitizeTaskRuntime(runtime, loggedAttempt.getAttemptID());
  runtime *= scale;
  return new MapTaskAttemptInfo(state, taskInfo, runtime);
}
 
源代码10 项目: RDFS   文件: ZombieJob.java
private long doMakeUpReduceRuntime(State state) {
  try {
    // Sample a runtime from the CDF matching the attempt's outcome.
    if (state == State.SUCCEEDED) {
      return makeUpRuntime(job.getSuccessfulReduceAttemptCDF());
    }
    if (state == State.FAILED) {
      return makeUpRuntime(job.getFailedReduceAttemptCDF());
    }
    // Propagates past the catch below: only NoValueToMakeUpRuntime is caught.
    throw new IllegalArgumentException(
        "state is neither SUCCEEDED nor FAILED: " + state);
  } catch (NoValueToMakeUpRuntime e) {
    // No data to sample from; report a zero runtime.
    return 0;
  }
}
 
源代码11 项目: RDFS   文件: ZombieJob.java
private long makeUpMapRuntime(State state, int locality) {
  if (state != State.SUCCEEDED && state != State.FAILED) {
    throw new IllegalArgumentException(
        "state is neither SUCCEEDED nor FAILED: " + state);
  }
  // Pick the CDF group matching the attempt's outcome.
  List<LoggedDiscreteCDF> cdfList = (state == State.SUCCEEDED)
      ? job.getSuccessfulMapAttemptCDFs()
      : job.getFailedMapAttemptCDFs();
  // XXX MapCDFs is an ArrayList of 4 possible groups: distance=0, 1, 2, and
  // the last group is "distance cannot be determined". All pig jobs
  // would have only the 4th group, and pig tasks usually do not have
  // any locality, so this group should count as "distance=2".
  // However, setup/cleanup tasks are also counted in the 4th group.
  // These tasks do not make sense.
  try {
    return makeUpRuntime(cdfList.get(locality));
  } catch (NoValueToMakeUpRuntime e) {
    // The requested locality group has no data; fall back to all groups.
    return makeUpRuntime(cdfList);
  }
}
 
源代码12 项目: RDFS   文件: SimulatorTaskTracker.java
/** 
 * Frees up bookkeeping memory used by completed tasks. 
 * Has no effect on the events or logs produced by the SimulatorTaskTracker.
 * We need this in order not to report completed tasks multiple times and 
 * to ensure that we do not run out of Java heap memory in larger 
 * simulations.
 */
private void garbageCollectCompletedTasks() {
  Iterator<TaskAttemptID> taskIds = tasks.keySet().iterator();
  while (taskIds.hasNext()) {
    TaskAttemptID taskId = taskIds.next();
    SimulatorTaskInProgress tip = tasks.get(taskId);
    if (tip.getTaskStatus().getRunState() == State.RUNNING) {
      continue;
    }
    // Iterator.remove() is the only safe way to delete while iterating.
    taskIds.remove();
    if (LOG.isDebugEnabled()) {
      LOG.debug("Garbage collected SimulatorTIP, taskId=" + taskId);
    }
    // We don't have to / must not touch usedMapSlots and usedReduceSlots
    // as those were already updated by processTaskAttemptCompletionEvent() 
    // when the task switched its state from running
  }
}
 
源代码13 项目: RDFS   文件: TestSimulatorJobTracker.java
private List<TaskStatus> collectAndCloneTaskStatuses() {
  // Snapshot every task's status; remember which tasks finished successfully.
  List<TaskStatus> snapshots = new ArrayList<TaskStatus>();
  Set<TaskAttemptID> finished = new HashSet<TaskAttemptID>();
  for (SimulatorTaskInProgress tip : tasks.values()) {
    TaskStatus status = tip.getTaskStatus();
    snapshots.add((TaskStatus) status.clone());
    if (tip.getFinalRunState() == State.SUCCEEDED) {
      finished.add(status.getTaskID());
    }
  }

  // Remove succeeded tasks only after iterating, to avoid
  // ConcurrentModificationException on the values() view.
  for (TaskAttemptID taskId : finished) {
    tasks.remove(taskId);
  }

  return snapshots;
}
 
源代码14 项目: hadoop   文件: TaskAttemptInfo.java
protected TaskAttemptInfo
     (State state, TaskInfo taskInfo, List<List<Integer>> allSplits) {
  // Only terminal SUCCEEDED/FAILED attempts can be described here.
  if (state != State.SUCCEEDED && state != State.FAILED) {
    throw new IllegalArgumentException("status cannot be " + state);
  }
  this.state = state;
  this.taskInfo = taskInfo;
  this.allSplits = allSplits;
}
 
源代码15 项目: hadoop   文件: ZombieJob.java
private static State convertState(Values status) {
  // Map a logged attempt result onto the simulator's task state.
  // The checks are mutually exclusive, so their order does not matter.
  if (status == Values.KILLED) {
    return State.KILLED;
  }
  if (status == Values.FAILED) {
    return State.FAILED;
  }
  if (status == Values.SUCCESS) {
    return State.SUCCEEDED;
  }
  throw new IllegalArgumentException("unknown status " + status);
}
 
源代码16 项目: hadoop   文件: ZombieJob.java
private long makeUpReduceRuntime(State state) {
  // Retry a few times: doMakeUpReduceRuntime may produce a negative sample,
  // which we reject. Give up after five tries and report zero.
  for (int attempt = 0; attempt < 5; attempt++) {
    long sampled = doMakeUpReduceRuntime(state);
    if (sampled >= 0) {
      return sampled;
    }
  }
  return 0;
}
 
源代码17 项目: hadoop   文件: ZombieJob.java
private long makeUpMapRuntime(State state, int locality) {
  if (state != State.SUCCEEDED && state != State.FAILED) {
    throw new IllegalArgumentException(
        "state is neither SUCCEEDED nor FAILED: " + state);
  }
  // Pick the CDF group matching the attempt's outcome.
  List<LoggedDiscreteCDF> cdfList = (state == State.SUCCEEDED)
      ? job.getSuccessfulMapAttemptCDFs()
      : job.getFailedMapAttemptCDFs();
  // XXX MapCDFs is an ArrayList of 4 possible groups: distance=0, 1, 2, and
  // the last group is "distance cannot be determined". All pig jobs
  // would have only the 4th group, and pig tasks usually do not have
  // any locality, so this group should count as "distance=2".
  // However, setup/cleanup tasks are also counted in the 4th group.
  // These tasks do not make sense.
  if (cdfList == null) {
    // No CDF data at all for this outcome; signal that with -1.
    return -1;
  }
  try {
    return makeUpRuntime(cdfList.get(locality));
  } catch (NoValueToMakeUpRuntime e) {
    // The requested locality group has no data; fall back to all groups.
    return makeUpRuntime(cdfList);
  }
}
 
源代码18 项目: hadoop   文件: ReduceTaskAttemptInfo.java
public ReduceTaskAttemptInfo(State state, TaskInfo taskInfo, long shuffleTime,
    long mergeTime, long reduceTime, List<List<Integer>> allSplits) {
  // Substitute the null-splits sentinel when no split vectors were supplied.
  super(state, taskInfo,
        allSplits != null
          ? allSplits
          : LoggedTaskAttempt.SplitVectorKind.getNullSplitsVector());
  // Per-phase runtimes of the reduce attempt.
  this.reduceTime = reduceTime;
  this.mergeTime = mergeTime;
  this.shuffleTime = shuffleTime;
}
 
源代码19 项目: hadoop   文件: MapTaskAttemptInfo.java
public MapTaskAttemptInfo(State state, TaskInfo taskInfo,
                          long runtime, List<List<Integer>> allSplits) {
  // Substitute the null-splits sentinel when no split vectors were supplied.
  super(state, taskInfo,
        allSplits != null
          ? allSplits
          : LoggedTaskAttempt.SplitVectorKind.getNullSplitsVector());
  this.runtime = runtime;
}
 
源代码20 项目: big-c   文件: TaskAttemptInfo.java
protected TaskAttemptInfo
     (State state, TaskInfo taskInfo, List<List<Integer>> allSplits) {
  // Only terminal SUCCEEDED/FAILED attempts can be described here.
  if (state != State.SUCCEEDED && state != State.FAILED) {
    throw new IllegalArgumentException("status cannot be " + state);
  }
  this.state = state;
  this.taskInfo = taskInfo;
  this.allSplits = allSplits;
}
 
源代码21 项目: big-c   文件: ZombieJob.java
private static State convertState(Values status) {
  // Map a logged attempt result onto the simulator's task state.
  // The checks are mutually exclusive, so their order does not matter.
  if (status == Values.KILLED) {
    return State.KILLED;
  }
  if (status == Values.FAILED) {
    return State.FAILED;
  }
  if (status == Values.SUCCESS) {
    return State.SUCCEEDED;
  }
  throw new IllegalArgumentException("unknown status " + status);
}
 
源代码22 项目: big-c   文件: ZombieJob.java
private long makeUpReduceRuntime(State state) {
  // Retry a few times: doMakeUpReduceRuntime may produce a negative sample,
  // which we reject. Give up after five tries and report zero.
  for (int attempt = 0; attempt < 5; attempt++) {
    long sampled = doMakeUpReduceRuntime(state);
    if (sampled >= 0) {
      return sampled;
    }
  }
  return 0;
}
 
源代码23 项目: big-c   文件: ZombieJob.java
private long makeUpMapRuntime(State state, int locality) {
  if (state != State.SUCCEEDED && state != State.FAILED) {
    throw new IllegalArgumentException(
        "state is neither SUCCEEDED nor FAILED: " + state);
  }
  // Pick the CDF group matching the attempt's outcome.
  List<LoggedDiscreteCDF> cdfList = (state == State.SUCCEEDED)
      ? job.getSuccessfulMapAttemptCDFs()
      : job.getFailedMapAttemptCDFs();
  // XXX MapCDFs is an ArrayList of 4 possible groups: distance=0, 1, 2, and
  // the last group is "distance cannot be determined". All pig jobs
  // would have only the 4th group, and pig tasks usually do not have
  // any locality, so this group should count as "distance=2".
  // However, setup/cleanup tasks are also counted in the 4th group.
  // These tasks do not make sense.
  if (cdfList == null) {
    // No CDF data at all for this outcome; signal that with -1.
    return -1;
  }
  try {
    return makeUpRuntime(cdfList.get(locality));
  } catch (NoValueToMakeUpRuntime e) {
    // The requested locality group has no data; fall back to all groups.
    return makeUpRuntime(cdfList);
  }
}
 
源代码24 项目: big-c   文件: ReduceTaskAttemptInfo.java
public ReduceTaskAttemptInfo(State state, TaskInfo taskInfo, long shuffleTime,
    long mergeTime, long reduceTime, List<List<Integer>> allSplits) {
  // Substitute the null-splits sentinel when no split vectors were supplied.
  super(state, taskInfo,
        allSplits != null
          ? allSplits
          : LoggedTaskAttempt.SplitVectorKind.getNullSplitsVector());
  // Per-phase runtimes of the reduce attempt.
  this.reduceTime = reduceTime;
  this.mergeTime = mergeTime;
  this.shuffleTime = shuffleTime;
}
 
源代码25 项目: big-c   文件: MapTaskAttemptInfo.java
public MapTaskAttemptInfo(State state, TaskInfo taskInfo,
                          long runtime, List<List<Integer>> allSplits) {
  // Substitute the null-splits sentinel when no split vectors were supplied.
  super(state, taskInfo,
        allSplits != null
          ? allSplits
          : LoggedTaskAttempt.SplitVectorKind.getNullSplitsVector());
  this.runtime = runtime;
}
 
源代码26 项目: RDFS   文件: TaskAttemptInfo.java
protected TaskAttemptInfo(State state, TaskInfo taskInfo) {
  // Only terminal SUCCEEDED/FAILED attempts can be described here.
  if (state != State.SUCCEEDED && state != State.FAILED) {
    throw new IllegalArgumentException("status cannot be " + state);
  }
  this.state = state;
  this.taskInfo = taskInfo;
}
 
源代码27 项目: RDFS   文件: ZombieJob.java
private static State convertState(Values status) {
  // Map a logged attempt result onto the simulator's task state.
  // The checks are mutually exclusive, so their order does not matter.
  if (status == Values.KILLED) {
    return State.KILLED;
  }
  if (status == Values.FAILED) {
    return State.FAILED;
  }
  if (status == Values.SUCCESS) {
    return State.SUCCEEDED;
  }
  throw new IllegalArgumentException("unknown status " + status);
}
 
源代码28 项目: RDFS   文件: ZombieJob.java
private long makeUpReduceRuntime(State state) {
  // Retry a few times: doMakeUpReduceRuntime may produce a negative sample,
  // which we reject. Give up after five tries and report zero.
  for (int attempt = 0; attempt < 5; attempt++) {
    long sampled = doMakeUpReduceRuntime(state);
    if (sampled >= 0) {
      return sampled;
    }
  }
  return 0;
}
 
源代码29 项目: RDFS   文件: ZombieJob.java
private State makeUpState(int taskAttemptNumber, double[] numAttempts) {
  // Attempts at or beyond the end of the distribution always succeed.
  if (taskAttemptNumber >= numAttempts.length - 1) {
    return State.SUCCEEDED;
  }
  // Otherwise succeed with probability proportional to this attempt's mass
  // relative to all remaining attempts.
  double pSucceed = numAttempts[taskAttemptNumber];
  double pFail = 0;
  for (int i = taskAttemptNumber + 1; i < numAttempts.length; i++) {
    pFail += numAttempts[i];
  }
  boolean succeeded = random.nextDouble() < pSucceed / (pSucceed + pFail);
  return succeeded ? State.SUCCEEDED : State.FAILED;
}
 
源代码30 项目: RDFS   文件: ReduceTaskAttemptInfo.java
public ReduceTaskAttemptInfo(State state, TaskInfo taskInfo, long shuffleTime,
    long mergeTime, long reduceTime) {
  super(state, taskInfo);
  // Per-phase runtimes of the reduce attempt.
  this.reduceTime = reduceTime;
  this.mergeTime = mergeTime;
  this.shuffleTime = shuffleTime;
}
 
 类所在包
 同包方法