类org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitMetaInfo源码实例Demo

下面列出了org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitMetaInfo类的API实例代码及用法,也可以点击每个示例的链接到GitHub查看完整源代码。

源代码1 项目: hadoop   文件: JobImpl.java
/**
 * Instantiates one MapTaskImpl per configured map task, registers each
 * with the job, then logs the total input size and split count.
 *
 * @param job         the job whose map tasks are being created
 * @param inputLength total input size in bytes (used for logging only)
 * @param splits      per-task split metadata, indexed by task number
 */
private void createMapTasks(JobImpl job, long inputLength,
                            TaskSplitMetaInfo[] splits) {
  for (int taskIndex = 0; taskIndex < job.numMapTasks; ++taskIndex) {
    // Each map task is bound to the split metadata at its own index.
    TaskImpl mapTask = new MapTaskImpl(job.jobId, taskIndex,
        job.eventHandler,
        job.remoteJobConfFile,
        job.conf, splits[taskIndex],
        job.taskAttemptListener,
        job.jobToken, job.jobCredentials,
        job.clock,
        job.applicationAttemptId.getAttemptId(),
        job.metrics, job.appContext);
    job.addTask(mapTask);
  }
  LOG.info("Input size for job " + job.jobId + " = " + inputLength
      + ". Number of splits = " + splits.length);
}
 
源代码2 项目: big-c   文件: JobImpl.java
/**
 * Instantiates one MapTaskImpl per configured map task, registers each
 * with the job, then logs the total input size and split count.
 *
 * @param job         the job whose map tasks are being created
 * @param inputLength total input size in bytes (used for logging only)
 * @param splits      per-task split metadata, indexed by task number
 */
private void createMapTasks(JobImpl job, long inputLength,
                            TaskSplitMetaInfo[] splits) {
  for (int taskIndex = 0; taskIndex < job.numMapTasks; ++taskIndex) {
    // Each map task is bound to the split metadata at its own index.
    TaskImpl mapTask = new MapTaskImpl(job.jobId, taskIndex,
        job.eventHandler,
        job.remoteJobConfFile,
        job.conf, splits[taskIndex],
        job.taskAttemptListener,
        job.jobToken, job.jobCredentials,
        job.clock,
        job.applicationAttemptId.getAttemptId(),
        job.metrics, job.appContext);
    job.addTask(mapTask);
  }
  LOG.info("Input size for job " + job.jobId + " = " + inputLength
      + ". Number of splits = " + splits.length);
}
 
源代码3 项目: incubator-tez   文件: YARNRunner.java
/**
 * Reads the split meta info written under the job submit directory and
 * turns each split's preferred locations into a TaskLocationHint.
 *
 * @return one location hint per input split, in split order
 * @throws IOException if the split meta info cannot be read
 */
private List<TaskLocationHint> getMapLocationHintsFromInputSplits(JobID jobId,
    FileSystem fs, Configuration conf,
    String jobSubmitDir) throws IOException {
  TaskSplitMetaInfo[] splitsInfo =
      SplitMetaInfoReader.readSplitMetaInfo(jobId, fs, conf,
          new Path(jobSubmitDir));
  List<TaskLocationHint> locationHints =
      new ArrayList<TaskLocationHint>(splitsInfo.length);
  for (TaskSplitMetaInfo splitInfo : splitsInfo) {
    // Hint carries only the preferred hosts; the rack argument stays null.
    HashSet<String> hosts =
        new HashSet<String>(Arrays.asList(splitInfo.getLocations()));
    locationHints.add(new TaskLocationHint(hosts, null));
  }
  return locationHints;
}
 
源代码4 项目: incubator-tez   文件: TestMRHelpers.java
/**
 * Rebuilds the expected location hints from the split meta info on disk
 * and asserts they equal the hints actually produced.
 *
 * @param inputSplitsDir directory holding the serialized split meta info
 * @param actual         hints produced by the code under test
 */
private void verifyLocationHints(Path inputSplitsDir,
    List<TaskLocationHint> actual) throws Exception {
  JobID jobId = new JobID("dummy", 1);
  TaskSplitMetaInfo[] splitsInfo =
      SplitMetaInfoReader.readSplitMetaInfo(jobId, remoteFs,
          conf, inputSplitsDir);
  List<TaskLocationHint> expected =
      new ArrayList<TaskLocationHint>(splitsInfo.length);
  for (TaskSplitMetaInfo splitInfo : splitsInfo) {
    expected.add(
        new TaskLocationHint(new HashSet<String>(
            Arrays.asList(splitInfo.getLocations())), null));
  }

  Assert.assertEquals(expected, actual);
}
 
源代码5 项目: tez   文件: SplitMetaInfoReaderTez.java
/**
 * Reads the serialized split meta info for the current task's job and
 * materializes one TaskSplitMetaInfo per split, each pointing its split
 * index at the local job split file.
 *
 * @param conf configuration supplying the task-local resource directory
 * @param fs   filesystem used by getFSDataIS to open the meta info stream
 * @return one TaskSplitMetaInfo per split, in file order
 * @throws IOException on read failure or a corrupt (negative) split count
 */
public static TaskSplitMetaInfo[] readSplitMetaInfo(Configuration conf,
    FileSystem fs) throws IOException {
  // try-with-resources closes the stream on every exit path (was a
  // manual try/finally with a null check).
  try (FSDataInputStream in = getFSDataIS(conf, fs)) {
    final String jobSplitFile = MRJobConfig.JOB_SPLIT;
    final String basePath =
        conf.get(MRFrameworkConfigs.TASK_LOCAL_RESOURCE_DIR, ".");
    // The split file location is loop-invariant; compute it once instead
    // of building a new Path/URI per split.
    final String splitFilePath =
        new Path(basePath, jobSplitFile).toUri().toString();
    int numSplits = WritableUtils.readVInt(in);
    // Guard against a corrupt count: fail with a descriptive IOException
    // rather than a NegativeArraySizeException from the allocation below.
    if (numSplits < 0) {
      throw new IOException("Invalid number of splits: " + numSplits);
    }
    JobSplit.TaskSplitMetaInfo[] allSplitMetaInfo =
        new JobSplit.TaskSplitMetaInfo[numSplits];
    for (int i = 0; i < numSplits; i++) {
      JobSplit.SplitMetaInfo splitMetaInfo = new JobSplit.SplitMetaInfo();
      splitMetaInfo.readFields(in);
      JobSplit.TaskSplitIndex splitIndex = new JobSplit.TaskSplitIndex(
          splitFilePath, splitMetaInfo.getStartOffset());
      allSplitMetaInfo[i] = new JobSplit.TaskSplitMetaInfo(splitIndex,
          splitMetaInfo.getLocations(), splitMetaInfo.getInputDataLength());
    }
    return allSplitMetaInfo;
  }
}
 
源代码6 项目: tez   文件: YARNRunner.java
/**
 * Reads the split meta info written under the job submit directory and
 * turns each split's preferred locations into a TaskLocationHint via the
 * static factory.
 *
 * @return one location hint per input split, in split order
 * @throws IOException if the split meta info cannot be read
 */
private List<TaskLocationHint> getMapLocationHintsFromInputSplits(JobID jobId,
    FileSystem fs, Configuration conf,
    String jobSubmitDir) throws IOException {
  TaskSplitMetaInfo[] splitsInfo =
      SplitMetaInfoReader.readSplitMetaInfo(jobId, fs, conf,
          new Path(jobSubmitDir));
  List<TaskLocationHint> locationHints =
      new ArrayList<TaskLocationHint>(splitsInfo.length);
  for (TaskSplitMetaInfo splitInfo : splitsInfo) {
    // Hint carries only the preferred hosts; the rack argument stays null.
    HashSet<String> hosts =
        new HashSet<String>(Arrays.asList(splitInfo.getLocations()));
    locationHints.add(
        TaskLocationHint.createTaskLocationHint(hosts, null));
  }
  return locationHints;
}
 
源代码7 项目: hadoop   文件: MapTaskAttemptImpl.java
/**
 * Attempt for a map task: forwards to the TaskAttemptImpl base
 * constructor, seeding the attempt's data-local host list from the
 * split's preferred locations, and retains the split info.
 *
 * @param splitInfo meta info for the input split this attempt processes
 */
public MapTaskAttemptImpl(TaskId taskId, int attempt, 
    EventHandler eventHandler, Path jobFile, 
    int partition, TaskSplitMetaInfo splitInfo, JobConf conf,
    TaskAttemptListener taskAttemptListener, 
    Token<JobTokenIdentifier> jobToken,
    Credentials credentials, Clock clock,
    AppContext appContext) {
  // splitInfo.getLocations() becomes the attempt's data-local host list.
  super(taskId, attempt, eventHandler, 
      taskAttemptListener, jobFile, partition, conf, splitInfo.getLocations(),
      jobToken, credentials, clock, appContext);
  this.splitInfo = splitInfo;
}
 
源代码8 项目: hadoop   文件: MapTaskImpl.java
/**
 * Map task: forwards to the base Task constructor with TaskType.MAP and
 * retains the split meta info for use by the task's attempts.
 *
 * @param taskSplitMetaInfo meta info for the input split this task reads
 */
public MapTaskImpl(JobId jobId, int partition, EventHandler eventHandler,
    Path remoteJobConfFile, JobConf conf,
    TaskSplitMetaInfo taskSplitMetaInfo,
    TaskAttemptListener taskAttemptListener,
    Token<JobTokenIdentifier> jobToken,
    Credentials credentials, Clock clock,
    int appAttemptId, MRAppMetrics metrics, AppContext appContext) {
  super(jobId, TaskType.MAP, partition, eventHandler, remoteJobConfFile,
      conf, taskAttemptListener, jobToken, credentials, clock,
      appAttemptId, metrics, appContext);
  this.taskSplitMetaInfo = taskSplitMetaInfo;
}
 
源代码9 项目: hadoop   文件: JobImpl.java
/**
 * Loads the task split meta info for this job from the remote job submit
 * directory.
 *
 * @throws YarnRuntimeException wrapping any IOException from the reader
 */
protected TaskSplitMetaInfo[] createSplits(JobImpl job, JobId jobId) {
  try {
    return SplitMetaInfoReader.readSplitMetaInfo(
        job.oldJobId, job.fs,
        job.conf,
        job.remoteJobSubmitDir);
  } catch (IOException e) {
    // Split meta info is mandatory; surface the failure as unchecked.
    throw new YarnRuntimeException(e);
  }
}
 
源代码10 项目: hadoop   文件: TestRecovery.java
/**
 * Builds a MapTaskImpl wired up with mocks, suitable for recovery tests.
 * Removed an unused Resource mock (and its stubbing) that was never
 * passed to the task; added a scoped @SuppressWarnings for the
 * unavoidable unchecked Token cast.
 *
 * @param clusterTimestamp cluster timestamp used for the application id
 * @param eh               event handler the task will post events to
 * @return a MapTaskImpl backed by mock collaborators
 */
private MapTaskImpl getMockMapTask(long clusterTimestamp, EventHandler eh) {

  ApplicationId appId = ApplicationId.newInstance(clusterTimestamp, 1);
  JobId jobId = MRBuilderUtils.newJobId(appId, 1);

  int partitions = 2;

  Path remoteJobConfFile = mock(Path.class);
  JobConf conf = new JobConf();
  TaskAttemptListener taskAttemptListener = mock(TaskAttemptListener.class);
  // Mockito cannot mock the parameterized type directly; the cast is
  // safe because the mock records no real tokens.
  @SuppressWarnings("unchecked")
  Token<JobTokenIdentifier> jobToken =
      (Token<JobTokenIdentifier>) mock(Token.class);
  Credentials credentials = null;
  Clock clock = new SystemClock();
  int appAttemptId = 3;
  MRAppMetrics metrics = mock(MRAppMetrics.class);

  ClusterInfo clusterInfo = mock(ClusterInfo.class);
  AppContext appContext = mock(AppContext.class);
  when(appContext.getClusterInfo()).thenReturn(clusterInfo);

  TaskSplitMetaInfo taskSplitMetaInfo = mock(TaskSplitMetaInfo.class);
  return new MapTaskImpl(jobId, partitions,
      eh, remoteJobConfFile, conf,
      taskSplitMetaInfo, taskAttemptListener, jobToken, credentials, clock,
      appAttemptId, metrics, appContext);
}
 
源代码11 项目: hadoop   文件: TestTaskAttempt.java
@Test
public void testSingleRackRequest() throws Exception {
  // All three hosts live on the same (default) rack, so the container
  // request must carry exactly one rack entry.
  TaskAttemptImpl.RequestContainerTransition rct =
      new TaskAttemptImpl.RequestContainerTransition(false);

  EventHandler eventHandler = mock(EventHandler.class);
  String[] hosts = { "host1", "host2", "host3" };
  TaskSplitMetaInfo splitInfo =
      new TaskSplitMetaInfo(hosts, 0, 128 * 1024 * 1024l);

  TaskAttemptImpl taskAttempt =
      createMapTaskAttemptImplForTest(eventHandler, splitInfo);

  rct.transition(taskAttempt, mock(TaskAttemptEvent.class));

  ArgumentCaptor<Event> captor = ArgumentCaptor.forClass(Event.class);
  verify(eventHandler, times(2)).handle(captor.capture());
  Event secondEvent = captor.getAllValues().get(1);
  Assert.assertTrue("Second Event not of type ContainerRequestEvent",
      secondEvent instanceof ContainerRequestEvent);
  ContainerRequestEvent cre = (ContainerRequestEvent) secondEvent;
  //Only a single occurrence of /DefaultRack
  assertEquals(1, cre.getRacks().length);
}
 
源代码12 项目: hadoop   文件: TestTaskAttempt.java
@Test
public void testHostResolveAttempt() throws Exception {
  // One location is a raw IP; the attempt must resolve it to a hostname
  // before issuing the container request.
  TaskAttemptImpl.RequestContainerTransition rct =
      new TaskAttemptImpl.RequestContainerTransition(false);

  EventHandler eventHandler = mock(EventHandler.class);
  String[] hosts = { "192.168.1.1", "host2", "host3" };
  TaskSplitMetaInfo splitInfo =
      new TaskSplitMetaInfo(hosts, 0, 128 * 1024 * 1024l);

  TaskAttemptImpl taskAttempt =
      createMapTaskAttemptImplForTest(eventHandler, splitInfo);
  TaskAttemptImpl spyTa = spy(taskAttempt);
  when(spyTa.resolveHost(hosts[0])).thenReturn("host1");
  spyTa.dataLocalHosts = spyTa.resolveHosts(splitInfo.getLocations());

  rct.transition(spyTa, mock(TaskAttemptEvent.class));
  verify(spyTa).resolveHost(hosts[0]);
  ArgumentCaptor<Event> captor = ArgumentCaptor.forClass(Event.class);
  verify(eventHandler, times(2)).handle(captor.capture());
  Event secondEvent = captor.getAllValues().get(1);
  Assert.assertTrue("Second Event not of type ContainerRequestEvent",
      secondEvent instanceof ContainerRequestEvent);

  // Every requested host must be one of the resolved hostnames.
  Map<String, Boolean> expected = new HashMap<String, Boolean>();
  expected.put("host1", true);
  expected.put("host2", true);
  expected.put("host3", true);
  ContainerRequestEvent cre = (ContainerRequestEvent) secondEvent;
  for (String requestedHost : cre.getHosts()) {
    expected.remove(requestedHost);
  }
  assertEquals(0, expected.size());
}
 
源代码13 项目: hadoop   文件: TestTaskAttempt.java
/**
 * Builds a bare MapTaskAttemptImpl (mock listener and job file, fresh
 * JobConf, nulls for token/credentials/context) for transition tests.
 */
private TaskAttemptImpl createMapTaskAttemptImplForTest(
    EventHandler eventHandler, TaskSplitMetaInfo taskSplitMetaInfo, Clock clock) {
  ApplicationId appId = ApplicationId.newInstance(1, 1);
  JobId jobId = MRBuilderUtils.newJobId(appId, 1);
  TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
  TaskAttemptListener listener = mock(TaskAttemptListener.class);
  Path jobFile = mock(Path.class);
  JobConf jobConf = new JobConf();
  // Returned directly; no state of the test class is touched.
  return new MapTaskAttemptImpl(taskId, 1, eventHandler, jobFile, 1,
      taskSplitMetaInfo, jobConf, listener, null,
      null, clock, null);
}
 
源代码14 项目: hadoop   文件: TestJobImpl.java
/**
 * Returns an InitTransition whose split creation is overridden to yield
 * {@code numSplits} freshly-constructed, empty TaskSplitMetaInfo entries.
 */
private static InitTransition getInitTransition(final int numSplits) {
  return new InitTransition() {
    @Override
    protected TaskSplitMetaInfo[] createSplits(JobImpl job, JobId jobId) {
      // One distinct (empty) meta info object per requested split.
      TaskSplitMetaInfo[] splits = new TaskSplitMetaInfo[numSplits];
      for (int idx = 0; idx < splits.length; ++idx) {
        splits[idx] = new TaskSplitMetaInfo();
      }
      return splits;
    }
  };
}
 
源代码15 项目: hadoop   文件: TestTaskImpl.java
@Before 
@SuppressWarnings("unchecked")
public void setup() {
   dispatcher = new InlineDispatcher();
  
  ++startCount;
  
  conf = new JobConf();
  taskAttemptListener = mock(TaskAttemptListener.class);
  jobToken = (Token<JobTokenIdentifier>) mock(Token.class);
  remoteJobConfFile = mock(Path.class);
  credentials = null;
  clock = new SystemClock();
  metrics = mock(MRAppMetrics.class);  
  dataLocations = new String[1];
  
  appId = ApplicationId.newInstance(System.currentTimeMillis(), 1);

  jobId = Records.newRecord(JobId.class);
  jobId.setId(1);
  jobId.setAppId(appId);
  appContext = mock(AppContext.class);

  taskSplitMetaInfo = mock(TaskSplitMetaInfo.class);
  when(taskSplitMetaInfo.getLocations()).thenReturn(dataLocations); 
  
  taskAttempts = new ArrayList<MockTaskAttemptImpl>();    
}
 
源代码16 项目: hadoop   文件: MRApp.java
/**
 * Test override: synthesizes empty split metadata, one entry per map
 * task, instead of reading real split files.
 */
@Override
protected TaskSplitMetaInfo[] createSplits(JobImpl job, JobId jobId) {
  TaskSplitMetaInfo[] splits = new TaskSplitMetaInfo[maps];
  for (int idx = 0; idx < splits.length; ++idx) {
    // Distinct (empty) meta info object per map task.
    splits[idx] = new TaskSplitMetaInfo();
  }
  return splits;
}
 
源代码17 项目: hadoop   文件: LocalJobRunner.java
/**
 * Captures everything one local map task needs to run: its split meta
 * info, sequential task id, job id, and output-file map.
 *
 * @param info           split meta info for this map task
 * @param taskId         sequential id of this map task within the job
 * @param jobId          id of the enclosing job
 * @param mapOutputFiles shared mapping from task attempts to output files
 */
public MapTaskRunnable(TaskSplitMetaInfo info, int taskId, JobID jobId,
    Map<TaskAttemptID, MapOutputFile> mapOutputFiles) {
  this.info = info;
  this.taskId = taskId;
  this.mapOutputFiles = mapOutputFiles;
  this.jobId = jobId;
  // Private copy of the enclosing runner's job configuration.
  this.localConf = new JobConf(job);
}
 
源代码18 项目: hadoop   文件: LocalJobRunner.java
/**
 * Create Runnables to encapsulate map tasks for use by the executor
 * service.
 * @param taskInfo Info about the map task splits
 * @param jobId the job id
 * @param mapOutputFiles a mapping from task attempts to output files
 * @return a List of Runnables, one per map task.
 */
protected List<RunnableWithThrowable> getMapTaskRunnables(
    TaskSplitMetaInfo [] taskInfo, JobID jobId,
    Map<TaskAttemptID, MapOutputFile> mapOutputFiles) {

  ArrayList<RunnableWithThrowable> runnables =
      new ArrayList<RunnableWithThrowable>(taskInfo.length);
  // Task ids are assigned sequentially in split order, starting at 0.
  int nextTaskId = 0;
  for (TaskSplitMetaInfo splitInfo : taskInfo) {
    runnables.add(new MapTaskRunnable(splitInfo, nextTaskId++, jobId,
        mapOutputFiles));
  }
  return runnables;
}
 
源代码19 项目: big-c   文件: MapTaskAttemptImpl.java
/**
 * Attempt for a map task: forwards to the TaskAttemptImpl base
 * constructor, seeding the attempt's data-local host list from the
 * split's preferred locations, and retains the split info.
 *
 * @param splitInfo meta info for the input split this attempt processes
 */
public MapTaskAttemptImpl(TaskId taskId, int attempt, 
    EventHandler eventHandler, Path jobFile, 
    int partition, TaskSplitMetaInfo splitInfo, JobConf conf,
    TaskAttemptListener taskAttemptListener, 
    Token<JobTokenIdentifier> jobToken,
    Credentials credentials, Clock clock,
    AppContext appContext) {
  // splitInfo.getLocations() becomes the attempt's data-local host list.
  super(taskId, attempt, eventHandler, 
      taskAttemptListener, jobFile, partition, conf, splitInfo.getLocations(),
      jobToken, credentials, clock, appContext);
  this.splitInfo = splitInfo;
}
 
源代码20 项目: big-c   文件: MapTaskImpl.java
/**
 * Map task: forwards to the base Task constructor with TaskType.MAP and
 * retains the split meta info for use by the task's attempts.
 *
 * @param taskSplitMetaInfo meta info for the input split this task reads
 */
public MapTaskImpl(JobId jobId, int partition, EventHandler eventHandler,
    Path remoteJobConfFile, JobConf conf,
    TaskSplitMetaInfo taskSplitMetaInfo,
    TaskAttemptListener taskAttemptListener,
    Token<JobTokenIdentifier> jobToken,
    Credentials credentials, Clock clock,
    int appAttemptId, MRAppMetrics metrics, AppContext appContext) {
  super(jobId, TaskType.MAP, partition, eventHandler, remoteJobConfFile,
      conf, taskAttemptListener, jobToken, credentials, clock,
      appAttemptId, metrics, appContext);
  this.taskSplitMetaInfo = taskSplitMetaInfo;
}
 
源代码21 项目: big-c   文件: JobImpl.java
/**
 * Loads the task split meta info for this job from the remote job submit
 * directory.
 *
 * @throws YarnRuntimeException wrapping any IOException from the reader
 */
protected TaskSplitMetaInfo[] createSplits(JobImpl job, JobId jobId) {
  try {
    return SplitMetaInfoReader.readSplitMetaInfo(
        job.oldJobId, job.fs,
        job.conf,
        job.remoteJobSubmitDir);
  } catch (IOException e) {
    // Split meta info is mandatory; surface the failure as unchecked.
    throw new YarnRuntimeException(e);
  }
}
 
源代码22 项目: big-c   文件: TestRecovery.java
/**
 * Builds a MapTaskImpl wired up with mocks, suitable for recovery tests.
 * Removed an unused Resource mock (and its stubbing) that was never
 * passed to the task; added a scoped @SuppressWarnings for the
 * unavoidable unchecked Token cast.
 *
 * @param clusterTimestamp cluster timestamp used for the application id
 * @param eh               event handler the task will post events to
 * @return a MapTaskImpl backed by mock collaborators
 */
private MapTaskImpl getMockMapTask(long clusterTimestamp, EventHandler eh) {

  ApplicationId appId = ApplicationId.newInstance(clusterTimestamp, 1);
  JobId jobId = MRBuilderUtils.newJobId(appId, 1);

  int partitions = 2;

  Path remoteJobConfFile = mock(Path.class);
  JobConf conf = new JobConf();
  TaskAttemptListener taskAttemptListener = mock(TaskAttemptListener.class);
  // Mockito cannot mock the parameterized type directly; the cast is
  // safe because the mock records no real tokens.
  @SuppressWarnings("unchecked")
  Token<JobTokenIdentifier> jobToken =
      (Token<JobTokenIdentifier>) mock(Token.class);
  Credentials credentials = null;
  Clock clock = new SystemClock();
  int appAttemptId = 3;
  MRAppMetrics metrics = mock(MRAppMetrics.class);

  ClusterInfo clusterInfo = mock(ClusterInfo.class);
  AppContext appContext = mock(AppContext.class);
  when(appContext.getClusterInfo()).thenReturn(clusterInfo);

  TaskSplitMetaInfo taskSplitMetaInfo = mock(TaskSplitMetaInfo.class);
  return new MapTaskImpl(jobId, partitions,
      eh, remoteJobConfFile, conf,
      taskSplitMetaInfo, taskAttemptListener, jobToken, credentials, clock,
      appAttemptId, metrics, appContext);
}
 
源代码23 项目: big-c   文件: TestTaskAttempt.java
@Test
public void testSingleRackRequest() throws Exception {
  // All three hosts live on the same (default) rack, so the container
  // request must carry exactly one rack entry.
  TaskAttemptImpl.RequestContainerTransition rct =
      new TaskAttemptImpl.RequestContainerTransition(false);

  EventHandler eventHandler = mock(EventHandler.class);
  String[] hosts = { "host1", "host2", "host3" };
  TaskSplitMetaInfo splitInfo =
      new TaskSplitMetaInfo(hosts, 0, 128 * 1024 * 1024l);

  TaskAttemptImpl taskAttempt =
      createMapTaskAttemptImplForTest(eventHandler, splitInfo);

  rct.transition(taskAttempt, mock(TaskAttemptEvent.class));

  ArgumentCaptor<Event> captor = ArgumentCaptor.forClass(Event.class);
  verify(eventHandler, times(2)).handle(captor.capture());
  Event secondEvent = captor.getAllValues().get(1);
  Assert.assertTrue("Second Event not of type ContainerRequestEvent",
      secondEvent instanceof ContainerRequestEvent);
  ContainerRequestEvent cre = (ContainerRequestEvent) secondEvent;
  //Only a single occurrence of /DefaultRack
  assertEquals(1, cre.getRacks().length);
}
 
源代码24 项目: big-c   文件: TestTaskAttempt.java
@Test
public void testHostResolveAttempt() throws Exception {
  // One location is a raw IP; the attempt must resolve it to a hostname
  // before issuing the container request.
  TaskAttemptImpl.RequestContainerTransition rct =
      new TaskAttemptImpl.RequestContainerTransition(false);

  EventHandler eventHandler = mock(EventHandler.class);
  String[] hosts = { "192.168.1.1", "host2", "host3" };
  TaskSplitMetaInfo splitInfo =
      new TaskSplitMetaInfo(hosts, 0, 128 * 1024 * 1024l);

  TaskAttemptImpl taskAttempt =
      createMapTaskAttemptImplForTest(eventHandler, splitInfo);
  TaskAttemptImpl spyTa = spy(taskAttempt);
  when(spyTa.resolveHost(hosts[0])).thenReturn("host1");
  spyTa.dataLocalHosts = spyTa.resolveHosts(splitInfo.getLocations());

  rct.transition(spyTa, mock(TaskAttemptEvent.class));
  verify(spyTa).resolveHost(hosts[0]);
  ArgumentCaptor<Event> captor = ArgumentCaptor.forClass(Event.class);
  verify(eventHandler, times(2)).handle(captor.capture());
  Event secondEvent = captor.getAllValues().get(1);
  Assert.assertTrue("Second Event not of type ContainerRequestEvent",
      secondEvent instanceof ContainerRequestEvent);

  // Every requested host must be one of the resolved hostnames.
  Map<String, Boolean> expected = new HashMap<String, Boolean>();
  expected.put("host1", true);
  expected.put("host2", true);
  expected.put("host3", true);
  ContainerRequestEvent cre = (ContainerRequestEvent) secondEvent;
  for (String requestedHost : cre.getHosts()) {
    expected.remove(requestedHost);
  }
  assertEquals(0, expected.size());
}
 
源代码25 项目: big-c   文件: TestTaskAttempt.java
/**
 * Builds a bare MapTaskAttemptImpl (mock listener and job file, fresh
 * JobConf, nulls for token/credentials/context) for transition tests.
 */
private TaskAttemptImpl createMapTaskAttemptImplForTest(
    EventHandler eventHandler, TaskSplitMetaInfo taskSplitMetaInfo, Clock clock) {
  ApplicationId appId = ApplicationId.newInstance(1, 1);
  JobId jobId = MRBuilderUtils.newJobId(appId, 1);
  TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
  TaskAttemptListener listener = mock(TaskAttemptListener.class);
  Path jobFile = mock(Path.class);
  JobConf jobConf = new JobConf();
  // Returned directly; no state of the test class is touched.
  return new MapTaskAttemptImpl(taskId, 1, eventHandler, jobFile, 1,
      taskSplitMetaInfo, jobConf, listener, null,
      null, clock, null);
}
 
源代码26 项目: big-c   文件: TestJobImpl.java
/**
 * Returns an InitTransition whose split creation is overridden to yield
 * {@code numSplits} freshly-constructed, empty TaskSplitMetaInfo entries.
 */
private static InitTransition getInitTransition(final int numSplits) {
  return new InitTransition() {
    @Override
    protected TaskSplitMetaInfo[] createSplits(JobImpl job, JobId jobId) {
      // One distinct (empty) meta info object per requested split.
      TaskSplitMetaInfo[] splits = new TaskSplitMetaInfo[numSplits];
      for (int idx = 0; idx < splits.length; ++idx) {
        splits[idx] = new TaskSplitMetaInfo();
      }
      return splits;
    }
  };
}
 
源代码27 项目: big-c   文件: TestTaskImpl.java
/**
 * Per-test fixture: builds the mocks, configuration, and ids shared by
 * every test case. startCount is incremented once per test run.
 */
@Before 
@SuppressWarnings("unchecked")
public void setup() {
   dispatcher = new InlineDispatcher();
  
  ++startCount;
  
  conf = new JobConf();
  taskAttemptListener = mock(TaskAttemptListener.class);
  jobToken = (Token<JobTokenIdentifier>) mock(Token.class);
  remoteJobConfFile = mock(Path.class);
  credentials = null;
  clock = new SystemClock();
  metrics = mock(MRAppMetrics.class);  
  dataLocations = new String[1];
  
  appId = ApplicationId.newInstance(System.currentTimeMillis(), 1);

  jobId = Records.newRecord(JobId.class);
  jobId.setId(1);
  jobId.setAppId(appId);
  appContext = mock(AppContext.class);

  // The mocked split reports a single-element (null) location array.
  taskSplitMetaInfo = mock(TaskSplitMetaInfo.class);
  when(taskSplitMetaInfo.getLocations()).thenReturn(dataLocations); 
  
  taskAttempts = new ArrayList<MockTaskAttemptImpl>();    
}
 
源代码28 项目: big-c   文件: MRApp.java
/**
 * Test override: synthesizes empty split metadata, one entry per map
 * task, instead of reading real split files.
 */
@Override
protected TaskSplitMetaInfo[] createSplits(JobImpl job, JobId jobId) {
  TaskSplitMetaInfo[] splits = new TaskSplitMetaInfo[maps];
  for (int idx = 0; idx < splits.length; ++idx) {
    // Distinct (empty) meta info object per map task.
    splits[idx] = new TaskSplitMetaInfo();
  }
  return splits;
}
 
源代码29 项目: big-c   文件: LocalJobRunner.java
/**
 * Captures everything one local map task needs to run: its split meta
 * info, sequential task id, job id, and output-file map.
 *
 * @param info           split meta info for this map task
 * @param taskId         sequential id of this map task within the job
 * @param jobId          id of the enclosing job
 * @param mapOutputFiles shared mapping from task attempts to output files
 */
public MapTaskRunnable(TaskSplitMetaInfo info, int taskId, JobID jobId,
    Map<TaskAttemptID, MapOutputFile> mapOutputFiles) {
  this.info = info;
  this.taskId = taskId;
  this.mapOutputFiles = mapOutputFiles;
  this.jobId = jobId;
  // Private copy of the enclosing runner's job configuration.
  this.localConf = new JobConf(job);
}
 
源代码30 项目: big-c   文件: LocalJobRunner.java
/**
 * Create Runnables to encapsulate map tasks for use by the executor
 * service.
 * @param taskInfo Info about the map task splits
 * @param jobId the job id
 * @param mapOutputFiles a mapping from task attempts to output files
 * @return a List of Runnables, one per map task.
 */
protected List<RunnableWithThrowable> getMapTaskRunnables(
    TaskSplitMetaInfo [] taskInfo, JobID jobId,
    Map<TaskAttemptID, MapOutputFile> mapOutputFiles) {

  ArrayList<RunnableWithThrowable> runnables =
      new ArrayList<RunnableWithThrowable>(taskInfo.length);
  // Task ids are assigned sequentially in split order, starting at 0.
  int nextTaskId = 0;
  for (TaskSplitMetaInfo splitInfo : taskInfo) {
    runnables.add(new MapTaskRunnable(splitInfo, nextTaskId++, jobId,
        mapOutputFiles));
  }
  return runnables;
}
 
 类方法
 同包方法