org.apache.hadoop.mapred.MiniMRClientClusterFactory: source code examples

The examples below show how the org.apache.hadoop.mapred.MiniMRClientClusterFactory API is used in real projects; each entry notes the project and file it was taken from, and the full source can be viewed on GitHub.
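
Before the project examples, here is a minimal, self-contained sketch of the typical lifecycle: create the mini MapReduce cluster from a caller class, read back the configuration it exposes, and stop it when finished. The class and variable names below are illustrative and not taken from any of the projects listed here.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.MiniMRClientCluster;
import org.apache.hadoop.mapred.MiniMRClientClusterFactory;

public class MiniMRClientClusterDemo {  // illustrative class name
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // create() builds and starts a mini MR cluster with one NodeManager,
    // using the caller class to identify the cluster and its working directory.
    MiniMRClientCluster cluster =
        MiniMRClientClusterFactory.create(MiniMRClientClusterDemo.class, 1, conf);
    try {
      // getConfig() returns the effective cluster configuration,
      // e.g. the ResourceManager address to use when submitting jobs.
      Configuration clusterConf = cluster.getConfig();
      System.out.println("ResourceManager at "
          + clusterConf.get("yarn.resourcemanager.address"));
    } finally {
      cluster.stop();
    }
  }
}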

Example 1  Project: hadoop  File: TestMRCredentials.java
@SuppressWarnings("deprecation")
@BeforeClass
public static void setUp() throws Exception {
  System.setProperty("hadoop.log.dir", "logs");
  Configuration conf = new Configuration();
  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(numSlaves)
      .build();
  jConf = new JobConf(conf);
  FileSystem.setDefaultUri(conf, dfsCluster.getFileSystem().getUri().toString());
  mrCluster = MiniMRClientClusterFactory.create(TestMRCredentials.class, 1, jConf);
  createKeysAsJson("keys.json");
}
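
The setup above starts both a MiniDFSCluster and a MiniMRClientCluster but does not show the matching teardown. Below is a minimal sketch, assuming the same static dfsCluster and mrCluster fields; the real test's teardown may do more, such as deleting the generated key file.

@AfterClass
public static void tearDown() throws Exception {
  if (mrCluster != null) {
    mrCluster.stop();       // stop the mini MapReduce cluster
    mrCluster = null;
  }
  if (dfsCluster != null) {
    dfsCluster.shutdown();  // shut down the mini HDFS cluster
    dfsCluster = null;
  }
}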
 
Example 2  Project: hadoop  File: TestEncryptedShuffle.java
private void startCluster(Configuration  conf) throws Exception {
  if (System.getProperty("hadoop.log.dir") == null) {
    System.setProperty("hadoop.log.dir", "target/test-dir");
  }
  conf.set("dfs.block.access.token.enable", "false");
  conf.set("dfs.permissions", "true");
  conf.set("hadoop.security.authentication", "simple");
  String cp = conf.get(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
      StringUtils.join(",",
          YarnConfiguration.DEFAULT_YARN_CROSS_PLATFORM_APPLICATION_CLASSPATH))
      + File.pathSeparator + classpathDir;
  conf.set(YarnConfiguration.YARN_APPLICATION_CLASSPATH, cp);
  dfsCluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fileSystem = dfsCluster.getFileSystem();
  fileSystem.mkdirs(new Path("/tmp"));
  fileSystem.mkdirs(new Path("/user"));
  fileSystem.mkdirs(new Path("/hadoop/mapred/system"));
  fileSystem.setPermission(
    new Path("/tmp"), FsPermission.valueOf("-rwxrwxrwx"));
  fileSystem.setPermission(
    new Path("/user"), FsPermission.valueOf("-rwxrwxrwx"));
  fileSystem.setPermission(
    new Path("/hadoop/mapred/system"), FsPermission.valueOf("-rwx------"));
  FileSystem.setDefaultUri(conf, fileSystem.getUri());
  mrCluster = MiniMRClientClusterFactory.create(this.getClass(), 1, conf);

  // so the minicluster conf is avail to the containers.
  Writer writer = new FileWriter(classpathDir + "/core-site.xml");
  mrCluster.getConfig().writeXml(writer);
  writer.close();
}
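
Once startCluster has run, the test can submit jobs against mrCluster.getConfig() so they execute on the mini cluster and exercise the encrypted-shuffle settings. The helper below is a hypothetical addition rather than part of the original test (it assumes org.apache.hadoop.mapreduce.Job and the mapreduce.lib input/output format classes, plus JUnit's assertTrue); it runs a pass-through job using Hadoop's default identity Mapper and Reducer.

private void runIdentityJob(Path input, Path output) throws Exception {
  // Build the job from the mini cluster's configuration so it is submitted there.
  Job job = Job.getInstance(mrCluster.getConfig(), "identity-smoke-test"); // illustrative name
  job.setJarByClass(getClass());
  FileInputFormat.addInputPath(job, input);
  FileOutputFormat.setOutputPath(job, output);
  // No mapper/reducer set: the defaults pass records through unchanged.
  assertTrue(job.waitForCompletion(true));
}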
 
Example 3  Project: hadoop  File: GridmixTestUtils.java
public static void initCluster(Class<?> caller) throws IOException {
  Configuration conf = new Configuration();
  // conf.set("mapred.queue.names", "default,q1,q2");
  conf.set("mapred.queue.names", "default");
  conf.set("yarn.scheduler.capacity.root.queues", "default");
  conf.set("yarn.scheduler.capacity.root.default.capacity", "100.0");

  conf.setBoolean(GRIDMIX_USE_QUEUE_IN_TRACE, false);
  conf.set(GRIDMIX_DEFAULT_QUEUE, "default");

  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true)
      .build(); // MiniDFSCluster(conf, 3, true, null);
  dfs = dfsCluster.getFileSystem();
  conf.set(JTConfig.JT_RETIREJOBS, "false");
  mrvl = MiniMRClientClusterFactory.create(caller, 2, conf);

  conf = mrvl.getConfig();
  String[] files = conf.getStrings(MRJobConfig.CACHE_FILES);
  if (files != null) {
    String[] timestamps = new String[files.length];
    for (int i = 0; i < files.length; i++) {
      timestamps[i] = Long.toString(System.currentTimeMillis());
    }
    conf.setStrings(MRJobConfig.CACHE_FILE_TIMESTAMPS, timestamps);
  }
}
 
Example 4  Project: big-c  File: TestMRCredentials.java (identical to Example 1)
@SuppressWarnings("deprecation")
@BeforeClass
public static void setUp() throws Exception {
  System.setProperty("hadoop.log.dir", "logs");
  Configuration conf = new Configuration();
  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(numSlaves)
      .build();
  jConf = new JobConf(conf);
  FileSystem.setDefaultUri(conf, dfsCluster.getFileSystem().getUri().toString());
  mrCluster = MiniMRClientClusterFactory.create(TestMRCredentials.class, 1, jConf);
  createKeysAsJson("keys.json");
}
 
Example 5  Project: big-c  File: TestEncryptedShuffle.java (identical to Example 2)
private void startCluster(Configuration  conf) throws Exception {
  if (System.getProperty("hadoop.log.dir") == null) {
    System.setProperty("hadoop.log.dir", "target/test-dir");
  }
  conf.set("dfs.block.access.token.enable", "false");
  conf.set("dfs.permissions", "true");
  conf.set("hadoop.security.authentication", "simple");
  String cp = conf.get(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
      StringUtils.join(",",
          YarnConfiguration.DEFAULT_YARN_CROSS_PLATFORM_APPLICATION_CLASSPATH))
      + File.pathSeparator + classpathDir;
  conf.set(YarnConfiguration.YARN_APPLICATION_CLASSPATH, cp);
  dfsCluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fileSystem = dfsCluster.getFileSystem();
  fileSystem.mkdirs(new Path("/tmp"));
  fileSystem.mkdirs(new Path("/user"));
  fileSystem.mkdirs(new Path("/hadoop/mapred/system"));
  fileSystem.setPermission(
    new Path("/tmp"), FsPermission.valueOf("-rwxrwxrwx"));
  fileSystem.setPermission(
    new Path("/user"), FsPermission.valueOf("-rwxrwxrwx"));
  fileSystem.setPermission(
    new Path("/hadoop/mapred/system"), FsPermission.valueOf("-rwx------"));
  FileSystem.setDefaultUri(conf, fileSystem.getUri());
  mrCluster = MiniMRClientClusterFactory.create(this.getClass(), 1, conf);

  // so the minicluster conf is avail to the containers.
  Writer writer = new FileWriter(classpathDir + "/core-site.xml");
  mrCluster.getConfig().writeXml(writer);
  writer.close();
}
 
Example 6  Project: big-c  File: GridmixTestUtils.java (identical to Example 3)
public static void initCluster(Class<?> caller) throws IOException {
  Configuration conf = new Configuration();
  // conf.set("mapred.queue.names", "default,q1,q2");
  conf.set("mapred.queue.names", "default");
  conf.set("yarn.scheduler.capacity.root.queues", "default");
  conf.set("yarn.scheduler.capacity.root.default.capacity", "100.0");

  conf.setBoolean(GRIDMIX_USE_QUEUE_IN_TRACE, false);
  conf.set(GRIDMIX_DEFAULT_QUEUE, "default");

  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true)
      .build(); // MiniDFSCluster(conf, 3, true, null);
  dfs = dfsCluster.getFileSystem();
  conf.set(JTConfig.JT_RETIREJOBS, "false");
  mrvl = MiniMRClientClusterFactory.create(caller, 2, conf);

  conf = mrvl.getConfig();
  String[] files = conf.getStrings(MRJobConfig.CACHE_FILES);
  if (files != null) {
    String[] timestamps = new String[files.length];
    for (int i = 0; i < files.length; i++) {
      timestamps[i] = Long.toString(System.currentTimeMillis());
    }
    conf.setStrings(MRJobConfig.CACHE_FILE_TIMESTAMPS, timestamps);
  }
}
 
Example 7  Project: mr4c  File: YarnTestBinding.java
private void startMrCluster() throws IOException {
	Configuration conf = new JobConf();
	FileSystem.setDefaultUri(conf, HadoopTestUtils.getTestDFS().getUri());
	conf.setBoolean(YarnConfiguration.YARN_MINICLUSTER_FIXED_PORTS, true);
	conf.setBoolean(JHAdminConfig.MR_HISTORY_MINICLUSTER_FIXED_PORTS, true);
	String addr = MiniYARNCluster.getHostname() + ":0";
	conf.set(YarnConfiguration.RM_ADDRESS, addr);
	conf.set(JHAdminConfig.MR_HISTORY_ADDRESS, addr);
	m_mrCluster = MiniMRClientClusterFactory.create(
		HadoopTestUtils.class,
		"MR4CTests",
		1, // num node managers
		conf
	);

	// make sure startup is finished
	for ( int i=0; i<60; i++ ) {
		String newAddr = m_mrCluster.getConfig().get(YarnConfiguration.RM_ADDRESS);
		if ( newAddr.equals(addr) ) {
			s_log.warn("MiniYARNCluster startup not complete");
			try {
				Thread.sleep(1000);
			} catch (InterruptedException ie) {
				throw new IOException(ie);
			}
		} else {
			s_log.info("MiniYARNCluster now available at {}", newAddr);
			return;
		}
	}
	throw new IOException("MiniYARNCluster taking too long to startup");

}
 
Example 8  Project: hadoop  File: TestLargeSort.java
@Before
public void setup() throws IOException {
  Configuration conf = new YarnConfiguration();
  cluster = MiniMRClientClusterFactory.create(this.getClass(), 2, conf);
  cluster.start();
}
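
This setup is typically paired with an @After method that stops the cluster; a minimal sketch of such a teardown, assuming the same cluster field as above:

@After
public void cleanup() throws IOException {
  if (cluster != null) {
    cluster.stop();
    cluster = null;
  }
}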
 
Example 9  Project: hadoop  File: MiniHadoopClusterManager.java
/**
 * Starts DFS and MR clusters, as specified in member-variable options. Also
 * writes out configuration and details, if requested.
 *
 * @throws IOException
 * @throws FileNotFoundException
 * @throws URISyntaxException
 */
public void start() throws IOException, FileNotFoundException,
    URISyntaxException {
  if (!noDFS) {
    dfs = new MiniDFSCluster.Builder(conf).nameNodePort(nnPort)
        .numDataNodes(numDataNodes).startupOption(dfsOpts).build();
    LOG.info("Started MiniDFSCluster -- namenode on port "
        + dfs.getNameNodePort());
  }
  if (!noMR) {
    if (fs == null && dfs != null) {
      fs = dfs.getFileSystem().getUri().toString();
    } else if (fs == null) {
      fs = "file:///tmp/minimr-" + System.nanoTime();
    }
    FileSystem.setDefaultUri(conf, new URI(fs));
    // Instruct the minicluster to use fixed ports, so user will know which
    // ports to use when communicating with the cluster.
    conf.setBoolean(YarnConfiguration.YARN_MINICLUSTER_FIXED_PORTS, true);
    conf.setBoolean(JHAdminConfig.MR_HISTORY_MINICLUSTER_FIXED_PORTS, true);
    conf.set(YarnConfiguration.RM_ADDRESS, MiniYARNCluster.getHostname()
        + ":" + this.rmPort);
    conf.set(JHAdminConfig.MR_HISTORY_ADDRESS, MiniYARNCluster.getHostname()
        + ":" + this.jhsPort);
    mr = MiniMRClientClusterFactory.create(this.getClass(), numNodeManagers,
        conf);
    LOG.info("Started MiniMRCluster");
  }

  if (writeConfig != null) {
    FileOutputStream fos = new FileOutputStream(new File(writeConfig));
    conf.writeXml(fos);
    fos.close();
  }

  if (writeDetails != null) {
    Map<String, Object> map = new TreeMap<String, Object>();
    if (dfs != null) {
      map.put("namenode_port", dfs.getNameNodePort());
    }
    if (mr != null) {
      map.put("resourcemanager_port", mr.getConfig().get(
          YarnConfiguration.RM_ADDRESS).split(":")[1]);
    }
    FileWriter fw = new FileWriter(new File(writeDetails));
    fw.write(new JSON().toJSON(map));
    fw.close();
  }
}
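
The manager's start() has a corresponding shutdown path; a minimal sketch, assuming the same mr and dfs fields (the actual class defines a similar stop() method):

public void stop() throws IOException {
  if (mr != null) {
    mr.stop();       // stop the MiniMRClientCluster
  }
  if (dfs != null) {
    dfs.shutdown();  // shut down the MiniDFSCluster
  }
}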
 
Example 10  Project: hadoop  File: TestDistCh.java
public void testDistCh() throws Exception {
  final Configuration conf = new Configuration();

  conf.set(CapacitySchedulerConfiguration.PREFIX+CapacitySchedulerConfiguration.ROOT+"."+CapacitySchedulerConfiguration.QUEUES, "default");
  conf.set(CapacitySchedulerConfiguration.PREFIX+CapacitySchedulerConfiguration.ROOT+".default."+CapacitySchedulerConfiguration.CAPACITY, "100");
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).format(true).build();
  
  final FileSystem fs = cluster.getFileSystem();
  final FsShell shell = new FsShell(conf);
  
  try {
    final FileTree tree = new FileTree(fs, "testDistCh");
    final FileStatus rootstatus = fs.getFileStatus(tree.rootdir);

    runLsr(shell, tree.root, 0);

    final String[] args = new String[NUN_SUBS];
    final ChPermissionStatus[] newstatus = new ChPermissionStatus[NUN_SUBS];

    
    // Each DistCh argument has the form <path>:<owner>:<group>:<permission>;
    // empty fields leave that attribute unchanged.
    args[0]="/test/testDistCh/sub0:sub1::";
    newstatus[0] = new ChPermissionStatus(rootstatus, "sub1", "", "");

    args[1]="/test/testDistCh/sub1::sub2:";
    newstatus[1] = new ChPermissionStatus(rootstatus, "", "sub2", "");

    args[2]="/test/testDistCh/sub2:::437";
    newstatus[2] = new ChPermissionStatus(rootstatus, "", "", "437");

    args[3]="/test/testDistCh/sub3:sub1:sub2:447";
    newstatus[3] = new ChPermissionStatus(rootstatus, "sub1", "sub2", "447");
 
    args[4]="/test/testDistCh/sub4::sub5:437";
    newstatus[4] = new ChPermissionStatus(rootstatus, "", "sub5", "437");

    args[5]="/test/testDistCh/sub5:sub1:sub5:";
    newstatus[5] = new ChPermissionStatus(rootstatus, "sub1", "sub5", "");

    args[6]="/test/testDistCh/sub6:sub3::437";
    newstatus[6] = new ChPermissionStatus(rootstatus, "sub3", "", "437");
    
    System.out.println("args=" + Arrays.asList(args).toString().replace(",", ",\n  "));
    System.out.println("newstatus=" + Arrays.asList(newstatus).toString().replace(",", ",\n  "));

    //run DistCh
    new DistCh(MiniMRClientClusterFactory.create(this.getClass(), 2, conf).getConfig()).run(args);
    runLsr(shell, tree.root, 0);

    //check results
    for(int i = 0; i < NUN_SUBS; i++) {
      Path sub = new Path(tree.root + "/sub" + i);
      checkFileStatus(newstatus[i], fs.getFileStatus(sub));
      for(FileStatus status : fs.listStatus(sub)) {
        checkFileStatus(newstatus[i], status);
      }
    }
  } finally {
    cluster.shutdown();
  }
}
 
Example 11  Project: big-c  File: TestLargeSort.java (identical to Example 8)
@Before
public void setup() throws IOException {
  Configuration conf = new YarnConfiguration();
  cluster = MiniMRClientClusterFactory.create(this.getClass(), 2, conf);
  cluster.start();
}
 
Example 12  Project: big-c  File: MiniHadoopClusterManager.java (identical to Example 9)
/**
 * Starts DFS and MR clusters, as specified in member-variable options. Also
 * writes out configuration and details, if requested.
 *
 * @throws IOException
 * @throws FileNotFoundException
 * @throws URISyntaxException
 */
public void start() throws IOException, FileNotFoundException,
    URISyntaxException {
  if (!noDFS) {
    dfs = new MiniDFSCluster.Builder(conf).nameNodePort(nnPort)
        .numDataNodes(numDataNodes).startupOption(dfsOpts).build();
    LOG.info("Started MiniDFSCluster -- namenode on port "
        + dfs.getNameNodePort());
  }
  if (!noMR) {
    if (fs == null && dfs != null) {
      fs = dfs.getFileSystem().getUri().toString();
    } else if (fs == null) {
      fs = "file:///tmp/minimr-" + System.nanoTime();
    }
    FileSystem.setDefaultUri(conf, new URI(fs));
    // Instruct the minicluster to use fixed ports, so user will know which
    // ports to use when communicating with the cluster.
    conf.setBoolean(YarnConfiguration.YARN_MINICLUSTER_FIXED_PORTS, true);
    conf.setBoolean(JHAdminConfig.MR_HISTORY_MINICLUSTER_FIXED_PORTS, true);
    conf.set(YarnConfiguration.RM_ADDRESS, MiniYARNCluster.getHostname()
        + ":" + this.rmPort);
    conf.set(JHAdminConfig.MR_HISTORY_ADDRESS, MiniYARNCluster.getHostname()
        + ":" + this.jhsPort);
    mr = MiniMRClientClusterFactory.create(this.getClass(), numNodeManagers,
        conf);
    LOG.info("Started MiniMRCluster");
  }

  if (writeConfig != null) {
    FileOutputStream fos = new FileOutputStream(new File(writeConfig));
    conf.writeXml(fos);
    fos.close();
  }

  if (writeDetails != null) {
    Map<String, Object> map = new TreeMap<String, Object>();
    if (dfs != null) {
      map.put("namenode_port", dfs.getNameNodePort());
    }
    if (mr != null) {
      map.put("resourcemanager_port", mr.getConfig().get(
          YarnConfiguration.RM_ADDRESS).split(":")[1]);
    }
    FileWriter fw = new FileWriter(new File(writeDetails));
    fw.write(new JSON().toJSON(map));
    fw.close();
  }
}
 
Example 13  Project: big-c  File: TestDistCh.java (identical to Example 10)
public void testDistCh() throws Exception {
  final Configuration conf = new Configuration();

  conf.set(CapacitySchedulerConfiguration.PREFIX+CapacitySchedulerConfiguration.ROOT+"."+CapacitySchedulerConfiguration.QUEUES, "default");
  conf.set(CapacitySchedulerConfiguration.PREFIX+CapacitySchedulerConfiguration.ROOT+".default."+CapacitySchedulerConfiguration.CAPACITY, "100");
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).format(true).build();
  
  final FileSystem fs = cluster.getFileSystem();
  final FsShell shell = new FsShell(conf);
  
  try {
    final FileTree tree = new FileTree(fs, "testDistCh");
    final FileStatus rootstatus = fs.getFileStatus(tree.rootdir);

    runLsr(shell, tree.root, 0);

    final String[] args = new String[NUN_SUBS];
    final ChPermissionStatus[] newstatus = new ChPermissionStatus[NUN_SUBS];

    
    // Each DistCh argument has the form <path>:<owner>:<group>:<permission>;
    // empty fields leave that attribute unchanged.
    args[0]="/test/testDistCh/sub0:sub1::";
    newstatus[0] = new ChPermissionStatus(rootstatus, "sub1", "", "");

    args[1]="/test/testDistCh/sub1::sub2:";
    newstatus[1] = new ChPermissionStatus(rootstatus, "", "sub2", "");

    args[2]="/test/testDistCh/sub2:::437";
    newstatus[2] = new ChPermissionStatus(rootstatus, "", "", "437");

    args[3]="/test/testDistCh/sub3:sub1:sub2:447";
    newstatus[3] = new ChPermissionStatus(rootstatus, "sub1", "sub2", "447");
 
    args[4]="/test/testDistCh/sub4::sub5:437";
    newstatus[4] = new ChPermissionStatus(rootstatus, "", "sub5", "437");

    args[5]="/test/testDistCh/sub5:sub1:sub5:";
    newstatus[5] = new ChPermissionStatus(rootstatus, "sub1", "sub5", "");

    args[6]="/test/testDistCh/sub6:sub3::437";
    newstatus[6] = new ChPermissionStatus(rootstatus, "sub3", "", "437");
    
    System.out.println("args=" + Arrays.asList(args).toString().replace(",", ",\n  "));
    System.out.println("newstatus=" + Arrays.asList(newstatus).toString().replace(",", ",\n  "));

    //run DistCh
    new DistCh(MiniMRClientClusterFactory.create(this.getClass(), 2, conf).getConfig()).run(args);
    runLsr(shell, tree.root, 0);

    //check results
    for(int i = 0; i < NUN_SUBS; i++) {
      Path sub = new Path(tree.root + "/sub" + i);
      checkFileStatus(newstatus[i], fs.getFileStatus(sub));
      for(FileStatus status : fs.listStatus(sub)) {
        checkFileStatus(newstatus[i], status);
      }
    }
  } finally {
    cluster.shutdown();
  }
}
 