类org.apache.hadoop.fs.FsShell源码实例Demo

下面列出了使用 org.apache.hadoop.fs.FsShell API 的类实例代码示例及常见写法；也可以点击链接跳转到 GitHub 查看完整源代码。

源代码1 项目: hadoop-ozone   文件: TestOzoneFsHAURLs.java
/**
 * Helper function for testOtherDefaultFS():
 * run fs -ls o3fs:/// against different fs.defaultFS input.
 *
 * @param defaultFS Desired fs.defaultFS to be used in the test
 * @throws Exception if the shell invocation itself fails unexpectedly
 */
private void testWithDefaultFS(String defaultFS) throws Exception {
  OzoneConfiguration clientConf = new OzoneConfiguration(conf);
  clientConf.setQuietMode(false);
  clientConf.set(o3fsImplKey, o3fsImplValue);
  // fs.defaultFS = file:///
  clientConf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,
      defaultFS);

  FsShell shell = new FsShell(clientConf);
  try {
    // Test case: ozone fs -ls o3fs:///
    // Expectation: Fail. fs.defaultFS is not a qualified o3fs URI.
    int res = ToolRunner.run(shell, new String[] {"-ls", "o3fs:///"});
    // JUnit convention: expected value first, actual second. The original
    // had them swapped, which produces a misleading failure message.
    Assert.assertEquals(-1, res);
  } finally {
    // Close the shell so its cached FileSystem does not leak between tests.
    shell.close();
  }
}
 
源代码2 项目: hadoop   文件: TestDFSShellGenericOptions.java
/**
 * Runs the shell with the given generic-option args against the named
 * namenode, then verifies that "/data" was created and cleans it up.
 * Any failure is printed but deliberately not rethrown.
 */
private void execute(String [] args, String namenode) {
  FsShell shell = new FsShell();
  FileSystem fileSys = null;
  try {
    ToolRunner.run(shell, args);
    fileSys = FileSystem.get(NameNode.getUri(NameNode.getAddress(namenode)),
        shell.getConf());
    Path dataDir = new Path("/data");
    assertTrue("Directory does not get created",
               fileSys.isDirectory(dataDir));
    fileSys.delete(dataDir, true);
  } catch (Exception e) {
    System.err.println(e.getMessage());
    e.printStackTrace();
  } finally {
    if (fileSys != null) {
      try {
        fileSys.close();
      } catch (IOException ignored) {
        // best-effort close only
      }
    }
  }
}
 
源代码3 项目: hadoop   文件: TestFsShellPermission.java
/**
 * Creates the configured test files, runs the delete command as userUgi,
 * then asserts whether the target path was removed as expected and
 * cleans up the test root.
 */
public void execute(Configuration conf, FileSystem fs) throws Exception {
  fs.mkdirs(new Path(TEST_ROOT));
  createFiles(fs, TEST_ROOT, fileEntries);

  final FsShell fsShell = new FsShell(conf);
  final String deletePath = TEST_ROOT + "/" + deleteEntry.getPath();

  // Build the full command line: configured command/options plus the path.
  ArrayList<String> cmdList =
      new ArrayList<String>(Arrays.asList(StringUtils.split(cmdAndOptions)));
  cmdList.add(deletePath);
  final String[] cmdOpts = cmdList.toArray(new String[cmdList.size()]);
  userUgi.doAs(new PrivilegedExceptionAction<String>() {
    public String run() throws Exception {
      return execCmd(fsShell, cmdOpts);
    }
  });

  // The entry should be gone iff this scenario expects a successful delete.
  assertEquals(expectedToDelete, !fs.exists(new Path(deletePath)));

  deldir(fs, TEST_ROOT);
}
 
源代码4 项目: hadoop   文件: TestSnapshotDeletion.java
/**
 * -deleteSnapshot must reject an incorrect number of arguments (too few
 * and too many) with exit code -1 and a descriptive error message.
 */
@Test
public void testDeleteSnapshotCommandWithIllegalArguments() throws Exception {
  ByteArrayOutputStream out = new ByteArrayOutputStream();
  PrintStream psOut = new PrintStream(out);
  // Save the real streams so they can be restored even if an assertion
  // fails; the original leaked the redirection into subsequent tests.
  PrintStream oldOut = System.out;
  PrintStream oldErr = System.err;
  System.setOut(psOut);
  System.setErr(psOut);
  try {
    FsShell shell = new FsShell();
    shell.setConf(conf);

    // Too few arguments: the snapshot name is missing.
    String[] argv1 = {"-deleteSnapshot", "/tmp"};
    int val = shell.run(argv1);
    assertTrue(val == -1);
    assertTrue(out.toString().contains(
        argv1[0] + ": Incorrect number of arguments."));
    out.reset();

    // Too many arguments: two snapshot names given.
    String[] argv2 = {"-deleteSnapshot", "/tmp", "s1", "s2"};
    val = shell.run(argv2);
    assertTrue(val == -1);
    assertTrue(out.toString().contains(
        argv2[0] + ": Incorrect number of arguments."));
  } finally {
    System.setOut(oldOut);
    System.setErr(oldErr);
    psOut.close();
    out.close();
  }
}
 
源代码5 项目: hadoop   文件: TestSnapshotRename.java
/**
 * -renameSnapshot must reject an incorrect number of arguments (too few
 * and too many) with exit code -1 and a descriptive error message.
 */
@Test
public void testRenameSnapshotCommandWithIllegalArguments() throws Exception {
  ByteArrayOutputStream out = new ByteArrayOutputStream();
  PrintStream psOut = new PrintStream(out);
  // Save the real streams so they can be restored even if an assertion
  // fails; the original leaked the redirection into subsequent tests.
  PrintStream oldOut = System.out;
  PrintStream oldErr = System.err;
  System.setOut(psOut);
  System.setErr(psOut);
  try {
    FsShell shell = new FsShell();
    shell.setConf(conf);

    // Too few arguments: the new snapshot name is missing.
    String[] argv1 = {"-renameSnapshot", "/tmp", "s1"};
    int val = shell.run(argv1);
    assertTrue(val == -1);
    assertTrue(out.toString().contains(
        argv1[0] + ": Incorrect number of arguments."));
    out.reset();

    // Too many arguments: an extra snapshot name given.
    String[] argv2 = {"-renameSnapshot", "/tmp", "s1", "s2", "s3"};
    val = shell.run(argv2);
    assertTrue(val == -1);
    assertTrue(out.toString().contains(
        argv2[0] + ": Incorrect number of arguments."));
  } finally {
    System.setOut(oldOut);
    System.setErr(oldErr);
    psOut.close();
    out.close();
  }
}
 
源代码6 项目: hadoop-gpu   文件: TestDFSShellGenericOptions.java
/**
 * Runs the shell with the given generic-option args against the named
 * namenode, then verifies that "/data" was created and cleans it up.
 * Any failure is printed but deliberately not rethrown.
 */
private void execute(String [] args, String namenode) {
  FsShell shell = new FsShell();
  FileSystem fileSys = null;
  try {
    ToolRunner.run(shell, args);
    fileSys = new DistributedFileSystem(NameNode.getAddress(namenode),
                                        shell.getConf());
    Path dataDir = new Path("/data");
    assertTrue("Directory does not get created",
               fileSys.isDirectory(dataDir));
    fileSys.delete(dataDir, true);
  } catch (Exception e) {
    System.err.println(e.getMessage());
    e.printStackTrace();
  } finally {
    if (fileSys != null) {
      try {
        fileSys.close();
      } catch (IOException ignored) {
        // best-effort close only
      }
    }
  }
}
 
源代码7 项目: hadoop   文件: TestHadoopArchives.java
/** Archives a relative path with a replication setting and expects the
 *  archive listing to match the original listing. */
@Test
public void testRelativePathWitRepl() throws Exception {
  final Path dir1 = new Path(inputPath, "dir1");
  fs.mkdirs(dir1);
  createFile(inputPath, fs, dir1.getName(), "a");

  final FsShell shell = new FsShell(conf);
  final List<String> before = lsr(shell, "input");
  System.out.println("originalPaths: " + before);

  // Build the archive (with replication), then list its contents.
  final String harPath = makeArchiveWithRepl();
  final List<String> after = lsr(shell, harPath);

  Assert.assertEquals(before, after);
}
 
源代码8 项目: hadoop   文件: TestHadoopArchives.java
/** Archives a directory containing a single file and expects the archive
 *  listing to match the original listing. */
@Test
public void testSingleFile() throws Exception {
  final Path dir1 = new Path(inputPath, "dir1");
  fs.mkdirs(dir1);
  String singleFileName = "a";
  createFile(inputPath, fs, dir1.getName(), singleFileName);

  final FsShell shell = new FsShell(conf);
  final List<String> before = lsr(shell, dir1.toString());
  System.out.println("originalPaths: " + before);

  // Build the archive, then list its contents.
  final String harPath = makeArchive(dir1, singleFileName);
  final List<String> after = lsr(shell, harPath);

  Assert.assertEquals(before, after);
}
 
源代码9 项目: hadoop   文件: TestHadoopArchives.java
/** Archives files selected by a glob and expects the glob-filtered archive
 *  listing to match the original. */
@Test
public void testGlobFiles() throws Exception {
  final Path dir1 = new Path(inputPath, "dir1");
  final Path dir2 = new Path(inputPath, "dir2");
  fs.mkdirs(dir1);
  String fileName = "a";
  createFile(inputPath, fs, dir1.getName(), fileName);
  createFile(inputPath, fs, dir2.getName(), fileName);
  createFile(inputPath, fs, dir1.getName(), "b"); // not part of result

  final String glob =  "dir{1,2}/a";
  final FsShell shell = new FsShell(conf);
  final List<String> before =
      lsr(shell, inputPath.toString(), inputPath + "/" + glob);
  System.out.println("originalPaths: " + before);

  // Build the archive, then apply the same glob inside it.
  final String harPath = makeArchive(inputPath, glob);
  final List<String> after = lsr(shell, harPath, harPath + "/" + glob);

  Assert.assertEquals(before, after);
}
 
源代码10 项目: big-c   文件: TestDFSShellGenericOptions.java
/**
 * Runs the shell with the given generic-option args against the named
 * namenode, then verifies that "/data" was created and cleans it up.
 * Any failure is printed but deliberately not rethrown.
 */
private void execute(String [] args, String namenode) {
  FsShell shell = new FsShell();
  FileSystem fileSys = null;
  try {
    ToolRunner.run(shell, args);
    fileSys = FileSystem.get(NameNode.getUri(NameNode.getAddress(namenode)),
        shell.getConf());
    Path dataDir = new Path("/data");
    assertTrue("Directory does not get created",
               fileSys.isDirectory(dataDir));
    fileSys.delete(dataDir, true);
  } catch (Exception e) {
    System.err.println(e.getMessage());
    e.printStackTrace();
  } finally {
    if (fileSys != null) {
      try {
        fileSys.close();
      } catch (IOException ignored) {
        // best-effort close only
      }
    }
  }
}
 
源代码11 项目: big-c   文件: TestFsShellPermission.java
/**
 * Creates the configured test files, runs the delete command as userUgi,
 * then asserts whether the target path was removed as expected and
 * cleans up the test root.
 */
public void execute(Configuration conf, FileSystem fs) throws Exception {
  fs.mkdirs(new Path(TEST_ROOT));
  createFiles(fs, TEST_ROOT, fileEntries);

  final FsShell fsShell = new FsShell(conf);
  final String deletePath = TEST_ROOT + "/" + deleteEntry.getPath();

  // Build the full command line: configured command/options plus the path.
  ArrayList<String> cmdList =
      new ArrayList<String>(Arrays.asList(StringUtils.split(cmdAndOptions)));
  cmdList.add(deletePath);
  final String[] cmdOpts = cmdList.toArray(new String[cmdList.size()]);
  userUgi.doAs(new PrivilegedExceptionAction<String>() {
    public String run() throws Exception {
      return execCmd(fsShell, cmdOpts);
    }
  });

  // The entry should be gone iff this scenario expects a successful delete.
  assertEquals(expectedToDelete, !fs.exists(new Path(deletePath)));

  deldir(fs, TEST_ROOT);
}
 
源代码12 项目: big-c   文件: TestSnapshotDeletion.java
/**
 * -deleteSnapshot must reject an incorrect number of arguments (too few
 * and too many) with exit code -1 and a descriptive error message.
 */
@Test
public void testDeleteSnapshotCommandWithIllegalArguments() throws Exception {
  ByteArrayOutputStream out = new ByteArrayOutputStream();
  PrintStream psOut = new PrintStream(out);
  // Save the real streams so they can be restored even if an assertion
  // fails; the original leaked the redirection into subsequent tests.
  PrintStream oldOut = System.out;
  PrintStream oldErr = System.err;
  System.setOut(psOut);
  System.setErr(psOut);
  try {
    FsShell shell = new FsShell();
    shell.setConf(conf);

    // Too few arguments: the snapshot name is missing.
    String[] argv1 = {"-deleteSnapshot", "/tmp"};
    int val = shell.run(argv1);
    assertTrue(val == -1);
    assertTrue(out.toString().contains(
        argv1[0] + ": Incorrect number of arguments."));
    out.reset();

    // Too many arguments: two snapshot names given.
    String[] argv2 = {"-deleteSnapshot", "/tmp", "s1", "s2"};
    val = shell.run(argv2);
    assertTrue(val == -1);
    assertTrue(out.toString().contains(
        argv2[0] + ": Incorrect number of arguments."));
  } finally {
    System.setOut(oldOut);
    System.setErr(oldErr);
    psOut.close();
    out.close();
  }
}
 
源代码13 项目: big-c   文件: TestXAttrWithSnapshot.java
/**
 * Test that users can copy a snapshot while preserving its xattrs.
 */
@Test (timeout = 120000)
public void testCopySnapshotShouldPreserveXAttrs() throws Exception {
  FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short) 0700));
  hdfs.setXAttr(path, name1, value1);
  hdfs.setXAttr(path, name2, value2);
  SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);

  // Copy the snapshot with -px so extended attributes are preserved.
  final Path snapshotCopy = new Path(path.toString() + "-copy");
  final String[] cpArgs = new String[] { "-cp", "-px",
      snapshotPath.toUri().toString(), snapshotCopy.toUri().toString() };
  final int ret = ToolRunner.run(new FsShell(conf), cpArgs);
  assertEquals("cp -px is not working on a snapshot", SUCCESS, ret);

  // Both xattrs must survive the copy.
  final Map<String, byte[]> xattrs = hdfs.getXAttrs(snapshotCopy);
  assertArrayEquals(value1, xattrs.get(name1));
  assertArrayEquals(value2, xattrs.get(name2));
}
 
源代码14 项目: big-c   文件: TestSnapshotRename.java
/**
 * -renameSnapshot must reject an incorrect number of arguments (too few
 * and too many) with exit code -1 and a descriptive error message.
 */
@Test
public void testRenameSnapshotCommandWithIllegalArguments() throws Exception {
  ByteArrayOutputStream out = new ByteArrayOutputStream();
  PrintStream psOut = new PrintStream(out);
  // Save the real streams so they can be restored even if an assertion
  // fails; the original leaked the redirection into subsequent tests.
  PrintStream oldOut = System.out;
  PrintStream oldErr = System.err;
  System.setOut(psOut);
  System.setErr(psOut);
  try {
    FsShell shell = new FsShell();
    shell.setConf(conf);

    // Too few arguments: the new snapshot name is missing.
    String[] argv1 = {"-renameSnapshot", "/tmp", "s1"};
    int val = shell.run(argv1);
    assertTrue(val == -1);
    assertTrue(out.toString().contains(
        argv1[0] + ": Incorrect number of arguments."));
    out.reset();

    // Too many arguments: an extra snapshot name given.
    String[] argv2 = {"-renameSnapshot", "/tmp", "s1", "s2", "s3"};
    val = shell.run(argv2);
    assertTrue(val == -1);
    assertTrue(out.toString().contains(
        argv2[0] + ": Incorrect number of arguments."));
  } finally {
    System.setOut(oldOut);
    System.setErr(oldErr);
    psOut.close();
    out.close();
  }
}
 
源代码15 项目: big-c   文件: TestHadoopArchives.java
/** Archives via a relative input path and expects the archive listing to
 *  match the original listing. */
@Test
public void testRelativePath() throws Exception {
  final Path dir1 = new Path(inputPath, "dir1");
  fs.mkdirs(dir1);
  createFile(inputPath, fs, dir1.getName(), "a");

  final FsShell shell = new FsShell(conf);
  final List<String> before = lsr(shell, "input");
  System.out.println("originalPaths: " + before);

  // Build the archive, then list its contents.
  final String harPath = makeArchive();
  final List<String> after = lsr(shell, harPath);

  Assert.assertEquals(before, after);
}
 
源代码16 项目: big-c   文件: TestHadoopArchives.java
/** Archives a relative path with a replication setting and expects the
 *  archive listing to match the original listing. */
@Test
public void testRelativePathWitRepl() throws Exception {
  final Path dir1 = new Path(inputPath, "dir1");
  fs.mkdirs(dir1);
  createFile(inputPath, fs, dir1.getName(), "a");

  final FsShell shell = new FsShell(conf);
  final List<String> before = lsr(shell, "input");
  System.out.println("originalPaths: " + before);

  // Build the archive (with replication), then list its contents.
  final String harPath = makeArchiveWithRepl();
  final List<String> after = lsr(shell, harPath);

  Assert.assertEquals(before, after);
}
 
源代码17 项目: big-c   文件: TestHadoopArchives.java
/** Archiving must handle file and directory names that contain spaces. */
@Test
public void testPathWithSpaces() throws Exception {
  // create files/directories with spaces
  createFile(inputPath, fs, "c c");
  final Path spacedDir = new Path(inputPath, "sub 1");
  fs.mkdirs(spacedDir);
  for (String name : new String[] { "file x y z", "file", "x", "y", "z" }) {
    createFile(spacedDir, fs, name);
  }
  final Path suffixDir = new Path(inputPath, "sub 1 with suffix");
  fs.mkdirs(suffixDir);
  createFile(suffixDir, fs, "z");

  final FsShell shell = new FsShell(conf);
  final String inputPathStr = inputPath.toUri().getPath();
  final List<String> before = lsr(shell, inputPathStr);

  // Build the archive, then list its contents.
  final String harPath = makeArchive();

  // compare results
  final List<String> after = lsr(shell, harPath);
  Assert.assertEquals(before, after);
}
 
源代码18 项目: big-c   文件: TestHadoopArchives.java
/** Archives a directory containing a single file and expects the archive
 *  listing to match the original listing. */
@Test
public void testSingleFile() throws Exception {
  final Path dir1 = new Path(inputPath, "dir1");
  fs.mkdirs(dir1);
  String singleFileName = "a";
  createFile(inputPath, fs, dir1.getName(), singleFileName);

  final FsShell shell = new FsShell(conf);
  final List<String> before = lsr(shell, dir1.toString());
  System.out.println("originalPaths: " + before);

  // Build the archive, then list its contents.
  final String harPath = makeArchive(dir1, singleFileName);
  final List<String> after = lsr(shell, harPath);

  Assert.assertEquals(before, after);
}
 
源代码19 项目: big-c   文件: TestHadoopArchives.java
/** Archives files selected by a glob and expects the glob-filtered archive
 *  listing to match the original. */
@Test
public void testGlobFiles() throws Exception {
  final Path dir1 = new Path(inputPath, "dir1");
  final Path dir2 = new Path(inputPath, "dir2");
  fs.mkdirs(dir1);
  String fileName = "a";
  createFile(inputPath, fs, dir1.getName(), fileName);
  createFile(inputPath, fs, dir2.getName(), fileName);
  createFile(inputPath, fs, dir1.getName(), "b"); // not part of result

  final String glob =  "dir{1,2}/a";
  final FsShell shell = new FsShell(conf);
  final List<String> before =
      lsr(shell, inputPath.toString(), inputPath + "/" + glob);
  System.out.println("originalPaths: " + before);

  // Build the archive, then apply the same glob inside it.
  final String harPath = makeArchive(inputPath, glob);
  final List<String> after = lsr(shell, harPath, harPath + "/" + glob);

  Assert.assertEquals(before, after);
}
 
源代码20 项目: hadoop-gpu   文件: TestDFSShell.java
/**
 * Tests "fs -lsr" (recursive listing): it should run for the current
 * superuser, and after one subdirectory is made unreadable a different
 * user's listing should still include the readable entries.
 */
public void testLsr() throws Exception {
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
  DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem();

  try {
    final String root = createTree(dfs, "lsr");
    dfs.mkdirs(new Path(root, "zzz"));
    
    // Third argument is presumably the expected exit code (0 = success
    // as the current superuser) — TODO confirm against runLsr's signature.
    runLsr(new FsShell(conf), root, 0);
    
    // Make "sub" unreadable (permission 000) for the next listing.
    final Path sub = new Path(root, "sub");
    dfs.setPermission(sub, new FsPermission((short)0));

    // Switch identity: register a fabricated user in the conf so the next
    // FsShell runs without the superuser's privileges.
    final UserGroupInformation ugi = UserGroupInformation.getCurrentUGI();
    final String tmpusername = ugi.getUserName() + "1";
    UnixUserGroupInformation tmpUGI = new UnixUserGroupInformation(
        tmpusername, new String[] {tmpusername});
    UnixUserGroupInformation.saveToConf(conf,
          UnixUserGroupInformation.UGI_PROPERTY_NAME, tmpUGI);
    String results = runLsr(new FsShell(conf), root, -1);
    // Even with "sub" unreadable, the readable "zzz" entry must be listed.
    assertTrue(results.contains("zzz"));
  } finally {
    cluster.shutdown();
  }
}
 
源代码21 项目: spork   文件: Pig.java
/**
 * Run a filesystem command.  Any output from this command is written to
 * stdout or stderr as appropriate.
 * @param cmd Filesystem command to run along with its arguments as one
 * string.
 * @return the shell's exit code, or -1 if {@code cmd} is null
 * @throws IOException if running the filesystem command fails
 */
public static int fs(String cmd) throws IOException {
    ScriptPigContext ctx = getScriptContext();
    FsShell shell = new FsShell(ConfigurationUtil.toConfiguration(ctx
            .getPigContext().getProperties()));
    int code = -1;
    if (cmd != null) {
        // Trim first: leading whitespace would otherwise produce an empty
        // first token, which the "-" prefixing below would turn into a
        // bare "-" and the command name would be lost.
        String[] cmdTokens = cmd.trim().split("\\s+");
        // FsShell expects commands in "-cmd" form; accept "ls" for "-ls".
        if (!cmdTokens[0].startsWith("-")) cmdTokens[0] = "-" + cmdTokens[0];
        try {
            code = shell.run(cmdTokens);
        } catch (Exception e) {
            throw new IOException("Run filesystem command failed", e);
        }
    }
    return code;
}
 
源代码22 项目: RDFS   文件: TestDFSShellGenericOptions.java
/**
 * Runs the shell with the given generic-option args against the named
 * namenode, then verifies that "/data" was created and cleans it up.
 * Any failure is printed but deliberately not rethrown.
 */
private void execute(String [] args, String namenode) {
  FsShell shell = new FsShell();
  FileSystem fileSys = null;
  try {
    ToolRunner.run(shell, args);
    fileSys = new DistributedFileSystem(NameNode.getAddress(namenode),
                                        shell.getConf());
    Path dataDir = new Path("/data");
    assertTrue("Directory does not get created",
               fileSys.isDirectory(dataDir));
    fileSys.delete(dataDir, true);
  } catch (Exception e) {
    System.err.println(e.getMessage());
    e.printStackTrace();
  } finally {
    if (fileSys != null) {
      try {
        fileSys.close();
      } catch (IOException ignored) {
        // best-effort close only
      }
    }
  }
}
 
源代码23 项目: RDFS   文件: TestDFSShell.java
/**
 * Tests "fs -lsr" (recursive listing): it should run for the current
 * superuser, and after one subdirectory is made unreadable a different
 * user's listing should still include the readable entries.
 */
public void testLsr() throws Exception {
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
  DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem();

  try {
    final String root = createTree(dfs, "lsr");
    dfs.mkdirs(new Path(root, "zzz"));

    // Third argument is presumably the expected exit code (0 = success
    // as the current superuser) — TODO confirm against runLsr's signature.
    runLsr(new FsShell(conf), root, 0);

    // Make "sub" unreadable (permission 000) for the next listing.
    final Path sub = new Path(root, "sub");
    dfs.setPermission(sub, new FsPermission((short)0));

    // Switch identity: register a fabricated user in the conf so the next
    // FsShell runs without the superuser's privileges.
    final UserGroupInformation ugi = UserGroupInformation.getCurrentUGI();
    final String tmpusername = ugi.getUserName() + "1";
    UnixUserGroupInformation tmpUGI = new UnixUserGroupInformation(
        tmpusername, new String[] {tmpusername});
    UnixUserGroupInformation.saveToConf(conf,
          UnixUserGroupInformation.UGI_PROPERTY_NAME, tmpUGI);
    String results = runLsr(new FsShell(conf), root, -1);
    // Even with "sub" unreadable, the readable "zzz" entry must be listed.
    assertTrue(results.contains("zzz"));
  } finally {
    cluster.shutdown();
  }
}
 
源代码24 项目: hadoop   文件: TestFsShellPermission.java
/**
 * Runs the given FsShell command with stdout captured.
 *
 * @param shell the shell to run
 * @param args the command line to pass to the shell
 * @return the shell's exit code as a string
 * @throws Exception if the shell invocation throws
 */
static String execCmd(FsShell shell, final String[] args) throws Exception {
  ByteArrayOutputStream baout = new ByteArrayOutputStream();
  PrintStream out = new PrintStream(baout, true);
  PrintStream old = System.out;
  System.setOut(out);
  int ret;
  try {
    ret = shell.run(args);
  } finally {
    out.close();
    // Restore stdout even when shell.run throws; the original left later
    // tests writing into the dead capture stream on failure.
    System.setOut(old);
  }
  return String.valueOf(ret);
}
 
源代码25 项目: hadoop-gpu   文件: TestUnderReplicatedBlocks.java
/**
 * Tests that "-setrep -w" completes successfully on a file whose block is
 * under-replicated but absent from the under-replicated blocks queue.
 */
public void testSetrepIncWithUnderReplicatedBlocks() throws Exception {
  Configuration conf = new Configuration();
  final short REPLICATION_FACTOR = 2;
  final String FILE_NAME = "/testFile";
  final Path FILE_PATH = new Path(FILE_NAME);
  MiniDFSCluster cluster = new MiniDFSCluster(conf, REPLICATION_FACTOR+1, true, null);
  try {
    // create a file with one block with a replication factor of 2
    final FileSystem fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, FILE_PATH, 1L, REPLICATION_FACTOR, 1L);
    DFSTestUtil.waitReplication(fs, FILE_PATH, REPLICATION_FACTOR);
    
    // remove one replica from the blocksMap so block becomes under-replicated
    // but the block does not get put into the under-replicated blocks queue
    FSNamesystem namesystem = cluster.getNameNode().namesystem;
    Block b = DFSTestUtil.getFirstBlock(fs, FILE_PATH);
    DatanodeDescriptor dn = namesystem.blocksMap.nodeIterator(b).next();
    namesystem.addToInvalidates(b, dn);
    namesystem.blocksMap.removeNode(b, dn);
    
    // increment this file's replication factor
    FsShell shell = new FsShell(conf);
    // -setrep -w waits for the new replication to be reached; exit code 0
    // means the command (and the wait) succeeded.
    assertEquals(0, shell.run(new String[]{
        "-setrep", "-w", Integer.toString(1+REPLICATION_FACTOR), FILE_NAME}));
  } finally {
    cluster.shutdown();
  }
  
}
 
源代码26 项目: hadoop-gpu   文件: TestDFSShell.java
/**
 * Tests the "count" command over a DFS tree built by createTree() and over
 * a local-filesystem directory, including a mixed DFS+local invocation.
 */
public void testCount() throws Exception {
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
  DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem();
  FsShell shell = new FsShell();
  shell.setConf(conf);

  try {
    String root = createTree(dfs, "count");

    // Verify the counts
    runCount(root, 2, 4, conf);
    runCount(root + "2", 2, 1, conf);
    runCount(root + "2/f1", 0, 1, conf);
    runCount(root + "2/sub", 1, 0, conf);

    // Also count a directory on the local filesystem.
    final FileSystem localfs = FileSystem.getLocal(conf);
    Path localpath = new Path(TEST_ROOT_DIR, "testcount");
    localpath = localpath.makeQualified(localfs);
    localfs.mkdirs(localpath);
    
    final String localstr = localpath.toString();
    System.out.println("localstr=" + localstr);
    runCount(localstr, 1, 0, conf);
    // One Count invocation spanning a DFS path and a local path must succeed.
    assertEquals(0, new Count(new String[]{root, localstr}, 0, conf).runAll());
  } finally {
    try {
      dfs.close();
    } catch (Exception e) {
      // Deliberate best-effort close: a close failure must not mask a test
      // failure or prevent the cluster shutdown below.
    }
    cluster.shutdown();
  }
}
 
源代码27 项目: hadoop   文件: TestUnderReplicatedBlocks.java
/**
 * Tests that "-setrep -w" completes successfully on a file whose block is
 * under-replicated but absent from the under-replicated blocks queue.
 */
@Test(timeout=60000) // 1 min timeout
public void testSetrepIncWithUnderReplicatedBlocks() throws Exception {
  Configuration conf = new HdfsConfiguration();
  final short REPLICATION_FACTOR = 2;
  final String FILE_NAME = "/testFile";
  final Path FILE_PATH = new Path(FILE_NAME);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION_FACTOR + 1).build();
  try {
    // create a file with one block with a replication factor of 2
    final FileSystem fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, FILE_PATH, 1L, REPLICATION_FACTOR, 1L);
    DFSTestUtil.waitReplication(fs, FILE_PATH, REPLICATION_FACTOR);
    
    // remove one replica from the blocksMap so block becomes under-replicated
    // but the block does not get put into the under-replicated blocks queue
    final BlockManager bm = cluster.getNamesystem().getBlockManager();
    ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, FILE_PATH);
    DatanodeDescriptor dn = bm.blocksMap.getStorages(b.getLocalBlock())
        .iterator().next().getDatanodeDescriptor();
    bm.addToInvalidates(b.getLocalBlock(), dn);
    // Fixed sleep, presumably to let the invalidation be processed before
    // the node is removed from the blocksMap — TODO confirm.
    Thread.sleep(5000);
    bm.blocksMap.removeNode(b.getLocalBlock(), dn);
    
    // increment this file's replication factor
    FsShell shell = new FsShell(conf);
    // -setrep -w waits for the new replication to be reached; exit code 0
    // means the command (and the wait) succeeded.
    assertEquals(0, shell.run(new String[]{
        "-setrep", "-w", Integer.toString(1+REPLICATION_FACTOR), FILE_NAME}));
  } finally {
    cluster.shutdown();
  }
  
}
 
源代码28 项目: hadoop-gpu   文件: TestDFSShell.java
/**
 * Test chmod: symbolic mode, octal mode, and recursive (-R) mode, checking
 * the resulting permission strings on a directory and a file.
 *
 * @param conf configuration for the shell
 * @param fs filesystem under test (closed by this method in the finally block)
 * @param chmodDir directory path to create and chmod
 * @throws IOException on filesystem errors
 */
void testChmod(Configuration conf, FileSystem fs, String chmodDir) 
                                                  throws IOException {
  FsShell shell = new FsShell();
  shell.setConf(conf);
  
  try {
   //first make dir
   Path dir = new Path(chmodDir);
   fs.delete(dir, true);
   fs.mkdirs(dir);

   // Symbolic mode: u+rwx,g=rw,o-rwx => rwxrw----
   runCmd(shell, "-chmod", "u+rwx,g=rw,o-rwx", chmodDir);
   assertEquals("rwxrw----",
                fs.getFileStatus(dir).getPermission().toString());

   //create an empty file
   Path file = new Path(chmodDir, "file");
   TestDFSShell.writeFile(fs, file);

   //test octal mode
   runCmd(shell, "-chmod", "644", file.toString());
   assertEquals("rw-r--r--",
                fs.getFileStatus(file).getPermission().toString());

   //test recursive: capital X grants execute only on directories
   runCmd(shell, "-chmod", "-R", "a+rwX", chmodDir);
   assertEquals("rwxrwxrwx",
                fs.getFileStatus(dir).getPermission().toString()); 
   assertEquals("rw-rw-rw-",
                fs.getFileStatus(file).getPermission().toString());
   
   fs.delete(dir, true);     
  } finally {
    // Deliberate best-effort cleanup: close failures must not mask a
    // test failure raised above.
    try {
      fs.close();
      shell.close();
    } catch (IOException ignored) {}
  }
}
 
源代码29 项目: hadoop   文件: TestCopyFiles.java
/**
 * Runs the given FsShell command with stdout captured.
 *
 * @param shell the shell to run
 * @param args the command line to pass to the shell
 * @return everything the command printed to stdout
 * @throws Exception if the shell invocation throws
 */
static String execCmd(FsShell shell, String... args) throws Exception {
  ByteArrayOutputStream baout = new ByteArrayOutputStream();
  PrintStream out = new PrintStream(baout, true);
  PrintStream old = System.out;
  System.setOut(out);
  try {
    shell.run(args);
  } finally {
    out.close();
    // Restore stdout even when shell.run throws; the original left later
    // tests writing into the dead capture stream on failure.
    System.setOut(old);
  }
  return baout.toString();
}
 
源代码30 项目: hadoop   文件: TestAclCommands.java
/** "-ls" must succeed even when the filesystem's getAclStatus RPC is
 *  unavailable (simulated via the stub filesystem). */
@Test
public void testLsNoRpcForGetAclStatus() throws Exception {
  Configuration conf = new Configuration();
  conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "stubfs:///");
  conf.setClass("fs.stubfs.impl", StubFileSystem.class, FileSystem.class);
  conf.setBoolean("stubfs.noRpcForGetAclStatus", true);
  final String[] lsArgs = new String[] { "-ls", "/" };
  final int rc = ToolRunner.run(conf, new FsShell(), lsArgs);
  assertEquals("ls must succeed even if getAclStatus RPC does not exist.",
    0, rc);
}
 
 类所在包
 类方法
 同包方法