org.apache.hadoop.fs.FsShell#setConf() Source Code Examples

Listed below are example usages of org.apache.hadoop.fs.FsShell#setConf(), each drawn from an open-source project on GitHub.
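Every example follows the same basic pattern: construct an FsShell, inject a Configuration through setConf(), then execute a command with run(String[]) (directly or via ToolRunner) and check the returned exit code. Here is a minimal sketch of that pattern; the class name and the "-ls /" arguments are illustrative, not taken from any of the projects below.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsShell;

public class FsShellSetConfDemo {
  public static void main(String[] args) throws Exception {
    // By default this loads core-site.xml etc. from the classpath.
    Configuration conf = new Configuration();

    FsShell shell = new FsShell();
    // setConf() is inherited from Configured; FsShell uses the injected
    // Configuration to resolve the target FileSystem (HDFS, local, ...).
    shell.setConf(conf);

    // run() executes a single shell command and returns its exit code:
    // 0 on success, non-zero (often -1) on error.
    int exitCode = shell.run(new String[] {"-ls", "/"});
    System.exit(exitCode);
  }
}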

Example 1  Project: hadoop   File: TestSnapshotDeletion.java
@Test
public void testDeleteSnapshotCommandWithIllegalArguments() throws Exception {
  ByteArrayOutputStream out = new ByteArrayOutputStream();
  PrintStream psOut = new PrintStream(out);
  System.setOut(psOut);
  System.setErr(psOut);
  FsShell shell = new FsShell();
  shell.setConf(conf);
  
  String[] argv1 = {"-deleteSnapshot", "/tmp"};
  int val = shell.run(argv1);
  assertTrue(val == -1);
  assertTrue(out.toString().contains(
      argv1[0] + ": Incorrect number of arguments."));
  out.reset();
  
  String[] argv2 = {"-deleteSnapshot", "/tmp", "s1", "s2"};
  val = shell.run(argv2);
  assertTrue(val == -1);
  assertTrue(out.toString().contains(
      argv2[0] + ": Incorrect number of arguments."));
  psOut.close();
  out.close();
}
 
Example 2  Project: hadoop   File: TestSnapshotRename.java
@Test
public void testRenameSnapshotCommandWithIllegalArguments() throws Exception {
  ByteArrayOutputStream out = new ByteArrayOutputStream();
  PrintStream psOut = new PrintStream(out);
  System.setOut(psOut);
  System.setErr(psOut);
  FsShell shell = new FsShell();
  shell.setConf(conf);
  
  String[] argv1 = {"-renameSnapshot", "/tmp", "s1"};
  int val = shell.run(argv1);
  assertTrue(val == -1);
  assertTrue(out.toString().contains(
      argv1[0] + ": Incorrect number of arguments."));
  out.reset();
  
  String[] argv2 = {"-renameSnapshot", "/tmp", "s1", "s2", "s3"};
  val = shell.run(argv2);
  assertTrue(val == -1);
  assertTrue(out.toString().contains(
      argv2[0] + ": Incorrect number of arguments."));
  psOut.close();
  out.close();
}
 
Example 3  Project: big-c   File: TestSnapshotDeletion.java
@Test
public void testDeleteSnapshotCommandWithIllegalArguments() throws Exception {
  ByteArrayOutputStream out = new ByteArrayOutputStream();
  PrintStream psOut = new PrintStream(out);
  System.setOut(psOut);
  System.setErr(psOut);
  FsShell shell = new FsShell();
  shell.setConf(conf);
  
  String[] argv1 = {"-deleteSnapshot", "/tmp"};
  int val = shell.run(argv1);
  assertTrue(val == -1);
  assertTrue(out.toString().contains(
      argv1[0] + ": Incorrect number of arguments."));
  out.reset();
  
  String[] argv2 = {"-deleteSnapshot", "/tmp", "s1", "s2"};
  val = shell.run(argv2);
  assertTrue(val == -1);
  assertTrue(out.toString().contains(
      argv2[0] + ": Incorrect number of arguments."));
  psOut.close();
  out.close();
}
 
Example 4  Project: big-c   File: TestSnapshotRename.java
@Test
public void testRenameSnapshotCommandWithIllegalArguments() throws Exception {
  ByteArrayOutputStream out = new ByteArrayOutputStream();
  PrintStream psOut = new PrintStream(out);
  System.setOut(psOut);
  System.setErr(psOut);
  FsShell shell = new FsShell();
  shell.setConf(conf);
  
  String[] argv1 = {"-renameSnapshot", "/tmp", "s1"};
  int val = shell.run(argv1);
  assertTrue(val == -1);
  assertTrue(out.toString().contains(
      argv1[0] + ": Incorrect number of arguments."));
  out.reset();
  
  String[] argv2 = {"-renameSnapshot", "/tmp", "s1", "s2", "s3"};
  val = shell.run(argv2);
  assertTrue(val == -1);
  assertTrue(out.toString().contains(
      argv2[0] + ": Incorrect number of arguments."));
  psOut.close();
  out.close();
}
 
Example 5  Project: RDFS   File: TestDFSShell.java
public void testCount() throws Exception {
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
  DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem();
  FsShell shell = new FsShell();
  shell.setConf(conf);

  try {
    String root = createTree(dfs, "count");

    // Verify the counts
    runCount(root, 2, 4, conf);
    runCount(root + "2", 2, 1, conf);
    runCount(root + "2/f1", 0, 1, conf);
    runCount(root + "2/sub", 1, 0, conf);

    final FileSystem localfs = FileSystem.getLocal(conf);
    Path localpath = new Path(TEST_ROOT_DIR, "testcount");
    localpath = localpath.makeQualified(localfs);
    localfs.mkdirs(localpath);

    final String localstr = localpath.toString();
    System.out.println("localstr=" + localstr);
    runCount(localstr, 1, 0, conf);
    assertEquals(0, new Count(new String[]{root, localstr}, 0, conf).runAll());
  } finally {
    try {
      dfs.close();
    } catch (Exception e) {
    }
    cluster.shutdown();
  }
}
 
Example 6  Project: RDFS   File: TestDFSShell.java
/**
 * Test chmod.
 */
void testChmod(Configuration conf, FileSystem fs, String chmodDir)
  throws IOException {
  FsShell shell = new FsShell();
  shell.setConf(conf);

  try {
    //first make dir
    Path dir = new Path(chmodDir);
    fs.delete(dir, true);
    fs.mkdirs(dir);

    runCmd(shell, "-chmod", "u+rwx,g=rw,o-rwx", chmodDir);
    assertEquals("rwxrw----",
                 fs.getFileStatus(dir).getPermission().toString());

    //create an empty file
    Path file = new Path(chmodDir, "file");
    TestDFSShell.writeFile(fs, file);

    //test octal mode
    runCmd(shell, "-chmod", "644", file.toString());
    assertEquals("rw-r--r--",
                 fs.getFileStatus(file).getPermission().toString());

    //test recursive
    runCmd(shell, "-chmod", "-R", "a+rwX", chmodDir);
    assertEquals("rwxrwxrwx",
                 fs.getFileStatus(dir).getPermission().toString());
    assertEquals("rw-rw-rw-",
                 fs.getFileStatus(file).getPermission().toString());

    fs.delete(dir, true);
  } finally {
    try {
      fs.close();
      shell.close();
    } catch (IOException ignored) {}
  }
}
 
Example 7  Project: hadoop-gpu   File: TestDFSShell.java
public void testCount() throws Exception {
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
  DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem();
  FsShell shell = new FsShell();
  shell.setConf(conf);

  try {
    String root = createTree(dfs, "count");

    // Verify the counts
    runCount(root, 2, 4, conf);
    runCount(root + "2", 2, 1, conf);
    runCount(root + "2/f1", 0, 1, conf);
    runCount(root + "2/sub", 1, 0, conf);

    final FileSystem localfs = FileSystem.getLocal(conf);
    Path localpath = new Path(TEST_ROOT_DIR, "testcount");
    localpath = localpath.makeQualified(localfs);
    localfs.mkdirs(localpath);
    
    final String localstr = localpath.toString();
    System.out.println("localstr=" + localstr);
    runCount(localstr, 1, 0, conf);
    assertEquals(0, new Count(new String[]{root, localstr}, 0, conf).runAll());
  } finally {
    try {
      dfs.close();
    } catch (Exception e) {
    }
    cluster.shutdown();
  }
}
 
Example 8  Project: hadoop-gpu   File: TestDFSShell.java
/**
 * Test chmod.
 */
void testChmod(Configuration conf, FileSystem fs, String chmodDir) 
                                                  throws IOException {
  FsShell shell = new FsShell();
  shell.setConf(conf);
  
  try {
    //first make dir
    Path dir = new Path(chmodDir);
    fs.delete(dir, true);
    fs.mkdirs(dir);

    runCmd(shell, "-chmod", "u+rwx,g=rw,o-rwx", chmodDir);
    assertEquals("rwxrw----",
                 fs.getFileStatus(dir).getPermission().toString());

    //create an empty file
    Path file = new Path(chmodDir, "file");
    TestDFSShell.writeFile(fs, file);

    //test octal mode
    runCmd(shell, "-chmod", "644", file.toString());
    assertEquals("rw-r--r--",
                 fs.getFileStatus(file).getPermission().toString());

    //test recursive
    runCmd(shell, "-chmod", "-R", "a+rwX", chmodDir);
    assertEquals("rwxrwxrwx",
                 fs.getFileStatus(dir).getPermission().toString());
    assertEquals("rw-rw-rw-",
                 fs.getFileStatus(file).getPermission().toString());

    fs.delete(dir, true);
  } finally {
    try {
      fs.close();
      shell.close();
    } catch (IOException ignored) {}
  }
}
 
Example 9  Project: RDFS   File: TestTrash.java
protected void trashEmptier(FileSystem fs, Configuration conf) throws Exception {
  // Trash with 12 second deletes and 6 seconds checkpoints
  conf.set("fs.trash.interval", "0.2"); // 12 seconds
  conf.set("fs.trash.checkpoint.interval", "0.1"); // 6 seconds
  Trash trash = new Trash(conf);
  // clean up trash can
  fs.delete(trash.getCurrentTrashDir().getParent(), true);

  // Start Emptier in background
  Runnable emptier = trash.getEmptier();
  Thread emptierThread = new Thread(emptier);
  emptierThread.start();

  FsShell shell = new FsShell();
  shell.setConf(conf);
  shell.init();
  // First create a new directory with mkdirs
  Path myPath = new Path(TEST_DIR, "test/mkdirs");
  mkdir(fs, myPath);
  int fileIndex = 0;
  Set<String> checkpoints = new HashSet<String>();
  while (true)  {
    // Create a file with a new name
    Path myFile = new Path(TEST_DIR, "test/mkdirs/myFile" + fileIndex++);
    writeFile(fs, myFile);

    // Delete the file to trash
    assertTrue(rmUsingShell(shell, myFile) == 0);

    Path trashDir = shell.getCurrentTrashDir();
    FileStatus files[] = fs.listStatus(trashDir.getParent());
    // Scan files in .Trash and add them to set of checkpoints
    for (FileStatus file : files) {
      String fileName = file.getPath().getName();
      checkpoints.add(fileName);
    }
    // If checkpoints has 5 objects it is Current + 4 checkpoint directories
    if (checkpoints.size() == 5) {
      // The actual contents should be smaller since the last checkpoint
      // should've been deleted and Current might not have been recreated yet
      assertTrue(5 > files.length);
      break;
    }
    Thread.sleep(5000);
  }
  emptierThread.interrupt();
  emptierThread.join();
}
 
Example 10  Project: RDFS   File: TestTrash.java
/**
 * @param fs
 * @param conf
 * @throws Exception
 */
protected void trashPatternEmptier(FileSystem fs, Configuration conf) throws Exception {
  // Trash with 12 second deletes and 6 seconds checkpoints
  conf.set("fs.trash.interval", "0.2"); // 12 seconds
  conf.set("fs.trash.checkpoint.interval", "0.1"); // 6 seconds
  conf.setClass("fs.trash.classname", TrashPolicyPattern.class, TrashPolicy.class);
  conf.set("fs.trash.base.paths", TEST_DIR + "/my_root/*/");
  conf.set("fs.trash.unmatched.paths", TEST_DIR + "/unmatched/");
  Trash trash = new Trash(conf);
  // clean up trash can
  fs.delete(new Path(TEST_DIR + "/my_root/*/"), true);
  fs.delete(new Path(TEST_DIR + "/my_root_not/*/"), true);


  FsShell shell = new FsShell();
  shell.setConf(conf);
  shell.init();
  // First create a new directory with mkdirs
  deleteAndCheckTrash(fs, shell, "my_root/sub_dir1/sub_dir1_1/myFile",
      "my_root/sub_dir1/.Trash/Current/" + TEST_DIR
          + "/my_root/sub_dir1/sub_dir1_1");
  deleteAndCheckTrash(fs, shell, "my_root/sub_dir2/sub_dir2_1/myFile",
      "my_root/sub_dir2/.Trash/Current/" + TEST_DIR
          + "/my_root/sub_dir2/sub_dir2_1");
  deleteAndCheckTrash(fs, shell, "my_root_not/", "unmatched/.Trash/Current"
      + TEST_DIR + "/my_root_not");
  deleteAndCheckTrash(fs, shell, "my_root/file", "unmatched/.Trash/Current"
      + TEST_DIR + "/my_root/file");

  Path currentTrash = new Path(TEST_DIR, "my_root/sub_dir1/.Trash/Current/");
  fs.mkdirs(currentTrash);
  cmdUsingShell("-rmr", shell, currentTrash);
  TestCase.assertTrue(!fs.exists(currentTrash));

  cmdUsingShell("-rmr", shell, new Path(TEST_DIR, "my_root"));
  TestCase.assertTrue(fs.exists(new Path(TEST_DIR,
      "unmatched/.Trash/Current/" + TEST_DIR + "/my_root")));
  
  // Test Emptier
  // Start Emptier in background
  Runnable emptier = trash.getEmptier();
  Thread emptierThread = new Thread(emptier);
  emptierThread.start();

  int fileIndex = 0;
  Set<String> checkpoints = new HashSet<String>();
  while (true)  {
    // Create a file with a new name
    Path myFile = new Path(TEST_DIR, "my_root/sub_dir1/sub_dir2/myFile" + fileIndex++);
    writeFile(fs, myFile);

    // Delete the file to trash
    String[] args = new String[2];
    args[0] = "-rm";
    args[1] = myFile.toString();
    int val = -1;
    try {
      val = shell.run(args);
    } catch (Exception e) {
      System.err.println("Exception raised from Trash.run " +
                         e.getLocalizedMessage());
    }
    assertTrue(val == 0);

    Path trashDir = new Path(TEST_DIR, "my_root/sub_dir1/.Trash/Current/");
    FileStatus files[] = fs.listStatus(trashDir.getParent());
    // Scan files in .Trash and add them to set of checkpoints
    for (FileStatus file : files) {
      String fileName = file.getPath().getName();
      checkpoints.add(fileName);
    }
    // If checkpoints has 5 objects it is Current + 4 checkpoint directories
    if (checkpoints.size() == 5) {
      // The actual contents should be smaller since the last checkpoint
      // should've been deleted and Current might not have been recreated yet
      assertTrue(5 > files.length);
      break;
    }
    Thread.sleep(5000);
  }
  emptierThread.interrupt();
  emptierThread.join();
}
 
Example 11  Project: RDFS   File: TestDFSShell.java
public void testURIPaths() throws Exception {
  Configuration srcConf = new Configuration();
  Configuration dstConf = new Configuration();
  MiniDFSCluster srcCluster =  null;
  MiniDFSCluster dstCluster = null;
  String bak = System.getProperty("test.build.data");
  try{
    srcCluster = new MiniDFSCluster(srcConf, 2, true, null);
    File nameDir = new File(new File(bak), "dfs_tmp_uri/");
    nameDir.mkdirs();
    System.setProperty("test.build.data", nameDir.toString());
    dstCluster = new MiniDFSCluster(dstConf, 2, true, null);
    FileSystem srcFs = srcCluster.getFileSystem();
    FileSystem dstFs = dstCluster.getFileSystem();
    FsShell shell = new FsShell();
    shell.setConf(srcConf);
    //check for ls
    String[] argv = new String[2];
    argv[0] = "-ls";
    argv[1] = dstFs.getUri().toString() + "/";
    int ret = ToolRunner.run(shell, argv);
    assertTrue("ls works on remote uri ", (ret==0));
    //check for rm -r
    dstFs.mkdirs(new Path("/hadoopdir"));
    argv = new String[2];
    argv[0] = "-rmr";
    argv[1] = dstFs.getUri().toString() + "/hadoopdir";
    ret = ToolRunner.run(shell, argv);
    assertTrue("-rmr works on remote uri " + argv[1], (ret==0));
    //check du
    argv[0] = "-du";
    argv[1] = dstFs.getUri().toString() + "/";
    ret = ToolRunner.run(shell, argv);
    assertTrue("du works on remote uri ", (ret ==0));
    //check put
    File furi = new File(TEST_ROOT_DIR, "furi");
    createLocalFile(furi);
    argv = new String[3];
    argv[0] = "-put";
    argv[1] = furi.toString();
    argv[2] = dstFs.getUri().toString() + "/furi";
    ret = ToolRunner.run(shell, argv);
    assertTrue(" put is working ", (ret==0));
    //check cp
    argv[0] = "-cp";
    argv[1] = dstFs.getUri().toString() + "/furi";
    argv[2] = srcFs.getUri().toString() + "/furi";
    ret = ToolRunner.run(shell, argv);
    assertTrue(" cp is working ", (ret==0));
    assertTrue(srcFs.exists(new Path("/furi")));
    //check cat
    argv = new String[2];
    argv[0] = "-cat";
    argv[1] = dstFs.getUri().toString() + "/furi";
    ret = ToolRunner.run(shell, argv);
    assertTrue(" cat is working ", (ret == 0));
    //check chown
    dstFs.delete(new Path("/furi"), true);
    dstFs.delete(new Path("/hadoopdir"), true);
    String file = "/tmp/chownTest";
    Path path = new Path(file);
    Path parent = new Path("/tmp");
    Path root = new Path("/");
    TestDFSShell.writeFile(dstFs, path);
    runCmd(shell, "-chgrp", "-R", "herbivores", dstFs.getUri().toString() +"/*");
    confirmOwner(null, "herbivores", dstFs, parent, path);
    runCmd(shell, "-chown", "-R", ":reptiles", dstFs.getUri().toString() + "/");
    confirmOwner(null, "reptiles", dstFs, root, parent, path);
    //check if default hdfs:/// works
    argv[0] = "-cat";
    argv[1] = "hdfs:///furi";
    ret = ToolRunner.run(shell, argv);
    assertTrue(" default works for cat", (ret == 0));
    argv[0] = "-ls";
    argv[1] = "hdfs:///";
    ret = ToolRunner.run(shell, argv);
    assertTrue("default works for ls ", (ret == 0));
    argv[0] = "-rmr";
    argv[1] = "hdfs:///furi";
    ret = ToolRunner.run(shell, argv);
    assertTrue("default works for rm/rmr", (ret ==0));
  } finally {
    System.setProperty("test.build.data", bak);
    if (null != srcCluster) {
      srcCluster.shutdown();
    }
    if (null != dstCluster) {
      dstCluster.shutdown();
    }
  }
}
 
Example 12  Project: RDFS   File: TestDFSShell.java
public void testFilePermissions() throws IOException {
  Configuration conf = new Configuration();

  //test chmod on local fs
  FileSystem fs = FileSystem.getLocal(conf);
  testChmod(conf, fs,
            (new File(TEST_ROOT_DIR, "chmodTest")).getAbsolutePath());

  conf.set("dfs.permissions", "true");

  //test chmod on DFS
  MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
  fs = cluster.getFileSystem();
  testChmod(conf, fs, "/tmp/chmodTest");

  // test chown and chgrp on DFS:

  FsShell shell = new FsShell();
  shell.setConf(conf);
  fs = cluster.getFileSystem();

  /* For dfs, I am the super user and I can change owner of any file to
   * anything. "-R" option is already tested by chmod test above.
   */

  String file = "/tmp/chownTest";
  Path path = new Path(file);
  Path parent = new Path("/tmp");
  Path root = new Path("/");
  TestDFSShell.writeFile(fs, path);

  runCmd(shell, "-chgrp", "-R", "herbivores", "/*", "unknownFile*");
  confirmOwner(null, "herbivores", fs, parent, path);

  runCmd(shell, "-chgrp", "mammals", file);
  confirmOwner(null, "mammals", fs, path);

  runCmd(shell, "-chown", "-R", ":reptiles", "/");
  confirmOwner(null, "reptiles", fs, root, parent, path);

  runCmd(shell, "-chown", "python:", "/nonExistentFile", file);
  confirmOwner("python", "reptiles", fs, path);

  runCmd(shell, "-chown", "-R", "hadoop:toys", "unknownFile", "/");
  confirmOwner("hadoop", "toys", fs, root, parent, path);

  // Test different characters in names

  runCmd(shell, "-chown", "hdfs.user", file);
  confirmOwner("hdfs.user", null, fs, path);

  runCmd(shell, "-chown", "_Hdfs.User-10:_hadoop.users--", file);
  confirmOwner("_Hdfs.User-10", "_hadoop.users--", fs, path);

  runCmd(shell, "-chown", "hdfs/[email protected]:asf-projects", file);
  confirmOwner("hdfs/[email protected]", "asf-projects", fs, path);

  runCmd(shell, "-chgrp", "[email protected]/100", file);
  confirmOwner(null, "[email protected]/100", fs, path);

  cluster.shutdown();
}
 
Example 13  Project: RDFS   File: TestDFSShell.java
public void testTouch() throws IOException, ParseException {
  Configuration conf = new Configuration();
  conf.set("dfs.access.time.precision", "100");
  MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
  FileSystem fs = cluster.getFileSystem();
  assertTrue("Not a HDFS: " + fs.getUri(),
             fs instanceof DistributedFileSystem);

  FsShell shell = new FsShell();
  shell.setConf(conf);

  try {
    SimpleDateFormat df = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");

    // Test file creation
    Path file1 = new Path("/tmp/file1.txt");
    runCmd(shell, "-touch", "" + file1);
    assertTrue("Touch didn't create a file!", fs.exists(file1));
    assertTimesCorrect("Incorrect time for " + file1, fs, file1, null, null);

    // Verify that "-d" option works correctly
    String targetDate = "2001-02-03 04:05:06";
    Date d = df.parse(targetDate);
    // short format
    runCmd(shell, "-touch", "-d", targetDate, "" + file1);
    assertTimesCorrect("-touch -d didn't work", fs, file1, d, d);

    targetDate = "2002-02-02 02:02:02";
    d = df.parse(targetDate);
    // long format
    runCmd(shell, "-touch", "--date", targetDate, "" + file1);
    assertTimesCorrect("-touch --date didn't work", fs, file1, d, d);

    targetDate = "2003-03-03 03:03:03";
    d = df.parse(targetDate);
    // long format #2
    runCmd(shell, "-touch", "--date=" + targetDate, "" + file1);
    assertTimesCorrect("-touch --date didn't work", fs, file1, d, d);

    // Verify that touch sets current time by default
    runCmd(shell, "-touch", "" + file1);
    assertTimesCorrect("-touch didn't set current time", fs, file1, null, null);
    
    // Verify that "-c" works correctly
    Path file2 = new Path("/tmp/file2.txt");
    int exitCode = runCmd(shell, "-touch", "-c", "" + file2);
    assertTrue("-touch -c didn't return error", exitCode != 0);
    assertTrue("-touch -c created file", !fs.exists(file2));
    // Create file with stale atime&mtime
    targetDate = "1999-09-09 09:09:09";
    d = df.parse(targetDate);
    runCmd(shell, "-touch", "-d", targetDate, "" + file2);
    assertTimesCorrect("-touch -d didn't work", fs, file2, d, d);
    // Verify that "-touch -c" updated times correctly
    exitCode = runCmd(shell, "-touch", "-c", "" + file2);
    assertTrue("-touch -c failed on existing file", exitCode == 0);
    assertTimesCorrect("-touch -c didn't update file times", fs, file2, null, null);

    // Verify that "-a" and "-m" work correctly
    String date1 = "2001-01-01 01:01:01";
    String date2 = "2002-02-02 02:02:02";
    Date d1 = df.parse(date1);
    Date d2 = df.parse(date2);
    Date oldFile1Mtime = new Date(fs.getFileStatus(file1).getModificationTime());
    runCmd(shell, "-touch", "-a", "--date", date1, "" + file1);
    assertTimesCorrect("Option -a didn't work", fs, file1, d1, oldFile1Mtime);
    runCmd(shell, "-touch", "-m", "--date", date2, "" + file1);
    assertTimesCorrect("Option -m didn't work", fs, file1, d1, d2);
    Date oldFile2Atime = new Date(fs.getFileStatus(file2).getAccessTime());
    runCmd(shell, "-touch", "-m", "--date", date1, "" + file2);
    assertTimesCorrect("Option -m didn't work", fs, file2, oldFile2Atime, d1);
    runCmd(shell, "-touch", "-a", "--date", date2, "" + file2);
    assertTimesCorrect("Option -a didn't work", fs, file2, d2, d1);
    runCmd(shell, "-touch", "-au", Long.toString(d1.getTime()), "" + file2);
    assertTimesCorrect("Option -a and -u didn't work", fs, file2, d1, d1);
    runCmd(shell, "-touch", "-amu", Long.toString(d2.getTime()), "" + file2);
    assertTimesCorrect("Option -a, -m and -u didn't work", fs, file2, d2, d2);
  } finally {
    try {
      fs.close();
    } catch (Exception e) {
    }
    cluster.shutdown();
  }
}
 
Example 14  Project: hadoop-gpu   File: TestDFSShell.java
public void testURIPaths() throws Exception {
  Configuration srcConf = new Configuration();
  Configuration dstConf = new Configuration();
  MiniDFSCluster srcCluster =  null;
  MiniDFSCluster dstCluster = null;
  String bak = System.getProperty("test.build.data");
  try{
    srcCluster = new MiniDFSCluster(srcConf, 2, true, null);
    File nameDir = new File(new File(bak), "dfs_tmp_uri/");
    nameDir.mkdirs();
    System.setProperty("test.build.data", nameDir.toString());
    dstCluster = new MiniDFSCluster(dstConf, 2, true, null);
    FileSystem srcFs = srcCluster.getFileSystem();
    FileSystem dstFs = dstCluster.getFileSystem();
    FsShell shell = new FsShell();
    shell.setConf(srcConf);
    //check for ls
    String[] argv = new String[2];
    argv[0] = "-ls";
    argv[1] = dstFs.getUri().toString() + "/";
    int ret = ToolRunner.run(shell, argv);
    assertTrue("ls works on remote uri ", (ret==0));
    //check for rm -r 
    dstFs.mkdirs(new Path("/hadoopdir"));
    argv = new String[2];
    argv[0] = "-rmr";
    argv[1] = dstFs.getUri().toString() + "/hadoopdir";
    ret = ToolRunner.run(shell, argv);
    assertTrue("-rmr works on remote uri " + argv[1], (ret==0));
    //check du 
    argv[0] = "-du";
    argv[1] = dstFs.getUri().toString() + "/";
    ret = ToolRunner.run(shell, argv);
    assertTrue("du works on remote uri ", (ret ==0));
    //check put
    File furi = new File(TEST_ROOT_DIR, "furi");
    createLocalFile(furi);
    argv = new String[3];
    argv[0] = "-put";
    argv[1] = furi.toString();
    argv[2] = dstFs.getUri().toString() + "/furi";
    ret = ToolRunner.run(shell, argv);
    assertTrue(" put is working ", (ret==0));
    //check cp 
    argv[0] = "-cp";
    argv[1] = dstFs.getUri().toString() + "/furi";
    argv[2] = srcFs.getUri().toString() + "/furi";
    ret = ToolRunner.run(shell, argv);
    assertTrue(" cp is working ", (ret==0));
    assertTrue(srcFs.exists(new Path("/furi")));
    //check cat 
    argv = new String[2];
    argv[0] = "-cat";
    argv[1] = dstFs.getUri().toString() + "/furi";
    ret = ToolRunner.run(shell, argv);
    assertTrue(" cat is working ", (ret == 0));
    //check chown
    dstFs.delete(new Path("/furi"), true);
    dstFs.delete(new Path("/hadoopdir"), true);
    String file = "/tmp/chownTest";
    Path path = new Path(file);
    Path parent = new Path("/tmp");
    Path root = new Path("/");
    TestDFSShell.writeFile(dstFs, path);
    runCmd(shell, "-chgrp", "-R", "herbivores", dstFs.getUri().toString() +"/*");
    confirmOwner(null, "herbivores", dstFs, parent, path);
    runCmd(shell, "-chown", "-R", ":reptiles", dstFs.getUri().toString() + "/");
    confirmOwner(null, "reptiles", dstFs, root, parent, path);
    //check if default hdfs:/// works 
    argv[0] = "-cat";
    argv[1] = "hdfs:///furi";
    ret = ToolRunner.run(shell, argv);
    assertTrue(" default works for cat", (ret == 0));
    argv[0] = "-ls";
    argv[1] = "hdfs:///";
    ret = ToolRunner.run(shell, argv);
    assertTrue("default works for ls ", (ret == 0));
    argv[0] = "-rmr";
    argv[1] = "hdfs:///furi";
    ret = ToolRunner.run(shell, argv);
    assertTrue("default works for rm/rmr", (ret ==0));
  } finally {
    System.setProperty("test.build.data", bak);
    if (null != srcCluster) {
      srcCluster.shutdown();
    }
    if (null != dstCluster) {
      dstCluster.shutdown();
    }
  }
}
 
Example 15  Project: hadoop-gpu   File: TestDFSShell.java
public void testFilePermissions() throws IOException {
  Configuration conf = new Configuration();
  
  //test chmod on local fs
  FileSystem fs = FileSystem.getLocal(conf);
  testChmod(conf, fs, 
            (new File(TEST_ROOT_DIR, "chmodTest")).getAbsolutePath());
  
  conf.set("dfs.permissions", "true");
  
  //test chmod on DFS
  MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
  fs = cluster.getFileSystem();
  testChmod(conf, fs, "/tmp/chmodTest");
  
  // test chown and chgrp on DFS:
  
  FsShell shell = new FsShell();
  shell.setConf(conf);
  fs = cluster.getFileSystem();
  
  /* For dfs, I am the super user and I can change owner of any file to
   * anything. "-R" option is already tested by chmod test above.
   */
  
  String file = "/tmp/chownTest";
  Path path = new Path(file);
  Path parent = new Path("/tmp");
  Path root = new Path("/");
  TestDFSShell.writeFile(fs, path);
  
  runCmd(shell, "-chgrp", "-R", "herbivores", "/*", "unknownFile*");
  confirmOwner(null, "herbivores", fs, parent, path);
  
  runCmd(shell, "-chgrp", "mammals", file);
  confirmOwner(null, "mammals", fs, path);
  
  runCmd(shell, "-chown", "-R", ":reptiles", "/");
  confirmOwner(null, "reptiles", fs, root, parent, path);
  
  runCmd(shell, "-chown", "python:", "/nonExistentFile", file);
  confirmOwner("python", "reptiles", fs, path);

  runCmd(shell, "-chown", "-R", "hadoop:toys", "unknownFile", "/");
  confirmOwner("hadoop", "toys", fs, root, parent, path);
  
  // Test different characters in names

  runCmd(shell, "-chown", "hdfs.user", file);
  confirmOwner("hdfs.user", null, fs, path);
  
  runCmd(shell, "-chown", "_Hdfs.User-10:_hadoop.users--", file);
  confirmOwner("_Hdfs.User-10", "_hadoop.users--", fs, path);
  
  runCmd(shell, "-chown", "hdfs/[email protected]:asf-projects", file);
  confirmOwner("hdfs/[email protected]", "asf-projects", fs, path);
  
  runCmd(shell, "-chgrp", "[email protected]/100", file);
  confirmOwner(null, "[email protected]/100", fs, path);
  
  cluster.shutdown();
}
 