类org.apache.hadoop.fs.contract.ContractTestUtils源码实例Demo

下面列出了 org.apache.hadoop.fs.contract.ContractTestUtils 类的 API 实例代码及用法示例,也可以点击链接跳转到 GitHub 查看完整源代码。

源代码1 项目: hadoop-ozone   文件: TestOzoneFileSystem.java
/**
 * Verifies that creating a child key does not implicitly create key
 * entries for its parent directories in the bucket.
 */
private void testCreateDoesNotAddParentDirKeys() throws Exception {
  Path grandparent = new Path("/testCreateDoesNotAddParentDirKeys");
  Path parent = new Path(grandparent, "parent");
  Path child = new Path(parent, "child");
  ContractTestUtils.touch(fs, child);
  rootItemCount++; // grandparent

  OzoneKeyDetails key = getKey(child, false);
  assertEquals(key.getName(), o3fs.pathToKey(child));

  // Creating a child should not add parent keys to the bucket.
  // The original try/catch passed silently when no exception was thrown;
  // fail() makes the expectation explicit.
  try {
    getKey(parent, true);
    fail("Expected key-not-found for parent dir key, but lookup succeeded");
  } catch (IOException ex) {
    assertKeyNotFoundException(ex);
  }

  // List status on the parent should show the child file
  assertEquals("List status of parent should include the 1 child file", 1L,
      fs.listStatus(parent).length);
  assertTrue("Parent directory does not appear to be a directory",
      fs.getFileStatus(parent).isDirectory());
}
 
源代码2 项目: hadoop-ozone   文件: TestOzoneFileSystem.java
/**
 * Verifies that deleting the only child of an implicit directory creates
 * a fake parent directory key so the parent remains visible.
 */
private void testDeleteCreatesFakeParentDir() throws Exception {
  Path grandparent = new Path("/testDeleteCreatesFakeParentDir");
  Path parent = new Path(grandparent, "parent");
  Path child = new Path(parent, "child");
  ContractTestUtils.touch(fs, child);
  rootItemCount++; // grandparent

  // Verify that parent dir key does not exist
  // Creating a child should not add parent keys to the bucket.
  // fail() ensures the test does not pass silently when no exception
  // is thrown.
  try {
    getKey(parent, true);
    fail("Expected key-not-found for parent dir key, but lookup succeeded");
  } catch (IOException ex) {
    assertKeyNotFoundException(ex);
  }

  // Delete the child key; assert the result instead of ignoring it
  // (consistent with the rooted-FS variant of this test).
  assertTrue("Delete of child file should succeed",
      fs.delete(child, false));

  // Deleting the only child should create the parent dir key if it does
  // not exist
  String parentKey = o3fs.pathToKey(parent) + "/";
  OzoneKeyDetails parentKeyInfo = getKey(parent, true);
  assertEquals(parentKey, parentKeyInfo.getName());
}
 
源代码3 项目: hadoop-ozone   文件: TestOzoneFileSystem.java
/**
 * Checks that listStatus returns exactly the immediate children of a
 * directory: first with two plain files, then after adding entries in a
 * nested sub-directory that must be collapsed into a single child.
 */
private void testListStatus() throws Exception {
  Path parent = new Path("/testListStatus");
  Path key1 = new Path(parent, "key1");
  Path key2 = new Path(parent, "key2");
  ContractTestUtils.touch(fs, key1);
  ContractTestUtils.touch(fs, key2);
  rootItemCount++; // parent

  // ListStatus on a directory should return all subdirs along with
  // files, even if there exists a file and sub-dir with the same name.
  FileStatus[] statuses = o3fs.listStatus(parent);
  assertEquals("FileStatus did not return all children of the directory",
      2, statuses.length);

  // ListStatus should return only the immediate children of a directory;
  // dir1's two keys count as one child (dir1) of parent.
  ContractTestUtils.touch(fs, new Path(parent, "dir1/key3"));
  ContractTestUtils.touch(fs, new Path(parent, "dir1/key4"));
  statuses = o3fs.listStatus(parent);
  assertEquals("FileStatus did not return all children of the directory",
      3, statuses.length);
}
 
源代码4 项目: hadoop   文件: TestS3ADeleteManyFiles.java
/**
 * Creates and verifies files of three sizes (1 KB, 5 MB, 20 MB) through
 * the S3A connector.
 */
@Test
public void testOpenCreate() throws IOException {
  final Path dir = new Path("/tests3a");
  final int oneKB = 1024;
  ContractTestUtils.createAndVerifyFile(fs, dir, oneKB);
  ContractTestUtils.createAndVerifyFile(fs, dir, 5 * oneKB * oneKB);
  ContractTestUtils.createAndVerifyFile(fs, dir, 20 * oneKB * oneKB);

  /*
  Enable to test the multipart upload
  try {
    ContractTestUtils.createAndVerifyFile(fs, dir,
        (long)6 * 1024 * 1024 * 1024);
  } catch (IOException e) {
    fail(e.getMessage());
  }
  */
}
 
源代码5 项目: big-c   文件: TestS3ADeleteManyFiles.java
/**
 * Creates and verifies files of increasing size (1 KB, 5 MB, 20 MB)
 * through the S3A connector.
 */
@Test
public void testOpenCreate() throws IOException {
  Path dir = new Path("/tests3a");
  long[] sizes = {1024L, 5L * 1024 * 1024, 20L * 1024 * 1024};
  for (long size : sizes) {
    ContractTestUtils.createAndVerifyFile(fs, dir, size);
  }

  /*
  Enable to test the multipart upload
  try {
    ContractTestUtils.createAndVerifyFile(fs, dir,
        (long)6 * 1024 * 1024 * 1024);
  } catch (IOException e) {
    fail(e.getMessage());
  }
  */
}
 
/**
 * Concatenates more source files than MAX_COMPOSE_OBJECTS allows in one
 * compose call (1.5x the limit), then verifies the target's length and
 * byte content against the original datasets.
 */
@Test
public void testConcatMultiple() throws Throwable {
  final int numFiles = GoogleCloudStorage.MAX_COMPOSE_OBJECTS * 3 / 2;
  final Path testPath = path("test");

  // Create numFiles sources, each carrying its own distinguishable data.
  final byte[][] blocks = new byte[numFiles][0];
  final Path[] sources = new Path[numFiles];
  for (int idx = 0; idx < numFiles; idx++) {
    Path source = new Path(testPath, "" + idx);
    blocks[idx] = dataset(TEST_FILE_LEN, idx, 255);
    createFile(getFileSystem(), source, true, blocks[idx]);
    sources[idx] = source;
  }

  // Concatenate everything into an initially empty target.
  Path target = new Path(testPath, "target");
  createFile(getFileSystem(), target, false, new byte[0]);
  getFileSystem().concat(target, sources);

  // The target must contain every source block, in order.
  assertFileHasLength(getFileSystem(), target, TEST_FILE_LEN * numFiles);
  ContractTestUtils.validateFileContent(
      ContractTestUtils.readDataset(getFileSystem(), target,
          TEST_FILE_LEN * numFiles),
      blocks);
}
 
源代码7 项目: hadoop-ozone   文件: TestOzoneFileSystem.java
/**
 * Tests listStatus on a path with subdirs.
 */
private void testListStatusOnSubDirs() throws Exception {
  // Key layout under test:
  //      /dir1/dir11/dir111
  //      /dir1/dir12
  //      /dir1/dir12/file121
  //      /dir2
  // listStatus("/dir1") must return only its immediate subdirs,
  // /dir1/dir11 and /dir1/dir12; deeper entries (dir111, file121)
  // must not appear.
  Path dir1 = new Path("/dir1");
  Path dir11 = new Path(dir1, "dir11");
  Path dir12 = new Path(dir1, "dir12");
  fs.mkdirs(new Path(dir11, "dir111"));
  fs.mkdirs(dir12);
  ContractTestUtils.touch(fs, new Path(dir12, "file121"));
  fs.mkdirs(new Path("/dir2"));

  FileStatus[] children = o3fs.listStatus(dir1);
  assertEquals("FileStatus should return only the immediate children",
      2, children.length);

  // The two children of /dir1 must be dir11 and dir12, in either order.
  String first = children[0].getPath().toUri().getPath();
  String second = children[1].getPath().toUri().getPath();
  assertTrue(first.equals(dir11.toString()) ||
      first.equals(dir12.toString()));
  assertTrue(second.equals(dir11.toString()) ||
      second.equals(dir12.toString()));
}
 
源代码8 项目: hadoop-ozone   文件: TestOzoneFileSystem.java
/**
 * Verifies that reading after seeking to exactly the file length returns
 * EOF (-1) rather than throwing.
 */
public void testSeekOnFileLength() throws IOException {
  Path file = new Path("/file");
  // Use an explicit charset: the no-arg String.getBytes() depends on the
  // platform default charset.
  ContractTestUtils.createFile(fs, file, true,
      "a".getBytes(java.nio.charset.StandardCharsets.UTF_8));
  try (FSDataInputStream stream = fs.open(file)) {
    long fileLength = fs.getFileStatus(file).getLen();
    stream.seek(fileLength);
    assertEquals(-1, stream.read());
  }
}
 
源代码9 项目: hadoop-ozone   文件: TestRootedOzoneFileSystem.java
/**
 * Verifies that creating a child key does not implicitly create key
 * entries for its parent directories (rooted OFS variant).
 */
@Test
public void testCreateDoesNotAddParentDirKeys() throws Exception {
  Path grandparent = new Path(testBucketPath,
      "testCreateDoesNotAddParentDirKeys");
  Path parent = new Path(grandparent, "parent");
  Path child = new Path(parent, "child");
  ContractTestUtils.touch(fs, child);

  OzoneKeyDetails key = getKey(child, false);
  OFSPath childOFSPath = new OFSPath(child);
  Assert.assertEquals(key.getName(), childOFSPath.getKeyName());

  // Creating a child should not add parent keys to the bucket.
  // Assert.fail() ensures the test does not pass silently when no
  // exception is thrown.
  try {
    getKey(parent, true);
    Assert.fail(
        "Expected key-not-found for parent dir key, but lookup succeeded");
  } catch (IOException ex) {
    assertKeyNotFoundException(ex);
  }

  // List status on the parent should show the child file
  Assert.assertEquals(
      "List status of parent should include the 1 child file",
      1L, fs.listStatus(parent).length);
  Assert.assertTrue(
      "Parent directory does not appear to be a directory",
      fs.getFileStatus(parent).isDirectory());
}
 
源代码10 项目: hadoop-ozone   文件: TestRootedOzoneFileSystem.java
/**
 * Verifies that deleting the only child of an implicit directory creates
 * a fake parent directory key (rooted OFS variant), then removes the
 * whole tree recursively.
 */
@Test
public void testDeleteCreatesFakeParentDir() throws Exception {
  Path grandparent = new Path(testBucketPath,
      "testDeleteCreatesFakeParentDir");
  Path parent = new Path(grandparent, "parent");
  Path child = new Path(parent, "child");
  ContractTestUtils.touch(fs, child);

  // Verify that parent dir key does not exist
  // Creating a child should not add parent keys to the bucket.
  // Assert.fail() ensures the test does not pass silently when no
  // exception is thrown.
  try {
    getKey(parent, true);
    Assert.fail(
        "Expected key-not-found for parent dir key, but lookup succeeded");
  } catch (IOException ex) {
    assertKeyNotFoundException(ex);
  }

  // Delete the child key
  Assert.assertTrue(fs.delete(child, false));

  // Deleting the only child should create the parent dir key if it does
  // not exist
  OFSPath parentOFSPath = new OFSPath(parent);
  String parentKey = parentOFSPath.getKeyName() + "/";
  OzoneKeyDetails parentKeyInfo = getKey(parent, true);
  Assert.assertEquals(parentKey, parentKeyInfo.getName());

  // Recursive delete with DeleteIterator
  Assert.assertTrue(fs.delete(grandparent, true));
}
 
源代码11 项目: hadoop-ozone   文件: TestRootedOzoneFileSystem.java
/**
 * Checks listStatus at the bucket root and on a key-path directory: the
 * bucket starts empty, shows a single parent entry once files exist, and
 * listStatus on the parent returns only its immediate children.
 */
@Test
public void testListStatus() throws Exception {
  Path parent = new Path(testBucketPath, "testListStatus");
  Path key1 = new Path(parent, "key1");
  Path key2 = new Path(parent, "key2");

  FileStatus[] statuses = ofs.listStatus(testBucketPath);
  Assert.assertEquals("Should be empty", 0, statuses.length);

  ContractTestUtils.touch(fs, key1);
  ContractTestUtils.touch(fs, key2);

  // Creating the two files should make exactly one entry (their parent)
  // visible at the bucket level.
  statuses = ofs.listStatus(testBucketPath);
  Assert.assertEquals("Should have created parent",
      1, statuses.length);
  Assert.assertEquals("Parent path doesn't match",
      statuses[0].getPath().toUri().getPath(), parent.toString());

  // ListStatus on a directory should return all subdirs along with
  // files, even if there exists a file and sub-dir with the same name.
  statuses = ofs.listStatus(parent);
  Assert.assertEquals(
      "FileStatus did not return all children of the directory",
      2, statuses.length);

  // ListStatus should return only the immediate children of a directory;
  // dir1's two keys count as a single child (dir1).
  ContractTestUtils.touch(fs, new Path(parent, "dir1/key3"));
  ContractTestUtils.touch(fs, new Path(parent, "dir1/key4"));
  statuses = ofs.listStatus(parent);
  Assert.assertEquals(
      "FileStatus did not return all children of the directory",
      3, statuses.length);
}
 
源代码12 项目: hadoop-ozone   文件: TestRootedOzoneFileSystem.java
/**
 * Tests listStatus on a path with subdirs.
 */
@Test
public void testListStatusOnSubDirs() throws Exception {
  // Key layout under test (relative to the bucket path):
  //      dir1/dir11/dir111
  //      dir1/dir12
  //      dir1/dir12/file121
  //      dir2
  // listStatus(dir1) must return only its immediate subdirs,
  // dir1/dir11 and dir1/dir12; deeper entries (dir111, file121)
  // must not appear.
  Path dir1 = new Path(testBucketPath, "dir1");
  Path dir11 = new Path(dir1, "dir11");
  Path dir12 = new Path(dir1, "dir12");
  fs.mkdirs(new Path(dir11, "dir111"));
  fs.mkdirs(dir12);
  ContractTestUtils.touch(fs, new Path(dir12, "file121"));
  fs.mkdirs(new Path(testBucketPath, "dir2"));

  FileStatus[] children = ofs.listStatus(dir1);
  Assert.assertEquals(
      "FileStatus should return only the immediate children",
      2, children.length);

  // The two children of dir1 must be dir11 and dir12, in either order.
  String first = children[0].getPath().toUri().getPath();
  String second = children[1].getPath().toUri().getPath();
  Assert.assertTrue(first.equals(dir11.toString()) ||
      first.equals(dir12.toString()));
  Assert.assertTrue(second.equals(dir11.toString()) ||
      second.equals(dir12.toString()));
}
 
源代码13 项目: hadoop   文件: TestHDFSContractAppend.java
/**
 * On HDFS the base contract test is expected to fail with
 * FileNotFoundException when renaming a file that has an open append
 * stream; downgrade that expected outcome instead of erroring.
 */
@Override
public void testRenameFileBeingAppended() throws Throwable {
  try {
    super.testRenameFileBeingAppended();
    fail("Expected a FileNotFoundException");
  } catch (FileNotFoundException e) {
    // downgrade; note the trailing space so the concatenated message
    // doesn't read "filestill"
    ContractTestUtils.downgrade("Renaming an open file " +
                                "still creates the old path", e);

  }
}
 
源代码14 项目: hadoop   文件: LocalFSContract.java
/**
 * Tweak some of the contract parameters based on the local system state.
 */
protected void adjustContractToLocalEnvironment() {
  if (Shell.WINDOWS) {
    // NTFS is case-insensitive, and its permissions are ACL-based rather
    // than POSIX bits.
    getConf().setBoolean(
        getConfKey(ContractOptions.IS_CASE_SENSITIVE), false);
    getConf().setBoolean(
        getConfKey(ContractOptions.SUPPORTS_UNIX_PERMISSIONS), false);
  } else if (ContractTestUtils.isOSX()) {
    // HFS+ on OS X is not case sensitive either.
    getConf().setBoolean(
        getConfKey(ContractOptions.IS_CASE_SENSITIVE), false);
  }
}
 
源代码15 项目: big-c   文件: TestHDFSContractAppend.java
/**
 * On HDFS the base contract test is expected to fail with
 * FileNotFoundException when renaming a file that has an open append
 * stream; downgrade that expected outcome instead of erroring.
 */
@Override
public void testRenameFileBeingAppended() throws Throwable {
  try {
    super.testRenameFileBeingAppended();
    fail("Expected a FileNotFoundException");
  } catch (FileNotFoundException e) {
    // downgrade; note the trailing space so the concatenated message
    // doesn't read "filestill"
    ContractTestUtils.downgrade("Renaming an open file " +
                                "still creates the old path", e);

  }
}
 
源代码16 项目: big-c   文件: LocalFSContract.java
/**
 * Tweak some of the contract parameters based on the local system state.
 */
protected void adjustContractToLocalEnvironment() {
  if (Shell.WINDOWS) {
    // Windows/NTFS: case-insensitive filesystem with ACL permissions
    // instead of Unix mode bits.
    getConf().setBoolean(
        getConfKey(ContractOptions.IS_CASE_SENSITIVE), false);
    getConf().setBoolean(
        getConfKey(ContractOptions.SUPPORTS_UNIX_PERMISSIONS), false);
  } else if (ContractTestUtils.isOSX()) {
    // OS X HFS+ is case-insensitive as well.
    getConf().setBoolean(
        getConfKey(ContractOptions.IS_CASE_SENSITIVE), false);
  }
}
 
源代码17 项目: cephfs-hadoop   文件: CephFSContract.java
/**
 * Tweak some of the contract parameters based on the local system state.
 */
protected void adjustContractToLocalEnvironment() {
  if (Shell.WINDOWS) {
    // On Windows, NTFS ignores case and enforces ACLs rather than
    // Unix-style permission bits.
    getConf().setBoolean(
        getConfKey(ContractOptions.IS_CASE_SENSITIVE), false);
    getConf().setBoolean(
        getConfKey(ContractOptions.SUPPORTS_UNIX_PERMISSIONS), false);
  } else if (ContractTestUtils.isOSX()) {
    // HFS+ on OS X does not preserve case sensitivity.
    getConf().setBoolean(
        getConfKey(ContractOptions.IS_CASE_SENSITIVE), false);
  }
}
 
源代码18 项目: hadoop   文件: TestSwiftContractOpen.java
/**
 * Disabled for Swift: the base contract expectation for opening a
 * directory does not apply to this object store.
 */
@Override
public void testOpenReadDir() throws Throwable {
  final String reason = "Skipping object-store quirk";
  ContractTestUtils.skip(reason);
}
 
源代码19 项目: hadoop   文件: TestSwiftContractOpen.java
/**
 * Disabled for Swift: the base contract expectation for opening a
 * non-empty directory does not apply to this object store.
 */
@Override
public void testOpenReadDirWithChild() throws Throwable {
  final String reason = "Skipping object-store quirk";
  ContractTestUtils.skip(reason);
}
 
源代码20 项目: hadoop   文件: TestSwiftContractCreate.java
/**
 * Disabled for Swift: an empty directory and a file are indistinguishable
 * in a blobstore, so the base overwrite check cannot hold.
 */
@Override
public void testOverwriteEmptyDirectory() throws Throwable {
  final String reason =
      "blobstores can't distinguish empty directories from files";
  ContractTestUtils.skip(reason);
}
 
源代码21 项目: hadoop   文件: TestS3NContractCreate.java
/**
 * Disabled for S3N: an empty directory and a file are indistinguishable
 * in a blobstore, so the base overwrite check cannot hold.
 */
@Override
public void testOverwriteEmptyDirectory() throws Throwable {
  final String reason =
      "blobstores can't distinguish empty directories from files";
  ContractTestUtils.skip(reason);
}
 
源代码22 项目: hadoop   文件: TestS3AContractCreate.java
/**
 * Disabled for S3A: an empty directory and a file are indistinguishable
 * in a blobstore, so the base overwrite check cannot hold.
 */
@Override
public void testOverwriteEmptyDirectory() throws Throwable {
  final String reason =
      "blobstores can't distinguish empty directories from files";
  ContractTestUtils.skip(reason);
}
 
源代码23 项目: hadoop   文件: TestS3AFastOutputStream.java
/**
 * Uploads a 1 MB file — small enough to exercise the regular
 * (single-part) upload path.
 */
@Test
public void testRegularUpload() throws IOException {
  final long oneMB = 1024L * 1024;
  ContractTestUtils.createAndVerifyFile(fs, getTestPath(), oneMB);
}
 
源代码24 项目: hadoop   文件: TestS3AFastOutputStream.java
/**
 * Uploads a 6 MB file — large enough to exercise the multipart upload
 * path.
 */
@Test
public void testMultiPartUpload() throws IOException {
  final long sixMB = 6L * 1024 * 1024;
  ContractTestUtils.createAndVerifyFile(fs, getTestPath(), sixMB);
}
 
源代码25 项目: hadoop   文件: TestS3ADeleteManyFiles.java
/**
 * Scale test: creates many files in parallel under a source directory,
 * renames the whole tree into a destination, verifies every file moved,
 * then deletes the destination recursively.
 */
@Test
public void testBulkRenameAndDelete() throws Throwable {
  final Path scaleTestDir = getTestPath();
  final Path srcDir = new Path(scaleTestDir, "src");
  final Path finalDir = new Path(scaleTestDir, "final");
  final long count = getOperationCount();
  // Start from a clean slate.
  ContractTestUtils.rm(fs, scaleTestDir, true, false);

  fs.mkdirs(srcDir);
  fs.mkdirs(finalDir);

  int testBufferSize = fs.getConf()
      .getInt(ContractTestUtils.IO_CHUNK_BUFFER_SIZE,
          ContractTestUtils.DEFAULT_IO_CHUNK_BUFFER_SIZE);
  // use Executor to speed up file creation
  ExecutorService exec = Executors.newFixedThreadPool(16);
  final ExecutorCompletionService<Boolean> completionService =
      new ExecutorCompletionService<Boolean>(exec);
  try {
    final byte[] data = ContractTestUtils.dataset(testBufferSize, 'a', 'z');

    for (int i = 0; i < count; ++i) {
      final String fileName = "foo-" + i;
      completionService.submit(new Callable<Boolean>() {
        @Override
        public Boolean call() throws IOException {
          ContractTestUtils.createFile(fs, new Path(srcDir, fileName),
              false, data);
          return fs.exists(new Path(srcDir, fileName));
        }
      });
    }
    // Drain one future per submitted task so every creation has either
    // completed or surfaced its failure before the rename below.
    for (int i = 0; i < count; ++i) {
      final Future<Boolean> future = completionService.take();
      try {
        if (!future.get()) {
          LOG.warn("cannot create file");
        }
      } catch (ExecutionException e) {
        LOG.warn("Error while uploading file", e.getCause());
        throw e;
      }
    }
  } finally {
    exec.shutdown();
  }

  int nSrcFiles = fs.listStatus(srcDir).length;
  // Assert the rename result: previously it was ignored, so a failed
  // rename only surfaced indirectly via the assertions below.
  assertTrue("rename(" + srcDir + ", " + finalDir + ") returned false",
      fs.rename(srcDir, finalDir));
  assertEquals(nSrcFiles, fs.listStatus(finalDir).length);
  // Spot-check first, middle and last files on both sides of the rename.
  ContractTestUtils.assertPathDoesNotExist(fs, "not deleted after rename",
      new Path(srcDir, "foo-" + 0));
  ContractTestUtils.assertPathDoesNotExist(fs, "not deleted after rename",
      new Path(srcDir, "foo-" + count / 2));
  ContractTestUtils.assertPathDoesNotExist(fs, "not deleted after rename",
      new Path(srcDir, "foo-" + (count - 1)));
  ContractTestUtils.assertPathExists(fs, "not renamed to dest dir",
      new Path(finalDir, "foo-" + 0));
  ContractTestUtils.assertPathExists(fs, "not renamed to dest dir",
      new Path(finalDir, "foo-" + count / 2));
  ContractTestUtils.assertPathExists(fs, "not renamed to dest dir",
      new Path(finalDir, "foo-" + (count - 1)));

  ContractTestUtils.assertDeleted(fs, finalDir, true, false);
}
 
源代码26 项目: hadoop   文件: S3AScaleTestBase.java
// Per-test cleanup: recursively remove the shared scale-test directory.
// NOTE(review): the two boolean flags presumably mean (recursive,
// tolerate-missing-path) — confirm against ContractTestUtils.rm.
@After
public void tearDown() throws Exception {
  ContractTestUtils.rm(fs, getTestPath(), true, true);
}
 
源代码27 项目: big-c   文件: TestSwiftContractOpen.java
/**
 * Disabled for Swift: the base contract expectation for opening a
 * directory does not apply to this object store.
 */
@Override
public void testOpenReadDir() throws Throwable {
  final String reason = "Skipping object-store quirk";
  ContractTestUtils.skip(reason);
}
 
源代码28 项目: big-c   文件: TestSwiftContractOpen.java
/**
 * Disabled for Swift: the base contract expectation for opening a
 * non-empty directory does not apply to this object store.
 */
@Override
public void testOpenReadDirWithChild() throws Throwable {
  final String reason = "Skipping object-store quirk";
  ContractTestUtils.skip(reason);
}
 
源代码29 项目: big-c   文件: TestSwiftContractCreate.java
/**
 * Disabled for Swift: an empty directory and a file are indistinguishable
 * in a blobstore, so the base overwrite check cannot hold.
 */
@Override
public void testOverwriteEmptyDirectory() throws Throwable {
  final String reason =
      "blobstores can't distinguish empty directories from files";
  ContractTestUtils.skip(reason);
}
 
源代码30 项目: big-c   文件: TestS3NContractCreate.java
/**
 * Disabled for S3N: an empty directory and a file are indistinguishable
 * in a blobstore, so the base overwrite check cannot hold.
 */
@Override
public void testOverwriteEmptyDirectory() throws Throwable {
  final String reason =
      "blobstores can't distinguish empty directories from files";
  ContractTestUtils.skip(reason);
}
 
 类所在包
 同包方法