类org.apache.hadoop.fs.FileSystemTestHelper源码实例Demo

下面列出了使用 org.apache.hadoop.fs.FileSystemTestHelper 的 API 的示例代码及写法,也可以点击各条目的链接前往 GitHub 查看完整源代码。

源代码1 项目: hadoop   文件: BaseTestHttpFSWith.java

/**
 * Verifies that truncating a two-block file down to one block completes
 * immediately (no block recovery) and that the remaining bytes are intact.
 * Skipped on the local file system, which is not exercised for truncate here.
 */
private void testTruncate() throws Exception {
  if (!isLocalFS()) {
    final short repl = 3;
    final int blockSize = 1024;
    final int numOfBlocks = 2;
    FileSystem fs = FileSystem.get(getProxiedFSConf());
    try {
      fs.mkdirs(getProxiedFSTestDir());
      Path file = new Path(getProxiedFSTestDir(), "foo.txt");
      final byte[] data = FileSystemTestHelper.getFileData(
          numOfBlocks, blockSize);
      FileSystemTestHelper.createFile(fs, file, data, blockSize, repl);

      // Truncating on a block boundary should not need recovery.
      final int newLength = blockSize;

      boolean isReady = fs.truncate(file, newLength);
      Assert.assertTrue("Recovery is not expected.", isReady);

      FileStatus fileStatus = fs.getFileStatus(file);
      // BUG FIX: JUnit expects (expected, actual); the original had them
      // swapped, which produces a misleading failure message.
      Assert.assertEquals(newLength, fileStatus.getLen());
      AppendTestUtil.checkFullFile(fs, file, newLength, data, file.toString());
    } finally {
      // Close even when an assertion above fails, so the fs is not leaked.
      fs.close();
    }
  }
}
 
源代码2 项目: hadoop   文件: TestReservedRawPaths.java

/**
 * Starts a single-datanode MiniDFSCluster backed by a JavaKeyStore key
 * provider under the test root, wires the test wrappers, and creates the
 * test encryption key.
 */
@Before
public void setup() throws Exception {
  conf = new HdfsConfiguration();
  fsHelper = new FileSystemTestHelper();
  // Set up java key store
  String testRoot = fsHelper.getTestRootDir();
  File testRootDir = new File(testRoot).getAbsoluteFile();
  final Path jksPath = new Path(testRootDir.toString(), "test.jks");
  conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI,
      JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri()
  );
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  Logger.getLogger(EncryptionZoneManager.class).setLevel(Level.TRACE);
  fs = cluster.getFileSystem();
  // Reuse the file system already obtained above instead of calling
  // cluster.getFileSystem() a second time.
  fsWrapper = new FileSystemTestWrapper(fs);
  fcWrapper = new FileContextTestWrapper(
      FileContext.getFileContext(cluster.getURI(), conf));
  dfsAdmin = new HdfsAdmin(cluster.getURI(), conf);
  // Need to set the client's KeyProvider to the NN's for JKS,
  // else the updates do not get flushed properly
  fs.getClient().setKeyProvider(cluster.getNameNode().getNamesystem()
      .getProvider());
  DFSTestUtil.createKey(TEST_KEY, cluster, conf);
}
 
源代码3 项目: hadoop   文件: TestEncryptionZones.java

// Starts a one-datanode MiniDFSCluster with an encryption key provider
// configured, wires up the fs/fc test wrappers, and creates the test key.
@Before
public void setup() throws Exception {
  conf = new HdfsConfiguration();
  fsHelper = new FileSystemTestHelper();
  // Set up java key store
  String testRoot = fsHelper.getTestRootDir();
  testRootDir = new File(testRoot).getAbsoluteFile();
  conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI, getKeyProviderURI());
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
  // Lower the batch size for testing
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES,
      2);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  // Trace-level logging makes encryption-zone test failures easier to debug.
  Logger.getLogger(EncryptionZoneManager.class).setLevel(Level.TRACE);
  fs = cluster.getFileSystem();
  fsWrapper = new FileSystemTestWrapper(fs);
  fcWrapper = new FileContextTestWrapper(
      FileContext.getFileContext(cluster.getURI(), conf));
  dfsAdmin = new HdfsAdmin(cluster.getURI(), conf);
  setProvider();
  // Create a test key
  DFSTestUtil.createKey(TEST_KEY, cluster, conf);
}
 
源代码4 项目: hadoop   文件: ViewFileSystemBaseTest.java

/**
 * Populates the target file system with the directories and file that the
 * mount table will link to, then builds the view file system over it.
 */
@Before
public void setUp() throws Exception {
  initializeTargetTestRoot();

  // Directories that the mount table entries will point at.
  for (String dir : new String[] {"user", "data", "dir2", "dir3"}) {
    fsTarget.mkdirs(new Path(targetTestRoot, dir));
  }
  FileSystemTestHelper.createFile(fsTarget, new Path(targetTestRoot,"aFile"));

  // Configure the default mount table with our mount-point links in the
  // test root, then instantiate the view fs on top of it.
  conf = ViewFileSystemTestSetup.createConfig();
  setupMountPoints();
  fsView = FileSystem.get(FsConstants.VIEWFS_URI, conf);
}
 
源代码5 项目: hadoop   文件: ViewFileSystemBaseTest.java

/**
 * Checks that block locations reported through the view fs match those of
 * the underlying target fs, both when resolving by path and when passing a
 * FileStatus explicitly.
 */
@Test
public void testGetBlockLocations() throws IOException {
  // 10 blocks of 1024 bytes on the target fs.
  Path targetFilePath = new Path(targetTestRoot,"data/largeFile");
  FileSystemTestHelper.createFile(fsTarget, 
      targetFilePath, 10, 1024);
  Path viewFilePath = new Path("/data/largeFile");
  Assert.assertTrue("Created File should be type File",
      fsView.isFile(viewFilePath));
  BlockLocation[] viewBL = fsView.getFileBlockLocations(
      fsView.getFileStatus(viewFilePath), 0, 10240 + 100);
  Assert.assertEquals(SupportsBlocks ? 10 : 1, viewBL.length);
  BlockLocation[] targetBL = fsTarget.getFileBlockLocations(
      fsTarget.getFileStatus(targetFilePath), 0, 10240 + 100);
  compareBLs(viewBL, targetBL);

  // Same test but now get it via the FileStatus parameter.
  // BUG FIX: the view-side result was previously discarded, so the second
  // compareBLs silently reused the stale viewBL from the first call.
  viewBL = fsView.getFileBlockLocations(
      fsView.getFileStatus(viewFilePath), 0, 10240 + 100);
  targetBL = fsTarget.getFileBlockLocations(
      fsTarget.getFileStatus(targetFilePath), 0, 10240 + 100);
  compareBLs(viewBL, targetBL);
}
 
源代码6 项目: hadoop   文件: TestChRootedFileSystem.java

/**
 * Creates a fresh test root on the local file system and chroots the file
 * system under test to it.
 */
@Before
public void setUp() throws Exception {
  final Configuration conf = new Configuration();
  fSysTarget = FileSystem.getLocal(conf);
  fileSystemTestHelper = new FileSystemTestHelper();
  chrootedTo = fileSystemTestHelper.getAbsoluteTestRootPath(fSysTarget);

  // A previous run may have been killed before cleanup; start clean.
  fSysTarget.delete(chrootedTo, true);
  fSysTarget.mkdirs(chrootedTo);

  // Chroot the fs under test to the root of the test directory.
  fSys = new ChRootedFileSystem(chrootedTo.toUri(), conf);
}
 
源代码7 项目: big-c   文件: BaseTestHttpFSWith.java

/**
 * Verifies that truncating a two-block file down to one block completes
 * immediately (no block recovery) and that the remaining bytes are intact.
 * Skipped on the local file system, which is not exercised for truncate here.
 */
private void testTruncate() throws Exception {
  if (!isLocalFS()) {
    final short repl = 3;
    final int blockSize = 1024;
    final int numOfBlocks = 2;
    FileSystem fs = FileSystem.get(getProxiedFSConf());
    try {
      fs.mkdirs(getProxiedFSTestDir());
      Path file = new Path(getProxiedFSTestDir(), "foo.txt");
      final byte[] data = FileSystemTestHelper.getFileData(
          numOfBlocks, blockSize);
      FileSystemTestHelper.createFile(fs, file, data, blockSize, repl);

      // Truncating on a block boundary should not need recovery.
      final int newLength = blockSize;

      boolean isReady = fs.truncate(file, newLength);
      Assert.assertTrue("Recovery is not expected.", isReady);

      FileStatus fileStatus = fs.getFileStatus(file);
      // BUG FIX: JUnit expects (expected, actual); the original had them
      // swapped, which produces a misleading failure message.
      Assert.assertEquals(newLength, fileStatus.getLen());
      AppendTestUtil.checkFullFile(fs, file, newLength, data, file.toString());
    } finally {
      // Close even when an assertion above fails, so the fs is not leaked.
      fs.close();
    }
  }
}
 
源代码8 项目: big-c   文件: TestReservedRawPaths.java

/**
 * Starts a single-datanode MiniDFSCluster backed by a JavaKeyStore key
 * provider under the test root, wires the test wrappers, and creates the
 * test encryption key.
 */
@Before
public void setup() throws Exception {
  conf = new HdfsConfiguration();
  fsHelper = new FileSystemTestHelper();
  // Set up java key store
  String testRoot = fsHelper.getTestRootDir();
  File testRootDir = new File(testRoot).getAbsoluteFile();
  final Path jksPath = new Path(testRootDir.toString(), "test.jks");
  conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI,
      JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri()
  );
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  Logger.getLogger(EncryptionZoneManager.class).setLevel(Level.TRACE);
  fs = cluster.getFileSystem();
  // Reuse the file system already obtained above instead of calling
  // cluster.getFileSystem() a second time.
  fsWrapper = new FileSystemTestWrapper(fs);
  fcWrapper = new FileContextTestWrapper(
      FileContext.getFileContext(cluster.getURI(), conf));
  dfsAdmin = new HdfsAdmin(cluster.getURI(), conf);
  // Need to set the client's KeyProvider to the NN's for JKS,
  // else the updates do not get flushed properly
  fs.getClient().setKeyProvider(cluster.getNameNode().getNamesystem()
      .getProvider());
  DFSTestUtil.createKey(TEST_KEY, cluster, conf);
}
 
源代码9 项目: big-c   文件: TestEncryptionZones.java

// Starts a one-datanode MiniDFSCluster with an encryption key provider
// configured, wires up the fs/fc test wrappers, and creates the test key.
@Before
public void setup() throws Exception {
  conf = new HdfsConfiguration();
  fsHelper = new FileSystemTestHelper();
  // Set up java key store
  String testRoot = fsHelper.getTestRootDir();
  testRootDir = new File(testRoot).getAbsoluteFile();
  conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI, getKeyProviderURI());
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
  // Lower the batch size for testing
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES,
      2);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  // Trace-level logging makes encryption-zone test failures easier to debug.
  Logger.getLogger(EncryptionZoneManager.class).setLevel(Level.TRACE);
  fs = cluster.getFileSystem();
  fsWrapper = new FileSystemTestWrapper(fs);
  fcWrapper = new FileContextTestWrapper(
      FileContext.getFileContext(cluster.getURI(), conf));
  dfsAdmin = new HdfsAdmin(cluster.getURI(), conf);
  setProvider();
  // Create a test key
  DFSTestUtil.createKey(TEST_KEY, cluster, conf);
}
 
源代码10 项目: big-c   文件: ViewFileSystemBaseTest.java

/**
 * Populates the target file system with the directories and file that the
 * mount table will link to, then builds the view file system over it.
 */
@Before
public void setUp() throws Exception {
  initializeTargetTestRoot();

  // Directories that the mount table entries will point at.
  for (String dir : new String[] {"user", "data", "dir2", "dir3"}) {
    fsTarget.mkdirs(new Path(targetTestRoot, dir));
  }
  FileSystemTestHelper.createFile(fsTarget, new Path(targetTestRoot,"aFile"));

  // Configure the default mount table with our mount-point links in the
  // test root, then instantiate the view fs on top of it.
  conf = ViewFileSystemTestSetup.createConfig();
  setupMountPoints();
  fsView = FileSystem.get(FsConstants.VIEWFS_URI, conf);
}
 
源代码11 项目: big-c   文件: ViewFileSystemBaseTest.java

/**
 * Checks that block locations reported through the view fs match those of
 * the underlying target fs, both when resolving by path and when passing a
 * FileStatus explicitly.
 */
@Test
public void testGetBlockLocations() throws IOException {
  // 10 blocks of 1024 bytes on the target fs.
  Path targetFilePath = new Path(targetTestRoot,"data/largeFile");
  FileSystemTestHelper.createFile(fsTarget, 
      targetFilePath, 10, 1024);
  Path viewFilePath = new Path("/data/largeFile");
  Assert.assertTrue("Created File should be type File",
      fsView.isFile(viewFilePath));
  BlockLocation[] viewBL = fsView.getFileBlockLocations(
      fsView.getFileStatus(viewFilePath), 0, 10240 + 100);
  Assert.assertEquals(SupportsBlocks ? 10 : 1, viewBL.length);
  BlockLocation[] targetBL = fsTarget.getFileBlockLocations(
      fsTarget.getFileStatus(targetFilePath), 0, 10240 + 100);
  compareBLs(viewBL, targetBL);

  // Same test but now get it via the FileStatus parameter.
  // BUG FIX: the view-side result was previously discarded, so the second
  // compareBLs silently reused the stale viewBL from the first call.
  viewBL = fsView.getFileBlockLocations(
      fsView.getFileStatus(viewFilePath), 0, 10240 + 100);
  targetBL = fsTarget.getFileBlockLocations(
      fsTarget.getFileStatus(targetFilePath), 0, 10240 + 100);
  compareBLs(viewBL, targetBL);
}
 
源代码12 项目: big-c   文件: TestChRootedFileSystem.java

/**
 * Creates a fresh test root on the local file system and chroots the file
 * system under test to it.
 */
@Before
public void setUp() throws Exception {
  final Configuration conf = new Configuration();
  fSysTarget = FileSystem.getLocal(conf);
  fileSystemTestHelper = new FileSystemTestHelper();
  chrootedTo = fileSystemTestHelper.getAbsoluteTestRootPath(fSysTarget);

  // A previous run may have been killed before cleanup; start clean.
  fSysTarget.delete(chrootedTo, true);
  fSysTarget.mkdirs(chrootedTo);

  // Chroot the fs under test to the root of the test directory.
  fSys = new ChRootedFileSystem(chrootedTo.toUri(), conf);
}
 
源代码13 项目: hadoop   文件: TestViewFileSystemHdfs.java

/**
 * Overrides the base setup to run the view fs against two HDFS targets
 * (rather than the local fs), then delegates to the superclass.
 */
@Override
@Before
public void setUp() throws Exception {
  // Use the pre-created HDFS instances as view-fs targets; the second
  // target gets its own absolute test root.
  fsTarget = fHdfs;
  fsTarget2 = fHdfs2;
  targetTestRoot2 = new FileSystemTestHelper().getAbsoluteTestRootPath(fsTarget2);
  super.setUp();
}
 
源代码14 项目: hadoop   文件: TestDelegationTokenFetcher.java

/**
 * Registers a mocked DistributedFileSystem under the test URI so that
 * FileSystem.get(uri, conf) hands back the mock during the tests.
 */
@Before 
public void init() throws URISyntaxException, IOException {
  dfs = mock(DistributedFileSystem.class);
  conf = new Configuration();
  uri = new URI("hdfs://" + SERVICE_VALUE);
  FileSystemTestHelper.addFileSystemForTesting(uri, conf, dfs);
}
 
源代码15 项目: hadoop   文件: TestEncryptionZonesWithHA.java

// Brings up an HA MiniDFSCluster (two NNs, one DN) backed by a JavaKeyStore
// key provider, creates the test key on both namenodes, and points the
// client at NN0's provider.
@Before
public void setupCluster() throws Exception {
  conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  HAUtil.setAllowStandbyReads(conf, true);
  fsHelper = new FileSystemTestHelper();
  String testRoot = fsHelper.getTestRootDir();
  testRootDir = new File(testRoot).getAbsoluteFile();
  // Back the key provider with a JKS file under the test root.
  conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI,
      JavaKeyStoreProvider.SCHEME_NAME + "://file" +
      new Path(testRootDir.toString(), "test.jks").toUri()
  );

  cluster = new MiniDFSCluster.Builder(conf)
    .nnTopology(MiniDFSNNTopology.simpleHATopology())
    .numDataNodes(1)
    .build();
  cluster.waitActive();
  cluster.transitionToActive(0);

  fs = (DistributedFileSystem)HATestUtil.configureFailoverFs(cluster, conf);
  // Create the test key on both namenodes (indices 0 and 1).
  DFSTestUtil.createKey(TEST_KEY, cluster, 0, conf);
  DFSTestUtil.createKey(TEST_KEY, cluster, 1, conf);
  nn0 = cluster.getNameNode(0);
  nn1 = cluster.getNameNode(1);
  dfsAdmin0 = new HdfsAdmin(cluster.getURI(0), conf);
  dfsAdmin1 = new HdfsAdmin(cluster.getURI(1), conf);
  // NOTE(review): sibling JKS tests set the client's provider to the NN's
  // because JKS updates otherwise don't get flushed; same pattern here.
  KeyProviderCryptoExtension nn0Provider =
      cluster.getNameNode(0).getNamesystem().getProvider();
  fs.getClient().setKeyProvider(nn0Provider);
}
 
源代码16 项目: hadoop   文件: TestWebHDFSForHA.java

// Verifies that a delegation token obtained from the active NN can still be
// renewed and cancelled through WebHDFS after failing over to the other NN.
@Test
public void testSecureHAToken() throws IOException, InterruptedException {
  Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
  conf.setBoolean(DFSConfigKeys
          .DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);

  MiniDFSCluster cluster = null;
  WebHdfsFileSystem fs = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).nnTopology(topo)
        .numDataNodes(0).build();

    HATestUtil.setFailoverConfigurations(cluster, conf, LOGICAL_NAME);
    cluster.waitActive();

    // Spy on the fs so the renew/cancel delegation calls can be verified.
    fs = spy((WebHdfsFileSystem) FileSystem.get(WEBHDFS_URI, conf));
    FileSystemTestHelper.addFileSystemForTesting(WEBHDFS_URI, conf, fs);

    cluster.transitionToActive(0);
    Token<?> token = fs.getDelegationToken(null);

    // Fail over: NN0 goes down, NN1 becomes active.
    cluster.shutdownNameNode(0);
    cluster.transitionToActive(1);
    token.renew(conf);
    token.cancel(conf);
    verify(fs).renewDelegationToken(token);
    verify(fs).cancelDelegationToken(token);
  } finally {
    // Clean up the fs and cluster even if the assertions above fail.
    IOUtils.cleanup(null, fs);
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
源代码17 项目: hadoop   文件: TestSaslDataTransfer.java

/**
 * Tests DataTransferProtocol with the given client configuration: writes a
 * multi-block file, reads it back, and checks full 3-way replication.
 *
 * @param conf client configuration
 * @throws IOException if there is an I/O error
 */
private void doTest(HdfsConfiguration conf) throws IOException {
  fs = FileSystem.get(cluster.getURI(), conf);
  FileSystemTestHelper.createFile(fs, PATH, NUM_BLOCKS, BLOCK_SIZE);
  // Reading the file back must yield exactly the generated contents.
  byte[] expected = FileSystemTestHelper.getFileData(NUM_BLOCKS, BLOCK_SIZE);
  assertArrayEquals(expected,
      DFSTestUtil.readFile(fs, PATH).getBytes("UTF-8"));
  // Every block should report three replica hosts.
  BlockLocation[] locations =
      fs.getFileBlockLocations(PATH, 0, Long.MAX_VALUE);
  assertNotNull(locations);
  assertEquals(NUM_BLOCKS, locations.length);
  for (BlockLocation location : locations) {
    assertNotNull(location.getHosts());
    assertEquals(3, location.getHosts().length);
  }
}
 
源代码18 项目: hadoop   文件: TestFsShellPermission.java

/**
 * Creates each entry under {@code topdir}: directories via mkdirs, regular
 * files via the test helper, then applies the entry's permission and
 * ownership to the new path.
 */
private void createFiles(FileSystem fs, String topdir,
    FileEntry[] entries) throws IOException {
  for (FileEntry entry : entries) {
    final Path target = new Path(topdir + "/" + entry.getPath());
    if (entry.isDirectory()) {
      fs.mkdirs(target);
    } else {
      FileSystemTestHelper.createFile(fs, target);
    }
    fs.setPermission(target, new FsPermission(entry.getPermission()));
    fs.setOwner(target, entry.getOwner(), entry.getGroup());
  }
}
 
源代码19 项目: hadoop   文件: TestTransferFsImage.java

/**
 * Test to verify the timeout of Image upload: serves the image-transfer
 * servlet from a local HttpServer2, sets a 2s transfer timeout, and expects
 * uploadImageFromStorage to fail with a read timeout.
 */
@Test(timeout = 10000)
public void testImageUploadTimeout() throws Exception {
  Configuration conf = new HdfsConfiguration();
  NNStorage mockStorage = Mockito.mock(NNStorage.class);
  HttpServer2 testServer = HttpServerFunctionalTest.createServer("hdfs");
  try {
    testServer.addServlet("ImageTransfer", ImageServlet.PATH_SPEC,
        TestImageTransferServlet.class);
    testServer.start();
    URL serverURL = HttpServerFunctionalTest.getServerURL(testServer);
    // set the timeout here, otherwise it will take default.
    TransferFsImage.timeout = 2000;

    // Fabricate a small image file for the mocked storage to hand out.
    File tmpDir = new File(new FileSystemTestHelper().getTestRootDir());
    tmpDir.mkdirs();

    File mockImageFile = File.createTempFile("image", "", tmpDir);
    FileOutputStream imageFile = new FileOutputStream(mockImageFile);
    imageFile.write("data".getBytes());
    imageFile.close();
    Mockito.when(
        mockStorage.findImageFile(Mockito.any(NameNodeFile.class),
            Mockito.anyLong())).thenReturn(mockImageFile);
    Mockito.when(mockStorage.toColonSeparatedString()).thenReturn(
        "storage:info:string");
    
    // The upload must time out against the (slow) test servlet.
    try {
      TransferFsImage.uploadImageFromStorage(serverURL, conf, mockStorage,
          NameNodeFile.IMAGE, 1L);
      fail("TransferImage Should fail with timeout");
    } catch (SocketTimeoutException e) {
      assertEquals("Upload should timeout", "Read timed out", e.getMessage());
    }
  } finally {
    testServer.stop();
  }
}
 
源代码20 项目: hadoop   文件: TestFsVolumeList.java

/**
 * Prepares a mocked dataset, a temporary base directory, and a BlockScanner
 * with periodic scanning disabled.
 */
@Before
public void setUp() {
  dataset = mock(FsDatasetImpl.class);
  baseDir = new FileSystemTestHelper().getTestRootDir();
  // A negative scan period disables the periodic scan for these tests.
  Configuration scannerConf = new Configuration();
  scannerConf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1);
  blockScanner = new BlockScanner(null, scannerConf);
}
 
源代码21 项目: hadoop   文件: ViewFileSystemTestSetup.java

/**
 * Builds a ViewFileSystem whose mount table links cover the test dir, the
 * home dir, and the working dir on the given target file system.
 *
 * @param conf configuration that receives the mount-table links
 * @param fileSystemTestHelper helper supplying the test root path
 * @param fsTarget - the target fs of the view fs.
 * @return return the ViewFS File context to be used for tests
 * @throws Exception if setting up the target or view fs fails
 */
static public FileSystem setupForViewFileSystem(Configuration conf, FileSystemTestHelper fileSystemTestHelper, FileSystem fsTarget) throws Exception {
  /**
   * create the test root on local_fs - the  mount table will point here
   */
  Path targetOfTests = fileSystemTestHelper.getTestRootPath(fsTarget);
  // In case previous test was killed before cleanup
  fsTarget.delete(targetOfTests, true);
  fsTarget.mkdirs(targetOfTests);


  // Set up viewfs link for test dir as described above
  String testDir = fileSystemTestHelper.getTestRootPath(fsTarget).toUri()
      .getPath();
  linkUpFirstComponents(conf, testDir, fsTarget, "test dir");
  
  
  // Set up viewfs link for home dir as described above
  setUpHomeDir(conf, fsTarget);
  
  
  // the test path may be relative to working dir - we need to make that work:
  // Set up viewfs link for wd as described above
  String wdDir = fsTarget.getWorkingDirectory().toUri().getPath();
  linkUpFirstComponents(conf, wdDir, fsTarget, "working dir");


  FileSystem fsView = FileSystem.get(FsConstants.VIEWFS_URI, conf);
  fsView.setWorkingDirectory(new Path(wdDir)); // in case testdir relative to wd.
  Log.info("Working dir is: " + fsView.getWorkingDirectory());
  return fsView;
}
 
源代码22 项目: hadoop   文件: TestChRootedFileSystem.java

/**
 * We would have liked renames across file systems to fail, but
 * unfortunately there is no way to distinguish the two file systems,
 * so the rename resolves within the chroot instead.
 * @throws IOException
 */
@Test
public void testRenameAcrossFs() throws IOException {
  fSys.mkdirs(new Path("/newDir/dirFoo"));
  fSys.rename(new Path("/newDir/dirFoo"), new Path("file:///tmp/dirFooBar"));
  // BUG FIX: isDir returns a boolean that was previously discarded, so this
  // check could never fail. Assert that the directory actually exists.
  Assert.assertTrue("Renamed dir should exist under the chroot",
      FileSystemTestHelper.isDir(fSys, new Path("/tmp/dirFooBar")));
}
 
源代码23 项目: hadoop   文件: TestChRootedFileSystem.java

/**
 * Exercises listStatus on the chrooted root: the root status, an empty
 * listing, and a populated listing whose entries carry full target paths.
 */
@Test
public void testList() throws IOException {
  // The chrooted root is a directory whose reported path is the full
  // (un-chrooted) path on the target fs.
  FileStatus rootStatus = fSys.getFileStatus(new Path("/"));
  Assert.assertTrue(rootStatus.isDirectory());
  Assert.assertEquals(rootStatus.getPath(), chrootedTo);

  // Listing the still-empty root yields nothing.
  FileStatus[] dirPaths = fSys.listStatus(new Path("/"));
  Assert.assertEquals(0, dirPaths.length);

  // Populate: two files, two top-level dirs, and one nested dir that must
  // not appear in a non-recursive listing of "/".
  fileSystemTestHelper.createFile(fSys, "/foo");
  fileSystemTestHelper.createFile(fSys, "/bar");
  fSys.mkdirs(new Path("/dirX"));
  fSys.mkdirs(fileSystemTestHelper.getTestRootPath(fSys, "/dirY"));
  fSys.mkdirs(new Path("/dirX/dirXX"));

  dirPaths = fSys.listStatus(new Path("/"));
  Assert.assertEquals(4, dirPaths.length); // note 2 crc files

  // Each returned status carries the full path on the target fs.
  FileStatus status;
  status = FileSystemTestHelper.containsPath(new Path(chrootedTo, "foo"), dirPaths);
  Assert.assertNotNull(status);
  Assert.assertTrue(status.isFile());
  status = FileSystemTestHelper.containsPath(new Path(chrootedTo, "bar"), dirPaths);
  Assert.assertNotNull(status);
  Assert.assertTrue(status.isFile());
  status = FileSystemTestHelper.containsPath(new Path(chrootedTo, "dirX"), dirPaths);
  Assert.assertNotNull(status);
  Assert.assertTrue(status.isDirectory());
  status = FileSystemTestHelper.containsPath(new Path(chrootedTo, "dirY"), dirPaths);
  Assert.assertNotNull(status);
  Assert.assertTrue(status.isDirectory());
}
 

源代码24 项目: hadoop   文件: TestViewFileSystemDelegation.java
/**
 * Registers {@code clazz} as the FileSystem implementation for the URI's
 * scheme, instantiates it, links it into the mount table under
 * {@code /mounts/<scheme>}, and returns the instance.
 */
static FakeFileSystem setupFileSystem(URI uri, Class<?> clazz)
    throws Exception {
  String scheme = uri.getScheme();
  // FIX: parameterize the raw Class type (Class<?>) — callers are
  // unaffected and the raw-type warning goes away.
  conf.set("fs." + scheme + ".impl", clazz.getName());
  FakeFileSystem fs = (FakeFileSystem) FileSystem.get(uri, conf);
  assertEquals(uri, fs.getUri());
  Path targetPath = new FileSystemTestHelper().getAbsoluteTestRootPath(fs);
  ConfigUtil.addLink(conf, "/mounts/" + scheme, targetPath.toUri());
  return fs;
}
 
源代码25 项目: big-c   文件: TestViewFileSystemHdfs.java

/**
 * Overrides the base setup to run the view fs against two HDFS targets
 * (rather than the local fs), then delegates to the superclass.
 */
@Override
@Before
public void setUp() throws Exception {
  // Use the pre-created HDFS instances as view-fs targets; the second
  // target gets its own absolute test root.
  fsTarget = fHdfs;
  fsTarget2 = fHdfs2;
  targetTestRoot2 = new FileSystemTestHelper().getAbsoluteTestRootPath(fsTarget2);
  super.setUp();
}
 
源代码26 项目: big-c   文件: TestDelegationTokenFetcher.java

/**
 * Registers a mocked DistributedFileSystem under the test URI so that
 * FileSystem.get(uri, conf) hands back the mock during the tests.
 */
@Before 
public void init() throws URISyntaxException, IOException {
  dfs = mock(DistributedFileSystem.class);
  conf = new Configuration();
  uri = new URI("hdfs://" + SERVICE_VALUE);
  FileSystemTestHelper.addFileSystemForTesting(uri, conf, dfs);
}
 
源代码27 项目: big-c   文件: TestEncryptionZonesWithHA.java

// Brings up an HA MiniDFSCluster (two NNs, one DN) backed by a JavaKeyStore
// key provider, creates the test key on both namenodes, and points the
// client at NN0's provider.
@Before
public void setupCluster() throws Exception {
  conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  HAUtil.setAllowStandbyReads(conf, true);
  fsHelper = new FileSystemTestHelper();
  String testRoot = fsHelper.getTestRootDir();
  testRootDir = new File(testRoot).getAbsoluteFile();
  // Back the key provider with a JKS file under the test root.
  conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI,
      JavaKeyStoreProvider.SCHEME_NAME + "://file" +
      new Path(testRootDir.toString(), "test.jks").toUri()
  );

  cluster = new MiniDFSCluster.Builder(conf)
    .nnTopology(MiniDFSNNTopology.simpleHATopology())
    .numDataNodes(1)
    .build();
  cluster.waitActive();
  cluster.transitionToActive(0);

  fs = (DistributedFileSystem)HATestUtil.configureFailoverFs(cluster, conf);
  // Create the test key on both namenodes (indices 0 and 1).
  DFSTestUtil.createKey(TEST_KEY, cluster, 0, conf);
  DFSTestUtil.createKey(TEST_KEY, cluster, 1, conf);
  nn0 = cluster.getNameNode(0);
  nn1 = cluster.getNameNode(1);
  dfsAdmin0 = new HdfsAdmin(cluster.getURI(0), conf);
  dfsAdmin1 = new HdfsAdmin(cluster.getURI(1), conf);
  // NOTE(review): sibling JKS tests set the client's provider to the NN's
  // because JKS updates otherwise don't get flushed; same pattern here.
  KeyProviderCryptoExtension nn0Provider =
      cluster.getNameNode(0).getNamesystem().getProvider();
  fs.getClient().setKeyProvider(nn0Provider);
}
 
源代码28 项目: big-c   文件: TestWebHDFSForHA.java

// Verifies that a delegation token obtained from the active NN can still be
// renewed and cancelled through WebHDFS after failing over to the other NN.
@Test
public void testSecureHAToken() throws IOException, InterruptedException {
  Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
  conf.setBoolean(DFSConfigKeys
          .DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);

  MiniDFSCluster cluster = null;
  WebHdfsFileSystem fs = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).nnTopology(topo)
        .numDataNodes(0).build();

    HATestUtil.setFailoverConfigurations(cluster, conf, LOGICAL_NAME);
    cluster.waitActive();

    // Spy on the fs so the renew/cancel delegation calls can be verified.
    fs = spy((WebHdfsFileSystem) FileSystem.get(WEBHDFS_URI, conf));
    FileSystemTestHelper.addFileSystemForTesting(WEBHDFS_URI, conf, fs);

    cluster.transitionToActive(0);
    Token<?> token = fs.getDelegationToken(null);

    // Fail over: NN0 goes down, NN1 becomes active.
    cluster.shutdownNameNode(0);
    cluster.transitionToActive(1);
    token.renew(conf);
    token.cancel(conf);
    verify(fs).renewDelegationToken(token);
    verify(fs).cancelDelegationToken(token);
  } finally {
    // Clean up the fs and cluster even if the assertions above fail.
    IOUtils.cleanup(null, fs);
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
源代码29 项目: big-c   文件: TestSaslDataTransfer.java

/**
 * Tests DataTransferProtocol with the given client configuration: writes a
 * multi-block file, reads it back, and checks full 3-way replication.
 *
 * @param conf client configuration
 * @throws IOException if there is an I/O error
 */
private void doTest(HdfsConfiguration conf) throws IOException {
  fs = FileSystem.get(cluster.getURI(), conf);
  FileSystemTestHelper.createFile(fs, PATH, NUM_BLOCKS, BLOCK_SIZE);
  // Reading the file back must yield exactly the generated contents.
  byte[] expected = FileSystemTestHelper.getFileData(NUM_BLOCKS, BLOCK_SIZE);
  assertArrayEquals(expected,
      DFSTestUtil.readFile(fs, PATH).getBytes("UTF-8"));
  // Every block should report three replica hosts.
  BlockLocation[] locations =
      fs.getFileBlockLocations(PATH, 0, Long.MAX_VALUE);
  assertNotNull(locations);
  assertEquals(NUM_BLOCKS, locations.length);
  for (BlockLocation location : locations) {
    assertNotNull(location.getHosts());
    assertEquals(3, location.getHosts().length);
  }
}
 
源代码30 项目: big-c   文件: TestFsShellPermission.java

/**
 * Creates each entry under {@code topdir}: directories via mkdirs, regular
 * files via the test helper, then applies the entry's permission and
 * ownership to the new path.
 */
private void createFiles(FileSystem fs, String topdir,
    FileEntry[] entries) throws IOException {
  for (FileEntry entry : entries) {
    final Path target = new Path(topdir + "/" + entry.getPath());
    if (entry.isDirectory()) {
      fs.mkdirs(target);
    } else {
      FileSystemTestHelper.createFile(fs, target);
    }
    fs.setPermission(target, new FsPermission(entry.getPermission()));
    fs.setOwner(target, entry.getOwner(), entry.getGroup());
  }
}
 
 类所在包
 同包方法