org.apache.hadoop.hdfs.DFSTestUtil Source Code Examples

Listed below are example usages of org.apache.hadoop.hdfs.DFSTestUtil, drawn from the hadoop, big-c, and RDFS projects on GitHub.
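
For orientation, here is a minimal, self-contained sketch of the two most common DFSTestUtil helpers seen below, createFile and readFileBuffer, run against a single-node MiniDFSCluster. The path, length, and seed values are illustrative only:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class DFSTestUtilDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    try {
      cluster.waitActive();
      DistributedFileSystem fs = cluster.getFileSystem();
      Path file = new Path("/demo.dat");                           // illustrative path
      DFSTestUtil.createFile(fs, file, 1024L, (short) 1, 0xBEEFL); // length, replication, seed
      byte[] contents = DFSTestUtil.readFileBuffer(fs, file);      // read the file back
      System.out.println("read " + contents.length + " bytes");
    } finally {
      cluster.shutdown();
    }
  }
}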

Example 1  Project: hadoop   File: TestRenameWithSnapshots.java
/**
 * Test rename from a non-snapshottable dir to a snapshottable dir
 */
@Test (timeout=60000)
public void testRenameFromNonSDir2SDir() throws Exception {
  final Path sdir1 = new Path("/dir1");
  final Path sdir2 = new Path("/dir2");
  hdfs.mkdirs(sdir1);
  hdfs.mkdirs(sdir2);
  final Path foo = new Path(sdir1, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
  
  SnapshotTestHelper.createSnapshot(hdfs, sdir2, snap1);
  
  final Path newfoo = new Path(sdir2, "foo");
  hdfs.rename(foo, newfoo);
  
  INode fooNode = fsdir.getINode4Write(newfoo.toString());
  assertTrue(fooNode instanceof INodeDirectory);
}
 
Example 2  Project: big-c   File: TestFsDatasetCache.java
@Test(timeout=60000)
public void testPageRounder() throws Exception {
  // Write a small file
  Path fileName = new Path("/testPageRounder");
  final int smallBlocks = 512; // This should be smaller than the page size
  assertTrue("Page size should be greater than smallBlocks!",
      PAGE_SIZE > smallBlocks);
  final int numBlocks = 5;
  final int fileLen = smallBlocks * numBlocks;
  FSDataOutputStream out =
      fs.create(fileName, false, 4096, (short)1, smallBlocks);
  out.write(new byte[fileLen]);
  out.close();
  HdfsBlockLocation[] locs = (HdfsBlockLocation[])fs.getFileBlockLocations(
      fileName, 0, fileLen);
  // Cache the file and check the sizes match the page size
  setHeartbeatResponse(cacheBlocks(locs));
  DFSTestUtil.verifyExpectedCacheUsage(PAGE_SIZE * numBlocks, numBlocks, fsd);
  // Uncache and check that it decrements by the page size too
  setHeartbeatResponse(uncacheBlocks(locs));
  DFSTestUtil.verifyExpectedCacheUsage(0, 0, fsd);
}
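
The expected usage of PAGE_SIZE * numBlocks, rather than fileLen, comes from OS page rounding: each 512-byte block is mapped and pinned one whole page at a time. A quick sketch of that arithmetic, assuming a 4096-byte page:

long pageSize = 4096L;   // assumed OS page size
long blockLen = 512L;    // block is smaller than one page
long pinnedPerBlock = ((blockLen + pageSize - 1) / pageSize) * pageSize;
// pinnedPerBlock == 4096, so caching 5 such blocks pins 5 * 4096 bytes, not 5 * 512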
 
Example 3  Project: big-c   File: TestHASafeMode.java
/**
 * Test case for entering safe mode on the active NameNode when it is
 * already in startup safe mode. Regression test for HDFS-2747.
 */
@Test
public void testEnterSafeModeInANNShouldNotThrowNPE() throws Exception {
  banner("Restarting active");
  DFSTestUtil
    .createFile(fs, new Path("/test"), 3 * BLOCK_SIZE, (short) 3, 1L);
  restartActive();
  nn0.getRpcServer().transitionToActive(
      new StateChangeRequestInfo(RequestSource.REQUEST_BY_USER));

  FSNamesystem namesystem = nn0.getNamesystem();
  String status = namesystem.getSafemode();
  assertTrue("Bad safemode status: '" + status + "'", status
      .startsWith("Safe mode is ON."));
  NameNodeAdapter.enterSafeMode(nn0, false);
  assertTrue("Failed to enter into safemode in active", namesystem
      .isInSafeMode());
  NameNodeAdapter.enterSafeMode(nn0, false);
  assertTrue("Failed to enter into safemode in active", namesystem
      .isInSafeMode());
}
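
Note that enterSafeMode is deliberately called twice: the second call checks that entering safe mode is idempotent and does not trigger the NullPointerException that HDFS-2747 reported.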
 
Example 4  Project: big-c   File: BaseTestHttpFSWith.java
private void testConcat() throws Exception {
  Configuration config = getProxiedFSConf();
  config.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
  if (!isLocalFS()) {
    FileSystem fs = FileSystem.get(config);
    fs.mkdirs(getProxiedFSTestDir());
    Path path1 = new Path("/test/foo.txt");
    Path path2 = new Path("/test/bar.txt");
    Path path3 = new Path("/test/derp.txt");
    DFSTestUtil.createFile(fs, path1, 1024, (short) 3, 0);
    DFSTestUtil.createFile(fs, path2, 1024, (short) 3, 0);
    DFSTestUtil.createFile(fs, path3, 1024, (short) 3, 0);
    fs.close();
    fs = getHttpFSFileSystem();
    fs.concat(path1, new Path[]{path2, path3});
    fs.close();
    fs = FileSystem.get(config);
    Assert.assertTrue(fs.exists(path1));
    Assert.assertFalse(fs.exists(path2));
    Assert.assertFalse(fs.exists(path3));
    fs.close();
  }
}
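
For reference, a minimal sketch of calling concat directly against HDFS, without the HttpFS proxy used above. The paths are hypothetical, and HDFS expects the source files to share the target's block size and replication:

FileSystem fs = FileSystem.get(config);
Path target = new Path("/test/target.txt");
Path[] sources = { new Path("/test/part1.txt"), new Path("/test/part2.txt") };
fs.concat(target, sources);  // sources are appended to target, then removed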
 
Example 5  Project: hadoop   File: TestWebHDFSForHA.java
@Test
public void testMultipleNamespacesConfigured() throws Exception {
  Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
  MiniDFSCluster cluster = null;
  WebHdfsFileSystem fs = null;

  try {
    cluster = new MiniDFSCluster.Builder(conf).nnTopology(topo)
            .numDataNodes(1).build();

    HATestUtil.setFailoverConfigurations(cluster, conf, LOGICAL_NAME);

    cluster.waitActive();
    DFSTestUtil.addHAConfiguration(conf, LOGICAL_NAME + "remote");
    DFSTestUtil.setFakeHttpAddresses(conf, LOGICAL_NAME + "remote");

    fs = (WebHdfsFileSystem)FileSystem.get(WEBHDFS_URI, conf);
    Assert.assertEquals(2, fs.getResolvedNNAddr().length);
  } finally {
    IOUtils.cleanup(null, fs);
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 6  Project: hadoop   File: TestFsDatasetCache.java
@Test(timeout=60000)
public void testUncacheUnknownBlock() throws Exception {
  // Create a file
  Path fileName = new Path("/testUncacheUnknownBlock");
  int fileLen = 4096;
  DFSTestUtil.createFile(fs, fileName, fileLen, (short)1, 0xFDFD);
  HdfsBlockLocation[] locs = (HdfsBlockLocation[])fs.getFileBlockLocations(
      fileName, 0, fileLen);

  // Try to uncache it without caching it first
  setHeartbeatResponse(uncacheBlocks(locs));

  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      return fsd.getNumBlocksFailedToUncache() > 0;
    }
  }, 100, 10000);
}
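
On Java 8 and later, the anonymous Supplier can be collapsed into a lambda. A minimal equivalent, reusing the test's fsd field:

GenericTestUtils.waitFor(() -> fsd.getNumBlocksFailedToUncache() > 0,
    100 /* poll every 100 ms */, 10000 /* time out after 10 s */);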
 
Example 7  Project: big-c   File: TestSnapshotDeletion.java
/**
 * Deleting a directory that has a snapshottable descendant with snapshots
 * must fail.
 */
@Test (timeout=300000)
public void testDeleteDirectoryWithSnapshot2() throws Exception {
  Path file0 = new Path(sub, "file0");
  Path file1 = new Path(sub, "file1");
  DFSTestUtil.createFile(hdfs, file0, BLOCKSIZE, REPLICATION, seed);
  DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);
  
  Path subfile1 = new Path(subsub, "file0");
  Path subfile2 = new Path(subsub, "file1");
  DFSTestUtil.createFile(hdfs, subfile1, BLOCKSIZE, REPLICATION, seed);
  DFSTestUtil.createFile(hdfs, subfile2, BLOCKSIZE, REPLICATION, seed);

  // Allow snapshot for subsub1, and create snapshot for it
  hdfs.allowSnapshot(subsub);
  hdfs.createSnapshot(subsub, "s1");

  // Deleting dir while its descendant subsub has snapshots should fail
  exception.expect(RemoteException.class);
  String error = subsub.toString()
      + " is snapshottable and already has snapshots";
  exception.expectMessage(error);
  hdfs.delete(dir, true);
}
 
Example 8  Project: RDFS   File: TestMetricsTimeVaryingClasses.java
public void testMetricsTimeVaryingClasses() throws Exception {
  metrics.bytesWrittenLatency.resetMinMax();
  metrics.bytesWrittenRate.resetMinMax();

  //writesFromLocalClient uses MetricsTimeVaryingInt
  assertEquals(metrics.writesFromLocalClient.getCurrentIntervalValue(),0);
  final long LONG_FILE_LEN = Integer.MAX_VALUE+1L;
  DFSTestUtil.createFile(fileSystem, new Path("/tmp.txt"),
      LONG_FILE_LEN, (short)1, 1L);

  //bytesWritten uses MetricsTimeVaryingLong
  assertEquals(LONG_FILE_LEN, metrics.bytesWritten.getCurrentIntervalValue());
  //bytesWrittenLatency uses MetricsTimeVaryingRate
  assertTrue(metrics.bytesWrittenLatency.getMaxTime()>0);
  assertEquals(LONG_FILE_LEN, metrics.bytesWritten.getCurrentIntervalValue());
 
  //writesFromLocalClient uses MetricsTimeVaryingInt
  assertTrue(metrics.writesFromLocalClient.getCurrentIntervalValue()>0);
}
 
Example 9  Project: big-c   File: TestScrLazyPersistFiles.java
public void doShortCircuitReadMetaFileCorruptionTest() throws IOException,
    InterruptedException {
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  Path path1 = new Path("/" + METHOD_NAME + ".01.dat");
  Path path2 = new Path("/" + METHOD_NAME + ".02.dat");

  final int SEED = 0xFADED;
  makeRandomTestFile(path1, BLOCK_SIZE, true, SEED);
  ensureFileReplicasOnStorageType(path1, RAM_DISK);

  // Create another file with a replica on RAM_DISK, which evicts the first.
  makeRandomTestFile(path2, BLOCK_SIZE, true, SEED);

  // Sleep for a short time to allow the lazy writer thread to do its job.
  Thread.sleep(3 * LAZY_WRITER_INTERVAL_SEC * 1000);
  triggerBlockReport();

  // Corrupt the lazy-persisted checksum file, and verify that checksum
  // verification catches it.
  ensureFileReplicasOnStorageType(path1, DEFAULT);
  File metaFile = cluster.getBlockMetadataFile(0,
      DFSTestUtil.getFirstBlock(fs, path1));
  MiniDFSCluster.corruptBlock(metaFile);
  exception.expect(ChecksumException.class);
  DFSTestUtil.readFileBuffer(fs, path1);
}
 
Example 10  Project: big-c   File: TestDistCpSync.java
private void verifyCopy(FileStatus s, FileStatus t, boolean compareName)
    throws Exception {
  Assert.assertEquals(s.isDirectory(), t.isDirectory());
  if (compareName) {
    Assert.assertEquals(s.getPath().getName(), t.getPath().getName());
  }
  if (!s.isDirectory()) {
    // verify the file content is the same
    byte[] sbytes = DFSTestUtil.readFileBuffer(dfs, s.getPath());
    byte[] tbytes = DFSTestUtil.readFileBuffer(dfs, t.getPath());
    Assert.assertArrayEquals(sbytes, tbytes);
  } else {
    FileStatus[] slist = dfs.listStatus(s.getPath());
    FileStatus[] tlist = dfs.listStatus(t.getPath());
    Assert.assertEquals(slist.length, tlist.length);
    for (int i = 0; i < slist.length; i++) {
      verifyCopy(slist[i], tlist[i], true);
    }
  }
}
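
A hypothetical invocation of this helper, comparing a source tree against its DistCp target (both paths are assumptions):

FileStatus src = dfs.getFileStatus(new Path("/src"));
FileStatus dst = dfs.getFileStatus(new Path("/dst"));
verifyCopy(src, dst, false);  // false: the two root names may differ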
 
Example 11  Project: hadoop   File: TestHASafeMode.java
/**
 * Tests the case where, while a standby is down, more blocks are added to
 * the namespace but the edit log is not rolled. When the standby starts up,
 * it receives notification about the new blocks during the safemode
 * extension period.
 */
@Test
public void testBlocksAddedBeforeStandbyRestart() throws Exception {
  banner("Starting with NN0 active and NN1 standby, creating some blocks");
  DFSTestUtil.createFile(fs, new Path("/test"), 3*BLOCK_SIZE, (short) 3, 1L);
  // Roll edit log so that, when the SBN restarts, it will load
  // the namespace during startup.
  nn0.getRpcServer().rollEditLog();

  banner("Creating some blocks that won't be in the edit log");
  DFSTestUtil.createFile(fs, new Path("/test2"), 5*BLOCK_SIZE, (short) 3, 1L);
  
  banner("Restarting standby");
  restartStandby();

  // We expect it not to be stuck in safemode, since those blocks
  // that are already visible to the SBN should be processed
  // in the initial block reports.
  assertSafeMode(nn1, 3, 3, 3, 0);

  banner("Waiting for standby to catch up to active namespace");
  HATestUtil.waitForStandbyToCatchUp(nn0, nn1);
  assertSafeMode(nn1, 8, 8, 3, 0);
}
 
Example 12  Project: RDFS   File: TestDatanodeRestart.java
public void testFinalizedReplicas() throws Exception {
  // bring up a cluster of 3
  Configuration conf = new Configuration();
  conf.setLong("dfs.block.size", 1024L);
  conf.setInt("dfs.write.packet.size", 512);
  MiniDFSCluster cluster = new MiniDFSCluster(conf, 3, true, null);
  cluster.waitActive();
  FileSystem fs = cluster.getFileSystem();
  try {
    // test finalized replicas
    final String TopDir = "/test";
    DFSTestUtil util = new DFSTestUtil("TestCrcCorruption", 2, 3, 8 * 1024);
    util.createFiles(fs, TopDir, (short) 3);
    util.waitReplication(fs, TopDir, (short) 3);
    util.checkFiles(fs, TopDir);
    cluster.restartDataNodes();
    cluster.waitActive();
    util.checkFiles(fs, TopDir);
  } finally {
    cluster.shutdown();
  }
}
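
Unlike the static createFile helpers in the other examples, this test drives the instance API of DFSTestUtil, which generates a tree of random files. A hedged sketch of that lifecycle; the constructor arguments are test name, file count, maximum directory depth, and maximum file size:

DFSTestUtil util = new DFSTestUtil("demo", 5, 2, 4 * 1024);
util.createFiles(fs, "/demo", (short) 3);     // populate /demo with 5 random files
util.waitReplication(fs, "/demo", (short) 3); // block until fully replicated
util.checkFiles(fs, "/demo");                 // verify every file reads back intact
util.cleanup(fs, "/demo");                    // delete the generated tree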
 
Example 13  Project: big-c   File: TestRenameWithSnapshots.java
/**
 * Similar to testRenameUCFileInSnapshot, but renames first and then appends
 * to the file without closing it. Unit test for HDFS-5425.
 */
@Test
public void testAppendFileAfterRenameInSnapshot() throws Exception {
  final Path test = new Path("/test");
  final Path foo = new Path(test, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
  SnapshotTestHelper.createSnapshot(hdfs, test, "s0");
  // rename bar --> bar2
  final Path bar2 = new Path(foo, "bar2");
  hdfs.rename(bar, bar2);
  // append to the file and keep it under construction
  FSDataOutputStream out = hdfs.append(bar2);
  out.writeByte(0);
  ((DFSOutputStream) out.getWrappedStream()).hsync(
      EnumSet.of(SyncFlag.UPDATE_LENGTH));

  // save namespace and restart
  restartClusterAndCheckImage(true);
}
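
The hsync call with SyncFlag.UPDATE_LENGTH is the key step: it makes the NameNode record the appended length while the file is still under construction, so the under-construction state is what gets saved and verified across the restart.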
 
Example 14  Project: hadoop   File: TestCacheDirectives.java
@Test(timeout=60000)
public void testExceedsCapacity() throws Exception {
  // Create a giant file
  final Path fileName = new Path("/exceeds");
  final long fileLen = CACHE_CAPACITY * (NUM_DATANODES*2);
  int numCachedReplicas = (int) ((CACHE_CAPACITY*NUM_DATANODES)/BLOCK_SIZE);
  DFSTestUtil.createFile(dfs, fileName, fileLen, (short) NUM_DATANODES,
      0xFADED);
  dfs.addCachePool(new CachePoolInfo("pool"));
  dfs.addCacheDirective(new CacheDirectiveInfo.Builder().setPool("pool")
      .setPath(fileName).setReplication((short) 1).build());
  waitForCachedBlocks(namenode, -1, numCachedReplicas,
      "testExceeds:1");
  checkPendingCachedEmpty(cluster);
  Thread.sleep(1000);
  checkPendingCachedEmpty(cluster);

  // Try creating a file with giant-sized blocks that exceed cache capacity
  dfs.delete(fileName, false);
  DFSTestUtil.createFile(dfs, fileName, 4096, fileLen, CACHE_CAPACITY * 2,
      (short) 1, 0xFADED);
  checkPendingCachedEmpty(cluster);
  Thread.sleep(1000);
  checkPendingCachedEmpty(cluster);
}
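
The second createFile call uses the longer overload (buffer size, file length, block size, replication, seed). With a block size of CACHE_CAPACITY * 2, no single replica can fit in the cache, which is exactly what the repeated checkPendingCachedEmpty checks assert.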
 
Example 15  Project: hadoop   File: TestMRCJCFileInputFormat.java
private void createInputs(FileSystem fs, Path inDir, String fileName)
    throws IOException, TimeoutException, InterruptedException {
  // create a multi-block file on hdfs
  Path path = new Path(inDir, fileName);
  final short replication = 2;
  DataOutputStream out = fs.create(path, true, 4096,
                                   replication, 512, null);
  for(int i=0; i < 1000; ++i) {
    out.writeChars("Hello\n");
  }
  out.close();
  System.out.println("Wrote file");
  DFSTestUtil.waitReplication(fs, path, replication);
}
 
Example 16  Project: big-c   File: TestHost2NodesMap.java
@Test
public void testRemove() throws Exception {
  DatanodeDescriptor nodeNotInMap =
    DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/d1/r4");
  assertFalse(map.remove(nodeNotInMap));
  
  assertTrue(map.remove(dataNodes[0]));
  assertTrue(map.getDatanodeByHost("1.1.1.1")==null);
  assertTrue(map.getDatanodeByHost("2.2.2.2")==dataNodes[1]);
  DatanodeDescriptor node = map.getDatanodeByHost("3.3.3.3");
  assertTrue(node==dataNodes[2] || node==dataNodes[3]);
  assertNull(map.getDatanodeByHost("4.4.4.4"));
  
  assertTrue(map.remove(dataNodes[2]));
  assertNull(map.getDatanodeByHost("1.1.1.1"));
  assertEquals(map.getDatanodeByHost("2.2.2.2"), dataNodes[1]);
  assertEquals(map.getDatanodeByHost("3.3.3.3"), dataNodes[3]);
  
  assertTrue(map.remove(dataNodes[3]));
  assertNull(map.getDatanodeByHost("1.1.1.1"));
  assertEquals(map.getDatanodeByHost("2.2.2.2"), dataNodes[1]);
  assertNull(map.getDatanodeByHost("3.3.3.3"));
  
  assertFalse(map.remove(null));
  assertTrue(map.remove(dataNodes[1]));
  assertFalse(map.remove(dataNodes[1]));
}
 
Example 17  Project: big-c   File: TestDataNodeMetrics.java
@Test
public void testSendDataPacketMetrics() throws Exception {
  Configuration conf = new HdfsConfiguration();
  final int interval = 1;
  conf.set(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY, "" + interval);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  try {
    FileSystem fs = cluster.getFileSystem();
    // Create and read a 1 byte file
    Path tmpfile = new Path("/tmp.txt");
    DFSTestUtil.createFile(fs, tmpfile,
        (long)1, (short)1, 1L);
    DFSTestUtil.readFile(fs, tmpfile);
    List<DataNode> datanodes = cluster.getDataNodes();
    assertEquals(datanodes.size(), 1);
    DataNode datanode = datanodes.get(0);
    MetricsRecordBuilder rb = getMetrics(datanode.getMetrics().name());
    // Expect 2 packets, 1 for the 1 byte read, 1 for the empty packet
    // signaling the end of the block
    assertCounter("SendDataPacketTransferNanosNumOps", (long)2, rb);
    assertCounter("SendDataPacketBlockedOnNetworkNanosNumOps", (long)2, rb);
    // Wait for at least 1 rollover
    Thread.sleep((interval + 1) * 1000);
    // Check that the sendPacket percentiles rolled to non-zero values
    String sec = interval + "s";
    assertQuantileGauges("SendDataPacketBlockedOnNetworkNanos" + sec, rb);
    assertQuantileGauges("SendDataPacketTransferNanos" + sec, rb);
  } finally {
    if (cluster != null) {cluster.shutdown();}
  }
}
 
Example 18  Project: big-c   File: TestRetryCacheWithHA.java
@Override
void prepare() throws Exception {
  final Path targetPath = new Path(target);
  DFSTestUtil.createFile(dfs, targetPath, BlockSize, DataNodes, 0);
  for (int i = 0; i < srcPaths.length; i++) {
    DFSTestUtil.createFile(dfs, srcPaths[i], BlockSize, DataNodes, 0);
  }
  assertEquals(BlockSize, dfs.getFileStatus(targetPath).getLen());
}
 
Example 19  Project: hadoop   File: TestStickyBit.java
private static void initCluster(boolean format) throws Exception {
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).format(format)
    .build();
  hdfs = cluster.getFileSystem();
  assertTrue(hdfs instanceof DistributedFileSystem);
  hdfsAsUser1 = DFSTestUtil.getFileSystemAs(user1, conf);
  assertTrue(hdfsAsUser1 instanceof DistributedFileSystem);
  hdfsAsUser2 = DFSTestUtil.getFileSystemAs(user2, conf);
  assertTrue(hdfsAsUser2 instanceof DistributedFileSystem);
}
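
getFileSystemAs returns a FileSystem handle bound to the given UserGroupInformation. A hedged sketch of how such test users are typically constructed; the user and group names are illustrative:

UserGroupInformation user1 =
    UserGroupInformation.createUserForTesting("user1", new String[] {"group1"});
FileSystem hdfsAsUser1 = DFSTestUtil.getFileSystemAs(user1, conf);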
 
Example 20  Project: big-c   File: TestSnapshotDeletion.java
/**
 * Delete a snapshot that was taken before a recursive directory deletion;
 * the directory diff lists should be combined correctly.
 */
@Test (timeout=60000)
public void testDeleteSnapshot2() throws Exception {
  final Path root = new Path("/");

  Path dir = new Path("/dir1");
  Path file1 = new Path(dir, "file1");
  DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);

  hdfs.allowSnapshot(root);
  hdfs.createSnapshot(root, "s1");

  Path file2 = new Path(dir, "file2");
  DFSTestUtil.createFile(hdfs, file2, BLOCKSIZE, REPLICATION, seed);
  INodeFile file2Node = fsdir.getINode(file2.toString()).asFile();
  long file2NodeId = file2Node.getId();

  hdfs.createSnapshot(root, "s2");

  // delete directory recursively
  assertTrue(hdfs.delete(dir, true));
  assertNotNull(fsdir.getInode(file2NodeId));

  // delete second snapshot
  hdfs.deleteSnapshot(root, "s2");
  assertTrue(fsdir.getInode(file2NodeId) == null);

  NameNodeAdapter.enterSafeMode(cluster.getNameNode(), false);
  NameNodeAdapter.saveNamespace(cluster.getNameNode());

  // restart NN
  cluster.restartNameNodes();
}
 
Example 21  Project: big-c   File: TestShortCircuitCache.java
@Test(timeout=60000)
public void testDataXceiverCleansUpSlotsOnFailure() throws Exception {
  BlockReaderTestUtil.enableShortCircuitShmTracing();
  TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
  Configuration conf = createShortCircuitConf(
      "testDataXceiverCleansUpSlotsOnFailure", sockDir);
  conf.setLong(DFS_CLIENT_READ_SHORTCIRCUIT_STREAMS_CACHE_EXPIRY_MS_KEY,
      1000000000L);
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  DistributedFileSystem fs = cluster.getFileSystem();
  final Path TEST_PATH1 = new Path("/test_file1");
  final Path TEST_PATH2 = new Path("/test_file2");
  final int TEST_FILE_LEN = 4096;
  final int SEED = 0xFADE1;
  DFSTestUtil.createFile(fs, TEST_PATH1, TEST_FILE_LEN,
      (short)1, SEED);
  DFSTestUtil.createFile(fs, TEST_PATH2, TEST_FILE_LEN,
      (short)1, SEED);

  // The first read should allocate one shared memory segment and slot.
  DFSTestUtil.readFileBuffer(fs, TEST_PATH1);

  // The second read should fail, and we should only have 1 segment and 1 slot
  // left.
  fs.getClient().getConf().brfFailureInjector =
      new TestCleanupFailureInjector();
  try {
    DFSTestUtil.readFileBuffer(fs, TEST_PATH2);
  } catch (Throwable t) {
    GenericTestUtils.assertExceptionContains("TCP reads were disabled for " +
        "testing, but we failed to do a non-TCP read.", t);
  }
  checkNumberOfSegmentsAndSlots(1, 1,
      cluster.getDataNodes().get(0).getShortCircuitRegistry());
  cluster.shutdown();
  sockDir.close();
}
 
Example 22  Project: big-c   File: TestAtomicFileOutputStream.java
/**
 * Test case where there is no existing file
 */
@Test
public void testWriteNewFile() throws IOException {
  OutputStream fos = new AtomicFileOutputStream(DST_FILE);
  assertFalse(DST_FILE.exists());
  fos.write(TEST_STRING.getBytes());
  fos.flush();
  assertFalse(DST_FILE.exists());
  fos.close();
  assertTrue(DST_FILE.exists());
  
  String readBackData = DFSTestUtil.readFile(DST_FILE);
  assertEquals(TEST_STRING, readBackData);
}
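
AtomicFileOutputStream implements the classic write-to-temp-then-rename pattern: the data appears under the final name only once close() succeeds, which is what the alternating exists() assertions verify. A minimal sketch of the same idea in plain java.nio, fully qualified to avoid clashing with org.apache.hadoop.fs.Path (file names are illustrative):

java.nio.file.Path tmp = java.nio.file.Paths.get("dst_file.tmp");
java.nio.file.Files.write(tmp, "test data".getBytes());           // stage the contents
java.nio.file.Files.move(tmp, java.nio.file.Paths.get("dst_file"),
    java.nio.file.StandardCopyOption.ATOMIC_MOVE);                // all-or-nothing rename
// readers never observe a partially written dst_file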
 
Example 23  Project: hadoop   File: TestRetryCacheWithHA.java
@Override
void prepare() throws Exception {
  final Path filePath = new Path(oldName);
  if (!dfs.exists(filePath)) {
    DFSTestUtil.createFile(dfs, filePath, BlockSize, DataNodes, 0);
  }
}
 
Example 24  Project: hadoop   File: TestSnapshotRename.java
/**
 * Test renaming a snapshot to the name of another existing snapshot.
 */
@Test (timeout=60000)
public void testRenameToExistingSnapshot() throws Exception {
  DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);
  // Create snapshots for sub1
  SnapshotTestHelper.createSnapshot(hdfs, sub1, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, sub1, "s2");
  
  exception.expect(SnapshotException.class);
  String error = "The snapshot s2 already exists for directory "
      + sub1.toString();
  exception.expectMessage(error);
  hdfs.renameSnapshot(sub1, "s1", "s2");
}
 
Example 25  Project: big-c   File: TestRenameWithSnapshots.java
@Test (timeout=60000)
public void testRenameTwiceInSnapshot() throws Exception {
  hdfs.mkdirs(sub1);
  hdfs.allowSnapshot(sub1);
  DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPL, SEED);
  hdfs.createSnapshot(sub1, snap1);
  hdfs.rename(file1, file2);
  
  hdfs.createSnapshot(sub1, snap2);
  hdfs.rename(file2, file3);

  SnapshotDiffReport diffReport;
  
  // Query the diff report and make sure it looks as expected.
  diffReport = hdfs.getSnapshotDiffReport(sub1, snap1, snap2);
  LOG.info("DiffList is " + diffReport.toString());
  List<DiffReportEntry> entries = diffReport.getDiffList();
  assertTrue(entries.size() == 2);
  assertTrue(existsInDiffReport(entries, DiffType.MODIFY, "", null));
  assertTrue(existsInDiffReport(entries, DiffType.RENAME, file1.getName(),
      file2.getName()));
  
  diffReport = hdfs.getSnapshotDiffReport(sub1, snap2, "");
  LOG.info("DiffList is " + diffReport.toString());
  entries = diffReport.getDiffList();
  assertTrue(entries.size() == 2);
  assertTrue(existsInDiffReport(entries, DiffType.MODIFY, "", null));
  assertTrue(existsInDiffReport(entries, DiffType.RENAME, file2.getName(),
      file3.getName()));
  
  diffReport = hdfs.getSnapshotDiffReport(sub1, snap1, "");
  LOG.info("DiffList is " + diffReport.toString());
  entries = diffReport.getDiffList();
  assertTrue(entries.size() == 2);
  assertTrue(existsInDiffReport(entries, DiffType.MODIFY, "", null));
  assertTrue(existsInDiffReport(entries, DiffType.RENAME, file1.getName(),
      file3.getName()));
}
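
In these diff queries, an empty string for the second snapshot name stands for the current state of the directory, so the last two reports compare each snapshot against the live tree.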
 
Example 26  Project: RDFS   File: TestBalancer.java
private void createFile(long fileLen, short replicationFactor)
throws IOException {
  FileSystem fs = cluster.getFileSystem();
  DFSTestUtil.createFile(fs, filePath, fileLen, 
      replicationFactor, r.nextLong());
  DFSTestUtil.waitReplication(fs, filePath, replicationFactor);
}
 
Example 27  Project: big-c   File: BlockReportTestBase.java
private ArrayList<Block> prepareForRide(final Path filePath,
                                        final String METHOD_NAME,
                                        long fileSize) throws IOException {
  LOG.info("Running test " + METHOD_NAME);

  DFSTestUtil.createFile(fs, filePath, fileSize,
    REPL_FACTOR, rand.nextLong());

  return locatedToBlocks(cluster.getNameNodeRpc()
    .getBlockLocations(filePath.toString(), FILE_START,
      fileSize).getLocatedBlocks(), null);
}
 
Example 28  Project: big-c   File: TestSnapshotDiffReport.java
/**
 * Renaming a file/dir and then deleting an ancestor dir of the rename
 * target should be reported as a delete.
 */
@Test
public void testDiffReportWithRenameAndDelete() throws Exception {
  final Path root = new Path("/");
  final Path dir1 = new Path(root, "dir1");
  final Path dir2 = new Path(root, "dir2");
  final Path foo = new Path(dir1, "foo");
  final Path fileInFoo = new Path(foo, "file");
  final Path bar = new Path(dir2, "bar");
  final Path fileInBar = new Path(bar, "file");
  DFSTestUtil.createFile(hdfs, fileInFoo, BLOCKSIZE, REPLICATION, seed);
  DFSTestUtil.createFile(hdfs, fileInBar, BLOCKSIZE, REPLICATION, seed);

  SnapshotTestHelper.createSnapshot(hdfs, root, "s0");
  hdfs.rename(fileInFoo, fileInBar, Rename.OVERWRITE);
  SnapshotTestHelper.createSnapshot(hdfs, root, "s1");
  verifyDiffReport(root, "s0", "s1",
      new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("")),
      new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("dir1/foo")),
      new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("dir2/bar")),
      new DiffReportEntry(DiffType.DELETE, DFSUtil
          .string2Bytes("dir2/bar/file")),
      new DiffReportEntry(DiffType.RENAME,
          DFSUtil.string2Bytes("dir1/foo/file"),
          DFSUtil.string2Bytes("dir2/bar/file")));

  // delete bar
  hdfs.delete(bar, true);
  SnapshotTestHelper.createSnapshot(hdfs, root, "s2");
  verifyDiffReport(root, "s0", "s2",
      new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("")),
      new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("dir1/foo")),
      new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("dir2")),
      new DiffReportEntry(DiffType.DELETE, DFSUtil.string2Bytes("dir2/bar")),
      new DiffReportEntry(DiffType.DELETE,
          DFSUtil.string2Bytes("dir1/foo/file")));
}