org.apache.hadoop.fs.FSDataInputStream#read() Source Code Examples

The examples below show how org.apache.hadoop.fs.FSDataInputStream#read() is used in several open-source projects; each listing names the project and source file it was taken from.
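
Before diving into the project examples, here is a minimal stand-alone sketch of the three read() variants that recur throughout the listings: the single-byte read, the buffered read into a byte[], and the positional read that does not move the stream's seek position. The path and configuration here are placeholders of my own, not taken from any of the projects below.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FsDataInputStreamReadDemo {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);      // default FileSystem from core-site.xml
    Path path = new Path("/tmp/demo.txt");     // hypothetical path

    try (FSDataInputStream in = fs.open(path)) {
      // 1. Single-byte read: returns 0..255, or -1 at end of stream.
      int first = in.read();

      // 2. Buffered read: returns the number of bytes actually read (may be < buf.length).
      byte[] buf = new byte[4096];
      int n = in.read(buf, 0, buf.length);

      // 3. Positional read: reads at an absolute offset without changing getPos().
      byte[] pbuf = new byte[16];
      int pn = in.read(0L, pbuf, 0, pbuf.length);

      System.out.printf("first=%d, n=%d, pn=%d%n", first, n, pn);
    }
  }
}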

Example 1  Project: big-c  File: AbstractContractOpenTest.java
@Test
public void testOpenFileTwice() throws Throwable {
  describe("verify that two opened file streams are independent");
  Path path = path("testopenfiletwice.txt");
  byte[] block = dataset(TEST_FILE_LEN, 0, 255);
  //this file now has a simple rule: offset => value
  createFile(getFileSystem(), path, false, block);
  //open first
  FSDataInputStream instream1 = getFileSystem().open(path);
  int c = instream1.read();
  assertEquals(0,c);
  FSDataInputStream instream2 = null;
  try {
    instream2 = getFileSystem().open(path);
    assertEquals("first read of instream 2", 0, instream2.read());
    assertEquals("second read of instream 1", 1, instream1.read());
    instream1.close();
    assertEquals("second read of instream 2", 1, instream2.read());
    //close instream1 again
    instream1.close();
  } finally {
    IOUtils.closeStream(instream1);
    IOUtils.closeStream(instream2);
  }
}
 
Example 2  Project: RDFS  File: TestRaidNode.java
private void validateFile(FileSystem fileSys, Path name1, Path name2, long crc) 
  throws IOException {

  FileStatus stat1 = fileSys.getFileStatus(name1);
  FileStatus stat2 = fileSys.getFileStatus(name2);
  assertTrue(" Length of file " + name1 + " is " + stat1.getLen() + 
             " is different from length of file " + name1 + " " + stat2.getLen(),
             stat1.getLen() == stat2.getLen());

  CRC32 newcrc = new CRC32();
  FSDataInputStream stm = fileSys.open(name2);
  final byte[] b = new byte[4192];
  int num = 0;
  while (num >= 0) {
    num = stm.read(b);
    if (num < 0) {
      break;
    }
    newcrc.update(b, 0, num);
  }
  stm.close();
  if (newcrc.getValue() != crc) {
    fail("CRC mismatch of files " + name1 + " with file " + name2);
  }
}
 
Example 3  Project: RDFS  File: TestBlockMissingException.java
private void validateFile(FileSystem fileSys, Path name)
  throws IOException {

  FSDataInputStream stm = fileSys.open(name);
  final byte[] b = new byte[4192];
  int num = 0;
  boolean gotException = false;

  try {
    while (num >= 0) {
      num = stm.read(b);
      if (num < 0) {
        break;
      }
    }
  } catch (BlockMissingException e) {
    gotException = true;
  }
  stm.close();
  assertTrue("Expected BlockMissingException ", gotException);
}
 
Example 4
@Test(timeout = TestConstants.SWIFT_TEST_TIMEOUT)
public void testWriteReadFile() throws Exception {
  final Path f = new Path(getBaseURI() + "/test/test");
  final FSDataOutputStream fsDataOutputStream = sFileSystem.create(f);
  final String message = "Test string";
  fsDataOutputStream.write(message.getBytes());
  fsDataOutputStream.close();
  assertExists("created file", f);
  FSDataInputStream open = null;
  try {
    open = sFileSystem.open(f);
    final byte[] bytes = new byte[512];
    final int read = open.read(bytes);
    final byte[] buffer = new byte[read];
    System.arraycopy(bytes, 0, buffer, 0, read);
    assertEquals(message, new String(buffer));
  } finally {
    sFileSystem.delete(f, false);
    IOUtils.closeStream(open);
  }
}
 
Example 5  Project: hbase  File: BlockIOUtils.java
/**
 * Read from an input stream at least <code>necessaryLen</code> and if possible,
 * <code>extraLen</code> also if available. Analogous to
 * {@link IOUtils#readFully(InputStream, byte[], int, int)}, but uses positional read and
 * specifies a number of "extra" bytes that would be desirable but not absolutely necessary to
 * read.
 * @param buff ByteBuff to read into.
 * @param dis the input stream to read from
 * @param position the position within the stream from which to start reading
 * @param necessaryLen the number of bytes that are absolutely necessary to read
 * @param extraLen the number of extra bytes that would be nice to read
 * @return true if and only if extraLen is > 0 and reading those extra bytes was successful
 * @throws IOException if failed to read the necessary bytes
 */
public static boolean preadWithExtra(ByteBuff buff, FSDataInputStream dis, long position,
    int necessaryLen, int extraLen) throws IOException {
  int remain = necessaryLen + extraLen;
  byte[] buf = new byte[remain];
  int bytesRead = 0;
  while (bytesRead < necessaryLen) {
    int ret = dis.read(position + bytesRead, buf, bytesRead, remain);
    if (ret < 0) {
      throw new IOException("Premature EOF from inputStream (positional read returned " + ret
          + ", was trying to read " + necessaryLen + " necessary bytes and " + extraLen
          + " extra bytes, successfully read " + bytesRead);
    }
    bytesRead += ret;
    remain -= ret;
  }
  // Copy the bytes from on-heap bytes[] to ByteBuffer[] now, and after resolving HDFS-3246, we
  // will read the bytes to ByteBuffer[] directly without allocating any on-heap byte[].
  // TODO I keep the bytes copy here, because I want to abstract the ByteBuffer[]
  // preadWithExtra method for the upper layer, only need to refactor this method if the
  // ByteBuffer pread is OK.
  copyToByteBuff(buf, 0, bytesRead, buff);
  return (extraLen > 0) && (bytesRead == necessaryLen + extraLen);
}
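
The helper above relies on the positional-read contract of FSDataInputStream: read(position, buffer, offset, length) may return fewer bytes than requested, so callers loop until the required count is reached. Below is a minimal sketch of that retry pattern on its own, with class and method names of my own choosing rather than anything from BlockIOUtils.

import java.io.EOFException;
import java.io.IOException;

import org.apache.hadoop.fs.FSDataInputStream;

public final class PreadUtil {
  /** Retry positional reads until exactly {@code length} bytes have been read. */
  public static void preadFully(FSDataInputStream in, long position,
      byte[] buffer, int offset, int length) throws IOException {
    int done = 0;
    while (done < length) {
      int ret = in.read(position + done, buffer, offset + done, length - done);
      if (ret < 0) {
        throw new EOFException("Premature EOF after " + done + " of " + length + " bytes");
      }
      done += ret;
    }
  }
}

Note that FSDataInputStream already provides readFully(long, byte[], int, int) with equivalent semantics; the HBase helper keeps its own loop because it also accounts for the optional "extra" bytes.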
 
Example 6  Project: hadoop  File: DFSTestUtil.java
/**
 * Verify that two files have different contents.
 *
 * @param fs The file system containing the two files.
 * @param p1 The path of the first file.
 * @param p2 The path of the second file.
 * @param len The length of the two files.
 * @throws IOException
 */
public static void verifyFilesNotEqual(FileSystem fs, Path p1, Path p2,
    int len)
        throws IOException {
  final FSDataInputStream in1 = fs.open(p1);
  final FSDataInputStream in2 = fs.open(p2);
  try {
    for (int i = 0; i < len; i++) {
      if (in1.read() != in2.read()) {
        return;
      }
    }
    fail("files are equal, but should not be");
  } finally {
    in1.close();
    in2.close();
  }
}
 
Example 7  Project: hadoop  File: AbstractContractOpenTest.java
@Test
public void testOpenFileTwice() throws Throwable {
  describe("verify that two opened file streams are independent");
  Path path = path("testopenfiletwice.txt");
  byte[] block = dataset(TEST_FILE_LEN, 0, 255);
  //this file now has a simple rule: offset => value
  createFile(getFileSystem(), path, false, block);
  //open first
  FSDataInputStream instream1 = getFileSystem().open(path);
  int c = instream1.read();
  assertEquals(0,c);
  FSDataInputStream instream2 = null;
  try {
    instream2 = getFileSystem().open(path);
    assertEquals("first read of instream 2", 0, instream2.read());
    assertEquals("second read of instream 1", 1, instream1.read());
    instream1.close();
    assertEquals("second read of instream 2", 1, instream2.read());
    //close instream1 again
    instream1.close();
  } finally {
    IOUtils.closeStream(instream1);
    IOUtils.closeStream(instream2);
  }
}
 
Example 8  Project: big-c  File: TestSwiftFileSystemRename.java
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testRenameFile() throws Exception {
  assumeRenameSupported();

  final Path old = new Path("/test/alice/file");
  final Path newPath = new Path("/test/bob/file");
  fs.mkdirs(newPath.getParent());
  final FSDataOutputStream fsDataOutputStream = fs.create(old);
  final byte[] message = "Some data".getBytes();
  fsDataOutputStream.write(message);
  fsDataOutputStream.close();

  assertTrue(fs.exists(old));
  rename(old, newPath, true, false, true);

  final FSDataInputStream bobStream = fs.open(newPath);
  final byte[] bytes = new byte[512];
  final int read = bobStream.read(bytes);
  bobStream.close();
  final byte[] buffer = new byte[read];
  System.arraycopy(bytes, 0, buffer, 0, read);
  assertEquals(new String(message), new String(buffer));
}
 
Example 9  Project: RDFS  File: TestHftpFileSystem.java
public void readHftpFile(
  boolean strictContentLength, boolean sendContentLength
)
  throws IOException, URISyntaxException {
  int bufSize = 128 * 1024;
  byte[] buf = DFSTestUtil.generateSequentialBytes(0, bufSize);
  final ByteArrayInputStream inputStream = new ByteArrayInputStream(buf);
  final long contentLength = bufSize + 1;
  Configuration conf = new Configuration();

  conf.setBoolean(HftpFileSystem.STRICT_CONTENT_LENGTH, strictContentLength);

  HftpFileSystem fileSystem =
    new MockHftpFileSystem(
      sendContentLength ? contentLength : null, inputStream, conf
    );
  FSDataInputStream dataInputStream = fileSystem.open(new Path("dont-care"));
  byte[] readBuf = new byte[1024];

  while (dataInputStream.read(readBuf) > -1) {
    //nothing
  }

  dataInputStream.close();
}
 
Example 10  Project: hbase  File: TestFileLink.java
private static void skipBuffer(FSDataInputStream in, byte v) throws IOException {
  byte[] data = new byte[8192];
  try {
    int n;
    while ((n = in.read(data)) == data.length) {
      for (int i = 0; i < data.length; ++i) {
        if (data[i] != v)
          throw new Exception("File changed");
      }
    }
  } catch (Exception e) {
  }
}
 
Example 11  Project: hadoop  File: TestFiDataTransferProtocol2.java
/**
 * 1. create files with dfs
 * 2. write MIN_N_PACKET to MAX_N_PACKET packets
 * 3. close file
 * 4. open the same file
 * 5. read the bytes and compare results
 */
private static void writeSeveralPackets(String methodName) throws IOException {
  final Random r = FiTestUtil.RANDOM.get();
  final int nPackets = FiTestUtil.nextRandomInt(MIN_N_PACKET, MAX_N_PACKET + 1);
  final int lastPacketSize = FiTestUtil.nextRandomInt(1, PACKET_SIZE + 1);
  final int size = (nPackets - 1)*PACKET_SIZE + lastPacketSize;

  FiTestUtil.LOG.info("size=" + size + ", nPackets=" + nPackets
      + ", lastPacketSize=" + lastPacketSize);

  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf
      ).numDataNodes(REPLICATION + 2).build();
  final FileSystem dfs = cluster.getFileSystem();
  try {
    final Path p = new Path("/" + methodName + "/foo");
    final FSDataOutputStream out = createFile(dfs, p);

    final long seed = r.nextLong();
    final Random ran = new Random(seed);
    ran.nextBytes(bytes);
    out.write(bytes, 0, size);
    out.close();

    final FSDataInputStream in = dfs.open(p);
    int totalRead = 0;
    int nRead = 0;
    while ((nRead = in.read(toRead, totalRead, size - totalRead)) > 0) {
      totalRead += nRead;
    }
    Assert.assertEquals("Cannot read file.", size, totalRead);
    for (int i = 0; i < size; i++) {
      Assert.assertTrue("File content differ.", bytes[i] == toRead[i]);
    }
  }
  finally {
    dfs.close();
    cluster.shutdown();
  }
}
 
Example 12  Project: big-c  File: AppendTestUtil.java
public static void check(DistributedFileSystem fs, Path p, int position,
    int length) throws IOException {
  byte[] buf = new byte[length];
  int i = 0;
  try {
    FSDataInputStream in = fs.open(p);
    in.read(position, buf, 0, buf.length);
    for(i = position; i < length + position; i++) {
      assertEquals((byte) i, buf[i - position]);
    }
    in.close();
  } catch(IOException ioe) {
    throw new IOException("p=" + p + ", length=" + length + ", i=" + i, ioe);
  }
}
 
Example 13
/** Test whether corrupt replicas are detected correctly during pipeline
 * recoveries.
 */
@Test
public void testPipelineRecoveryForLastBlock() throws IOException {
  DFSClientFaultInjector faultInjector
      = Mockito.mock(DFSClientFaultInjector.class);
  DFSClientFaultInjector oldInjector = DFSClientFaultInjector.instance;
  DFSClientFaultInjector.instance = faultInjector;
  Configuration conf = new HdfsConfiguration();

  conf.setInt(DFSConfigKeys.DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_KEY, 3);
  MiniDFSCluster cluster = null;

  try {
    int numDataNodes = 3;
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
    cluster.waitActive();
    FileSystem fileSys = cluster.getFileSystem();

    Path file = new Path("dataprotocol1.dat");
    Mockito.when(faultInjector.failPacket()).thenReturn(true);
    DFSTestUtil.createFile(fileSys, file, 68000000L, (short)numDataNodes, 0L);

    // At this point, NN should have accepted only valid replicas.
    // Read should succeed.
    FSDataInputStream in = fileSys.open(file);
    try {
      int c = in.read();
      // Test will fail with BlockMissingException if NN does not update the
      // replica state based on the latest report.
    } catch (org.apache.hadoop.hdfs.BlockMissingException bme) {
      Assert.fail("Block is missing because the file was closed with"
          + " corrupt replicas.");
    }
  } finally {
    DFSClientFaultInjector.instance = oldInjector;
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 14  Project: Bats  File: FooterGatherer.java
private static final void readFully(FSDataInputStream stream, long start, byte[] output, int offset, int len) throws IOException{
  int bytesRead = 0;
  while(bytesRead > -1 && bytesRead < len){
    bytesRead += stream.read(start+bytesRead, output, offset + bytesRead, len-bytesRead);
  }
}
 
Example 15  Project: hadoop  File: TestHFlush.java
/**
 * The method starts new cluster with defined Configuration; creates a file
 * with specified block_size and writes 10 equal sections in it; it also calls
 * hflush/hsync after each write and throws an IOException in case of an error.
 * 
 * @param conf cluster configuration
 * @param fileName of the file to be created and processed as required
 * @param block_size value to be used for the file's creation
 * @param replicas is the number of replicas
 * @param isSync hsync or hflush         
 * @param syncFlags specify the semantic of the sync/flush
 * @throws IOException in case of any errors
 */
public static void doTheJob(Configuration conf, final String fileName,
    long block_size, short replicas, boolean isSync,
    EnumSet<SyncFlag> syncFlags) throws IOException {
  byte[] fileContent;
  final int SECTIONS = 10;

  fileContent = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
                                             .numDataNodes(replicas).build();
  // Make sure we work with DFS in order to utilize all its functionality
  DistributedFileSystem fileSystem = cluster.getFileSystem();

  FSDataInputStream is;
  try {
    Path path = new Path(fileName);
    final String pathName = new Path(fileSystem.getWorkingDirectory(), path)
        .toUri().getPath();
    FSDataOutputStream stm = fileSystem.create(path, false, 4096, replicas,
        block_size);
    System.out.println("Created file " + fileName);

    int tenth = AppendTestUtil.FILE_SIZE/SECTIONS;
    int rounding = AppendTestUtil.FILE_SIZE - tenth * SECTIONS;
    for (int i=0; i<SECTIONS; i++) {
      System.out.println("Writing " + (tenth * i) + " to "
          + (tenth * (i + 1)) + " section to file " + fileName);
      // write to the file
      stm.write(fileContent, tenth * i, tenth);
      
      // Wait while hflush/hsync pushes all packets through built pipeline
      if (isSync) {
        ((DFSOutputStream)stm.getWrappedStream()).hsync(syncFlags);
      } else {
        ((DFSOutputStream)stm.getWrappedStream()).hflush();
      }
      
      // Check file length if updatelength is required
      if (isSync && syncFlags.contains(SyncFlag.UPDATE_LENGTH)) {
        long currentFileLength = fileSystem.getFileStatus(path).getLen();
        assertEquals(
          "File size doesn't match for hsync/hflush with updating the length",
          tenth * (i + 1), currentFileLength);
      } else if (isSync && syncFlags.contains(SyncFlag.END_BLOCK)) {
        LocatedBlocks blocks = fileSystem.dfs.getLocatedBlocks(pathName, 0);
        assertEquals(i + 1, blocks.getLocatedBlocks().size());
      }

      byte [] toRead = new byte[tenth];
      byte [] expected = new byte[tenth];
      System.arraycopy(fileContent, tenth * i, expected, 0, tenth);
      // Open the same file for read. Need to create new reader after every write operation(!)
      is = fileSystem.open(path);
      is.seek(tenth * i);
      int readBytes = is.read(toRead, 0, tenth);
      System.out.println("Has read " + readBytes);
      assertTrue("Should've get more bytes", (readBytes > 0) && (readBytes <= tenth));
      is.close();
      checkData(toRead, 0, readBytes, expected, "Partial verification");
    }
    System.out.println("Writing " + (tenth * SECTIONS) + " to " + (tenth * SECTIONS + rounding) + " section to file " + fileName);
    stm.write(fileContent, tenth * SECTIONS, rounding);
    stm.close();

    assertEquals("File size doesn't match ", AppendTestUtil.FILE_SIZE, fileSystem.getFileStatus(path).getLen());
    AppendTestUtil.checkFullFile(fileSystem, path, fileContent.length, fileContent, "hflush()");
  } finally {
    fileSystem.close();
    cluster.shutdown();
  }
}
 
Example 16  Project: big-c  File: TestFsDatasetCacheRevocation.java
/**
 * Test that when we have an uncache request, and the client refuses to release
 * the replica for a long time, we will un-mlock it.
 */
@Test(timeout=120000)
public void testRevocation() throws Exception {
  assumeTrue(NativeCodeLoader.isNativeCodeLoaded() && !Path.WINDOWS);
  BlockReaderTestUtil.enableHdfsCachingTracing();
  BlockReaderTestUtil.enableShortCircuitShmTracing();
  Configuration conf = getDefaultConf();
  // Set a really short revocation timeout.
  conf.setLong(DFSConfigKeys.DFS_DATANODE_CACHE_REVOCATION_TIMEOUT_MS, 250L);
  // Poll very often
  conf.setLong(DFSConfigKeys.DFS_DATANODE_CACHE_REVOCATION_POLLING_MS, 2L);
  MiniDFSCluster cluster = null;
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  DistributedFileSystem dfs = cluster.getFileSystem();

  // Create and cache a file.
  final String TEST_FILE = "/test_file2";
  DFSTestUtil.createFile(dfs, new Path(TEST_FILE),
      BLOCK_SIZE, (short)1, 0xcafe);
  dfs.addCachePool(new CachePoolInfo("pool"));
  long cacheDirectiveId =
      dfs.addCacheDirective(new CacheDirectiveInfo.Builder().
          setPool("pool").setPath(new Path(TEST_FILE)).
          setReplication((short) 1).build());
  FsDatasetSpi<?> fsd = cluster.getDataNodes().get(0).getFSDataset();
  DFSTestUtil.verifyExpectedCacheUsage(BLOCK_SIZE, 1, fsd);

  // Mmap the file.
  FSDataInputStream in = dfs.open(new Path(TEST_FILE));
  ByteBuffer buf =
      in.read(null, BLOCK_SIZE, EnumSet.noneOf(ReadOption.class));

  // Attempt to uncache file.  The file should get uncached.
  LOG.info("removing cache directive {}", cacheDirectiveId);
  dfs.removeCacheDirective(cacheDirectiveId);
  LOG.info("finished removing cache directive {}", cacheDirectiveId);
  Thread.sleep(1000);
  DFSTestUtil.verifyExpectedCacheUsage(0, 0, fsd);

  // Cleanup
  in.releaseBuffer(buf);
  in.close();
  cluster.shutdown();
}
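
Example 16 uses the enhanced ("zero-copy") read, read(ByteBufferPool, int, EnumSet<ReadOption>), which returns a ByteBuffer that may be backed by an mmap of a cached replica and must later be handed back with releaseBuffer(). Below is a minimal sketch of the same pattern outside the test harness; the path and the choice of ElasticByteBufferPool are assumptions of mine.

import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.EnumSet;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.ReadOption;
import org.apache.hadoop.io.ElasticByteBufferPool;

public class ZeroCopyReadDemo {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path path = new Path("/tmp/cached_file");          // hypothetical path
    ElasticByteBufferPool pool = new ElasticByteBufferPool();

    try (FSDataInputStream in = fs.open(path)) {
      // Returns a memory-mapped buffer when possible, otherwise falls back to a
      // buffer allocated from the pool; returns null at end of stream.
      ByteBuffer buf = in.read(pool, 4096, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
      if (buf != null) {
        try {
          System.out.println("zero-copy read returned " + buf.remaining() + " bytes");
        } finally {
          in.releaseBuffer(buf);                       // always return the buffer
        }
      }
    }
  }
}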
 
Example 17  Project: RDFS  File: TestTruncatedInputBug.java
/**
 * When mark() is used on BufferedInputStream, the request
 * size on the checksum file system can be small.  However,
 * checksum file system currently depends on the request size
 * >= bytesPerSum to work properly.
 */
public void testTruncatedInputBug() throws IOException {
  final int ioBufSize = 512;
  final int fileSize = ioBufSize*4;
  int filePos = 0;

  Configuration conf = new Configuration();
  conf.setInt("io.file.buffer.size", ioBufSize);
  FileSystem fileSys = FileSystem.getLocal(conf);

  try {
    // First create a test input file.
    Path testFile = new Path(TEST_ROOT_DIR, "HADOOP-1489");
    writeFile(fileSys, testFile, fileSize);
    assertTrue(fileSys.exists(testFile));
    assertTrue(fileSys.getLength(testFile) == fileSize);

    // Now read the file for ioBufSize bytes
    FSDataInputStream in = fileSys.open(testFile, ioBufSize);
    // seek beyond data buffered by open
    filePos += ioBufSize * 2 + (ioBufSize - 10);  
    in.seek(filePos);

    // read 4 more bytes before marking
    for (int i = 0; i < 4; ++i) {  
      if (in.read() == -1) {
        break;
      }
      ++filePos;
    }

    // Now set mark() to trigger the bug
    // NOTE: in the fixed code, mark() does nothing (not supported) and
    //   hence won't trigger this bug.
    in.mark(1);
    System.out.println("MARKED");
    
    // Try to read the rest
    while (filePos < fileSize) {
      if (in.read() == -1) {
        break;
      }
      ++filePos;
    }
    in.close();

    System.out.println("Read " + filePos + " bytes."
                       + " file size=" + fileSize);
    assertTrue(filePos == fileSize);

  } finally {
    try {
      fileSys.close();
    } catch (Exception e) {
      // noop
    }
  }
}
 
Example 18  Project: RDFS  File: TestDFSIsUnderConstruction.java
public void testSecondLastBlockNotReceived() throws Exception {
  String fileName = "/testSecondLastBlockNotReceived";
  Path growingFile = new Path(fileName); 
  FSDataInputStream fis = null;
  FSDataOutputStream fos = fs.create(growingFile, false, 1024, (short)1, 1024);
  try {
    int fileLength = 2096;
    AppendTestUtil.write(fos, 0, fileLength);
    fos.sync();

    fis = fs.open(growingFile);
    for (int i = 0; i < fileLength; i++) {
      fis.read();
    }
    fis.close();

    FSNamesystem fsns = cluster.getNameNode().namesystem;
    INode[] inodes = fsns.dir.getExistingPathINodes(fileName);
    BlockInfo[] bis = ((INodeFile) (inodes[inodes.length - 1])).getBlocks();
    bis[bis.length - 2].setNumBytes(1);

    try {
      fis = fs.open(growingFile);
      TestCase.fail();
    } catch (IOException e) {
    }
    bis[bis.length - 2].setNumBytes(1024);

    bis[bis.length - 1].setNumBytes(1);
    fis = fs.open(growingFile);
    for (int i = 0; i < fileLength; i++) {
      fis.read();
    }
  } finally {
    if (fos != null) {
      fos.close();
    }
    if (fis != null) {
      fis.close();
    }
  }
}
 
Example 19  Project: hadoop  File: TestSnapshotFileLength.java
/**
 * Adding as part of jira HDFS-5343
 * Test for checking the cat command on snapshot path it
 *  cannot read a file beyond snapshot file length
 * @throws Exception
 */
@Test (timeout = 600000)
public void testSnapshotFileLengthWithCatCommand() throws Exception {

  FSDataInputStream fis = null;
  FileStatus fileStatus = null;

  int bytesRead;
  byte[] buffer = new byte[BLOCKSIZE * 8];

  hdfs.mkdirs(sub);
  Path file1 = new Path(sub, file1Name);
  DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, SEED);

  hdfs.allowSnapshot(sub);
  hdfs.createSnapshot(sub, snapshot1);

  DFSTestUtil.appendFile(hdfs, file1, BLOCKSIZE);

  // Make sure we can read the entire file via its non-snapshot path.
  fileStatus = hdfs.getFileStatus(file1);
  assertEquals("Unexpected file length", BLOCKSIZE * 2, fileStatus.getLen());
  fis = hdfs.open(file1);
  bytesRead = fis.read(buffer, 0, buffer.length);
  assertEquals("Unexpected # bytes read", BLOCKSIZE * 2, bytesRead);
  fis.close();

  Path file1snap1 =
      SnapshotTestHelper.getSnapshotPath(sub, snapshot1, file1Name);
  fis = hdfs.open(file1snap1);
  fileStatus = hdfs.getFileStatus(file1snap1);
  assertEquals(fileStatus.getLen(), BLOCKSIZE);
  // Make sure we can only read up to the snapshot length.
  bytesRead = fis.read(buffer, 0, buffer.length);
  assertEquals("Unexpected # bytes read", BLOCKSIZE, bytesRead);
  fis.close();

  PrintStream outBackup = System.out;
  PrintStream errBackup = System.err;
  ByteArrayOutputStream bao = new ByteArrayOutputStream();
  System.setOut(new PrintStream(bao));
  System.setErr(new PrintStream(bao));
  // Make sure we can cat the file upto to snapshot length
  FsShell shell = new FsShell();
  try {
    ToolRunner.run(conf, shell, new String[] { "-cat",
    "/TestSnapshotFileLength/sub1/.snapshot/snapshot1/file1" });
    assertEquals("Unexpected # bytes from -cat", BLOCKSIZE, bao.size());
  } finally {
    System.setOut(outBackup);
    System.setErr(errBackup);
  }
}
 
Example 20  Project: RDFS  File: TestKosmosFileSystem.java
public void testFileIO() throws Exception {
    Path subDir1 = new Path("dir.1");
    Path file1 = new Path("dir.1/foo.1");

    kosmosFileSystem.mkdirs(baseDir);
    assertTrue(kosmosFileSystem.isDirectory(baseDir));
    kosmosFileSystem.setWorkingDirectory(baseDir);

    kosmosFileSystem.mkdirs(subDir1);

    FSDataOutputStream s1 = kosmosFileSystem.create(file1, true, 4096, (short) 1, (long) 4096, null);

    int bufsz = 4096;
    byte[] data = new byte[bufsz];

    for (int i = 0; i < data.length; i++)
        data[i] = (byte) (i % 16);

    // write 4 bytes and read them back; read API should return a byte per call
    s1.write(32);
    s1.write(32);
    s1.write(32);
    s1.write(32);
    // write some data
    s1.write(data, 0, data.length);
    // flush out the changes
    s1.close();

    // Read the stuff back and verify it is correct
    FSDataInputStream s2 = kosmosFileSystem.open(file1, 4096);
    int v;

    v = s2.read();
    assertEquals(v, 32);
    v = s2.read();
    assertEquals(v, 32);
    v = s2.read();
    assertEquals(v, 32);
    v = s2.read();
    assertEquals(v, 32);

    assertEquals(s2.available(), data.length);

    byte[] buf = new byte[bufsz];
    s2.read(buf, 0, buf.length);
    for (int i = 0; i < data.length; i++)
        assertEquals(data[i], buf[i]);

    assertEquals(s2.available(), 0);

    s2.close();

    kosmosFileSystem.delete(file1, true);
    assertFalse(kosmosFileSystem.exists(file1));        
    kosmosFileSystem.delete(subDir1, true);
    assertFalse(kosmosFileSystem.exists(subDir1));        
    kosmosFileSystem.delete(baseDir, true);
    assertFalse(kosmosFileSystem.exists(baseDir));        
}