org.apache.hadoop.fs.FSDataOutputStream#writeByte() Source Code Examples

Listed below are example usages of org.apache.hadoop.fs.FSDataOutputStream#writeByte(), collected from several open-source projects; follow each project link to view the full source code on GitHub.
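
For orientation: writeByte(int) writes only the low-order 8 bits of its argument, one byte per call, which is why the examples below use it inside loops to fill files byte by byte. The minimal sketch below is not taken from any of the listed projects; the default Configuration and the path /tmp/writeByte-demo are illustrative assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class WriteByteDemo {
  public static void main(String[] args) throws Exception {
    // Illustrative only: uses the default (local) FileSystem and a made-up path.
    FileSystem fs = FileSystem.get(new Configuration());
    Path file = new Path("/tmp/writeByte-demo");

    // writeByte(int) stores the low-order 8 bits of the argument, one byte per call.
    try (FSDataOutputStream out = fs.create(file, true)) {
      for (int i = 0; i < 16; i++) {
        out.writeByte('a');
      }
    }

    // Read a byte back to confirm what was written.
    try (FSDataInputStream in = fs.open(file)) {
      System.out.println((char) in.readByte()); // prints 'a'
    }

    fs.delete(file, false);
  }
}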

Example 1  Project: hadoop   File: TestAbandonBlock.java
@Test
/** Make sure that the quota is decremented correctly when a block is abandoned */
public void testQuotaUpdatedWhenBlockAbandoned() throws IOException {
  // Setting diskspace quota to 3MB
  fs.setQuota(new Path("/"), HdfsConstants.QUOTA_DONT_SET, 3 * 1024 * 1024);

  // Start writing a file with 2 replicas to ensure each datanode has one.
  // Block Size is 1MB.
  String src = FILE_NAME_PREFIX + "test_quota1";
  FSDataOutputStream fout = fs.create(new Path(src), true, 4096, (short)2, 1024 * 1024);
  for (int i = 0; i < 1024; i++) {
    fout.writeByte(123);
  }

  // Shut down one datanode, causing the block to be abandoned.
  cluster.getDataNodes().get(0).shutdown();

  // Close the file; a new block will be allocated with 2MB of pending size.
  try {
    fout.close();
  } catch (QuotaExceededException e) {
    fail("Unexpected quota exception when closing fout");
  }
}
 
Example 2  Project: hadoop   File: TestRenameWithSnapshots.java
/**
 * Similar to testRenameUCFileInSnapshot, but renames first and then appends
 * to the file without closing it. Unit test for HDFS-5425.
 */
@Test
public void testAppendFileAfterRenameInSnapshot() throws Exception {
  final Path test = new Path("/test");
  final Path foo = new Path(test, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
  SnapshotTestHelper.createSnapshot(hdfs, test, "s0");
  // rename bar --> bar2
  final Path bar2 = new Path(foo, "bar2");
  hdfs.rename(bar, bar2);
  // append to the file and keep it under construction.
  FSDataOutputStream out = hdfs.append(bar2);
  out.writeByte(0);
  ((DFSOutputStream) out.getWrappedStream()).hsync(
      EnumSet.of(SyncFlag.UPDATE_LENGTH));

  // save namespace and restart
  restartClusterAndCheckImage(true);
}
 
Example 3  Project: big-c   File: TestAbandonBlock.java
@Test
/** Make sure that the quota is decremented correctly when a block is abandoned */
public void testQuotaUpdatedWhenBlockAbandoned() throws IOException {
  // Setting diskspace quota to 3MB
  fs.setQuota(new Path("/"), HdfsConstants.QUOTA_DONT_SET, 3 * 1024 * 1024);

  // Start writing a file with 2 replicas to ensure each datanode has one.
  // Block Size is 1MB.
  String src = FILE_NAME_PREFIX + "test_quota1";
  FSDataOutputStream fout = fs.create(new Path(src), true, 4096, (short)2, 1024 * 1024);
  for (int i = 0; i < 1024; i++) {
    fout.writeByte(123);
  }

  // Shut down one datanode, causing the block to be abandoned.
  cluster.getDataNodes().get(0).shutdown();

  // Close the file; a new block will be allocated with 2MB of pending size.
  try {
    fout.close();
  } catch (QuotaExceededException e) {
    fail("Unexpected quota exception when closing fout");
  }
}
 
Example 4  Project: big-c   File: TestRenameWithSnapshots.java
/**
 * Similar to testRenameUCFileInSnapshot, but renames first and then appends
 * to the file without closing it. Unit test for HDFS-5425.
 */
@Test
public void testAppendFileAfterRenameInSnapshot() throws Exception {
  final Path test = new Path("/test");
  final Path foo = new Path(test, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
  SnapshotTestHelper.createSnapshot(hdfs, test, "s0");
  // rename bar --> bar2
  final Path bar2 = new Path(foo, "bar2");
  hdfs.rename(bar, bar2);
  // append to the file and keep it under construction.
  FSDataOutputStream out = hdfs.append(bar2);
  out.writeByte(0);
  ((DFSOutputStream) out.getWrappedStream()).hsync(
      EnumSet.of(SyncFlag.UPDATE_LENGTH));

  // save namespace and restart
  restartClusterAndCheckImage(true);
}
 
Example 5  Project: RDFS   File: LoadGenerator.java
/** Create a file with a length of <code>fileSize</code>.
 * The file is filled with 'a'.
 */
private void genFile(Path file, long fileSize) throws IOException {
  long startTime = System.currentTimeMillis();
  FSDataOutputStream out = fs.create(file, true, 
      getConf().getInt("io.file.buffer.size", 4096),
      (short)getConf().getInt("dfs.replication", 3),
      fs.getDefaultBlockSize());
  executionTime[CREATE] += (System.currentTimeMillis()-startTime);
  totalNumOfOps[CREATE]++;

  for (long i=0; i<fileSize; i++) {
    out.writeByte('a');
  }
  startTime = System.currentTimeMillis();
  out.close();
  executionTime[WRITE_CLOSE] += (System.currentTimeMillis()-startTime);
  totalNumOfOps[WRITE_CLOSE]++;
}
 
Example 6  Project: RDFS   File: LoadGenerator.java
/** Create a file with a length of <code>fileSize</code>.
 * The file is filled with 'a'.
 */
private void genFile(Path file, long fileSize) throws IOException {
  long startTime = System.currentTimeMillis();
  FSDataOutputStream out = fs.create(file, true,
      getConf().getInt("io.file.buffer.size", 4096),
      (short)getConf().getInt("dfs.replication", 3),
      fs.getDefaultBlockSize());
  executionTime[CREATE] += (System.currentTimeMillis()-startTime);
  totalNumOfOps[CREATE]++;

  for (long i=0; i<fileSize; i++) {
    out.writeByte('a');
  }
  startTime = System.currentTimeMillis();
  out.close();
  executionTime[WRITE_CLOSE] += (System.currentTimeMillis()-startTime);
  totalNumOfOps[WRITE_CLOSE]++;
}
 
Example 7  Project: hadoop-gpu   File: LoadGenerator.java
/** Create a file with a length of <code>fileSize</code>.
 * The file is filled with 'a'.
 */
private void genFile(Path file, long fileSize) throws IOException {
  long startTime = System.currentTimeMillis();
  FSDataOutputStream out = fs.create(file, true, 
      getConf().getInt("io.file.buffer.size", 4096),
      (short)getConf().getInt("dfs.replication", 3),
      fs.getDefaultBlockSize());
  executionTime[CREATE] += (System.currentTimeMillis()-startTime);
  totalNumOfOps[CREATE]++;

  for (long i=0; i<fileSize; i++) {
    out.writeByte('a');
  }
  startTime = System.currentTimeMillis();
  out.close();
  executionTime[WRITE_CLOSE] += (System.currentTimeMillis()-startTime);
  totalNumOfOps[WRITE_CLOSE]++;
}
 
Example 8  Project: hadoop   File: DataGenerator.java
/** Create a file with the name <code>file</code> and 
 * a length of <code>fileSize</code>. The file is filled with character 'a'.
 */
private void genFile(Path file, long fileSize) throws IOException {
  FSDataOutputStream out = fc.create(file,
      EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
      CreateOpts.createParent(), CreateOpts.bufferSize(4096),
      CreateOpts.repFac((short) 3));
  for(long i=0; i<fileSize; i++) {
    out.writeByte('a');
  }
  out.close();
}
 
Example 9  Project: big-c   File: DataGenerator.java
/** Create a file with the name <code>file</code> and 
 * a length of <code>fileSize</code>. The file is filled with character 'a'.
 */
private void genFile(Path file, long fileSize) throws IOException {
  FSDataOutputStream out = fc.create(file,
      EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
      CreateOpts.createParent(), CreateOpts.bufferSize(4096),
      CreateOpts.repFac((short) 3));
  for(long i=0; i<fileSize; i++) {
    out.writeByte('a');
  }
  out.close();
}
 
Example 10  Project: succinct   File: TestUtils.java
public static FSDataInputStream getStream(ByteBuffer buf) throws IOException {
  File tmpDir = Files.createTempDir();
  Path filePath = new Path(tmpDir.getAbsolutePath() + "/testOut");
  FileSystem fs = FileSystem.get(filePath.toUri(), new Configuration());
  FSDataOutputStream fOut = fs.create(filePath);
  buf.rewind();
  while (buf.hasRemaining()) {
    fOut.writeByte(buf.get());
  }
  fOut.close();
  buf.rewind();
  return fs.open(filePath);
}
 
Example 11  Project: RDFS   File: DataGenerator.java
/** Create a file with the name <code>file</code> and 
 * a length of <code>fileSize</code>. The file is filled with character 'a'.
 */
private void genFile(Path file, long fileSize) throws IOException {
  FSDataOutputStream out = fs.create(file, true, 
      getConf().getInt("io.file.buffer.size", 4096),
      (short)getConf().getInt("dfs.replication", 3),
      fs.getDefaultBlockSize());
  for(long i=0; i<fileSize; i++) {
    out.writeByte('a');
  }
  out.close();
}
 
Example 12  Project: RDFS   File: DataGenerator.java
/**
 * Create a file with the name <code>file</code> and a length of
 * <code>fileSize</code>. The file is filled with character 'a'.
 */
@SuppressWarnings("unused")
private void genFile(Path file, long fileSize) throws IOException {
  FSDataOutputStream out = fs.create(file, true,
      getConf().getInt("io.file.buffer.size", 4096),
      (short) getConf().getInt("dfs.replication", 3),
      fs.getDefaultBlockSize());
  for (long i = 0; i < fileSize; i++) {
    out.writeByte('a');
  }
  out.close();
}
 
Example 13  Project: hadoop-gpu   File: DataGenerator.java
/** Create a file with the name <code>file</code> and 
 * a length of <code>fileSize</code>. The file is filled with character 'a'.
 */
private void genFile(Path file, long fileSize) throws IOException {
  FSDataOutputStream out = fs.create(file, true, 
      getConf().getInt("io.file.buffer.size", 4096),
      (short)getConf().getInt("dfs.replication", 3),
      fs.getDefaultBlockSize());
  for(long i=0; i<fileSize; i++) {
    out.writeByte('a');
  }
  out.close();
}
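
All of the examples above call writeByte in a loop, which is convenient for tests and load generators. For larger payloads, handing the stream a whole buffer with write(byte[], int, int) (inherited from java.io.OutputStream) usually reduces per-call overhead. The sketch below is illustrative only and not from any of the listed projects; the path /tmp/bulk-demo and the default Configuration are assumptions.

import java.util.Arrays;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class BulkWriteDemo {
  public static void main(String[] args) throws Exception {
    // Illustrative only: default (local) FileSystem and a made-up path.
    FileSystem fs = FileSystem.get(new Configuration());
    Path file = new Path("/tmp/bulk-demo");

    byte[] chunk = new byte[4096];
    Arrays.fill(chunk, (byte) 'a');  // same fill pattern the generators above use

    try (FSDataOutputStream out = fs.create(file, true)) {
      // One buffered call replaces 4096 individual writeByte calls.
      out.write(chunk, 0, chunk.length);
    }
  }
}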