org.apache.hadoop.fs.FSDataInputStream#readUTF() source code examples

Listed below are example usages of org.apache.hadoop.fs.FSDataInputStream#readUTF(), collected from open-source projects. readUTF() comes from the java.io.DataInput interface: it reads a string encoded in length-prefixed modified UTF-8, i.e. data that was written by a matching DataOutput#writeUTF() call.

Example 1  Project: BigData-In-Practice  File: HdfsCreate.java
public static void main(String[] args) throws IOException {
    // Get the file system
    FileSystem fileSystem = SysUtil.getFileSystem();

    // If the write fails due to permissions, relax them first: hadoop dfs -chmod 777 /hadoop
    Path path = new Path("/hadoop/create.txt");
    // Get an output stream
    FSDataOutputStream outputStream = fileSystem.create(path);
    // Write some content
    outputStream.writeUTF("Hello HDFS!");
    outputStream.close();

    // ------ After writing, read the content back ------
    // Get an input stream for the file
    FSDataInputStream inputStream = fileSystem.open(path);
    String data = inputStream.readUTF();
    System.out.println(data);
    // Prints: Hello HDFS!

    fileSystem.close();
}
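
The same round trip can be written with try-with-resources, so both streams are closed even if writeUTF or readUTF throws; a minimal sketch, reusing the SysUtil helper from the example above:

public static void main(String[] args) throws IOException {
    FileSystem fileSystem = SysUtil.getFileSystem();
    Path path = new Path("/hadoop/create.txt");

    // The output stream is closed automatically, even if writeUTF throws
    try (FSDataOutputStream out = fileSystem.create(path)) {
        out.writeUTF("Hello HDFS!");
    }

    // Likewise for the read side
    try (FSDataInputStream in = fileSystem.open(path)) {
        System.out.println(in.readUTF()); // Prints: Hello HDFS!
    }

    fileSystem.close();
}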
 
Example 2  Project: cephfs-hadoop  File: HcfsFileSystemTest.java
@org.junit.Test
public void testTextWriteAndRead() throws Exception{

    String testString = "Is there anyone out there?";

    FSDataOutputStream dfsOut = fs.create(new Path("test1.txt"));
    dfsOut.writeUTF(testString);
    dfsOut.close();

    FSDataInputStream dfsIn = fs.open(new Path("test1.txt"));
    String readChars = dfsIn.readUTF();
    dfsIn.close();

    assertEquals(testString, readChars);

    fs.delete(new Path("test1.txt"), true);

    assertFalse(fs.exists(new Path("test1.txt")));
}
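
A caveat worth knowing: readUTF() only understands the format produced by writeUTF(), i.e. a two-byte length prefix followed by modified UTF-8 bytes. Calling it on a file that was written as plain text will misread the first two bytes as a length and typically throw or return garbage. A minimal sketch for reading an arbitrary small file instead (readWholeFile is a hypothetical helper; assumes java.nio.charset.StandardCharsets is imported):

private String readWholeFile(FileSystem fs, Path path) throws IOException {
    // Size the buffer from the file status; only sensible for small files
    int len = (int) fs.getFileStatus(path).getLen();
    byte[] buf = new byte[len];
    try (FSDataInputStream in = fs.open(path)) {
        in.readFully(0, buf); // positioned read of the entire file
    }
    return new String(buf, StandardCharsets.UTF_8);
}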
 
Example 3  Projects: hadoop, big-c  File: HistoryFileManager.java
private String getJobSummary(FileContext fc, Path path) throws IOException {
  Path qPath = fc.makeQualified(path);
  FSDataInputStream in = fc.open(qPath);
  String jobSummaryString = in.readUTF();
  in.close();
  return jobSummaryString;
}
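
Note that the helper above leaks the open stream if readUTF() throws between open and close; a try-with-resources variant of the same helper, as a sketch:

private String getJobSummary(FileContext fc, Path path) throws IOException {
    Path qPath = fc.makeQualified(path);
    // The stream is closed even if readUTF throws
    try (FSDataInputStream in = fc.open(qPath)) {
        return in.readUTF();
    }
}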
 
Example 4  Projects: hadoop, big-c  File: TestJobHistoryParsing.java
private static String getJobSummary(FileContext fc, Path path)
    throws IOException {
  Path qPath = fc.makeQualified(path);
  FSDataInputStream in = fc.open(qPath);
  String jobSummaryString = in.readUTF();
  in.close();
  return jobSummaryString;
}
 
Example 5
private String readFileFromHdfs(String filename) throws Exception {
    FileSystem hdfsFsHandle = dfsCluster.getHdfsFileSystemHandle();
    FSDataInputStream reader = hdfsFsHandle.open(new Path(filename));
    String output = reader.readUTF();
    reader.close();
    hdfsFsHandle.close();
    return output;
}
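
One caveat about the helper above: FileSystem handles obtained through FileSystem.get() are cached per URI and user, so closing the handle after a single read can invalidate the same instance elsewhere in the process (whether getHdfsFileSystemHandle() returns such a cached instance is an assumption about that helper). When an independently closeable handle is wanted, FileSystem.newInstance() bypasses the cache; a sketch:

private String readFileFromHdfs(Configuration conf, String filename) throws Exception {
    // newInstance() returns an uncached FileSystem, so closing it here
    // cannot break callers holding the shared instance from FileSystem.get()
    try (FileSystem fs = FileSystem.newInstance(FileSystem.getDefaultUri(conf), conf);
         FSDataInputStream reader = fs.open(new Path(filename))) {
        return reader.readUTF();
    }
}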
 
Example 6  Projects: hadoop, big-c  File: TestCopyFiles.java
public void testCopyDfsToDfsUpdateWithSkipCRC() throws Exception {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new Configuration();
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    final FileSystem hdfs = cluster.getFileSystem();
    final String namenode = hdfs.getUri().toString();
    
    FileSystem fs = FileSystem.get(URI.create(namenode), new Configuration());
    // Create two files of the same name, same length but different
    // contents
    final String testfilename = "test";
    final String srcData = "act act act";
    final String destData = "cat cat cat";
    
    if (namenode.startsWith("hdfs://")) {
      deldir(hdfs,"/logs");
      
      Path srcPath = new Path("/srcdat", testfilename);
      Path destPath = new Path("/destdat", testfilename);
      FSDataOutputStream out = fs.create(srcPath, true);
      out.writeUTF(srcData);
      out.close();

      out = fs.create(destPath, true);
      out.writeUTF(destData);
      out.close();
      
      // Run with -skipcrccheck option
      ToolRunner.run(new DistCpV1(conf), new String[] {
        "-p",
        "-update",
        "-skipcrccheck",
        "-log",
        namenode+"/logs",
        namenode+"/srcdat",
        namenode+"/destdat"});
      
      // File should not be overwritten
      FSDataInputStream in = hdfs.open(destPath);
      String s = in.readUTF();
      System.out.println("Dest had: " + s);
      assertTrue("Dest got over written even with skip crc",
          s.equalsIgnoreCase(destData));
      in.close();
      
      deldir(hdfs, "/logs");

      // Run without the option        
      ToolRunner.run(new DistCpV1(conf), new String[] {
        "-p",
        "-update",
        "-log",
        namenode+"/logs",
        namenode+"/srcdat",
        namenode+"/destdat"});
      
      // File should be overwritten
      in = hdfs.open(destPath);
      s = in.readUTF();
      System.out.println("Dest had: " + s);

      assertTrue("Dest did not get overwritten without skip crc",
          s.equalsIgnoreCase(srcData));
      in.close();

      deldir(hdfs, "/destdat");
      deldir(hdfs, "/srcdat");
      deldir(hdfs, "/logs");
    }
  } finally {
    if (cluster != null) { cluster.shutdown(); }
  }
}
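
For context on why the first run leaves the destination untouched: with -update, DistCpV1 skips files whose size and checksum already match the source, and -skipcrccheck drops the checksum comparison. The two files have the same on-disk length (writeUTF stores a two-byte length prefix plus 11 bytes for each of "act act act" and "cat cat cat"), so with the flag they are treated as identical and no copy happens; without the flag the CRCs differ and the destination is overwritten.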
 