Class org.apache.hadoop.fs.Options.CreateOpts: source code examples

The examples below show how to use the org.apache.hadoop.fs.Options.CreateOpts API, collected from several open-source projects; follow the links to view the full source on GitHub.
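
The CreateOpts nested classes are small value objects; each static factory wraps one optional parameter of FileContext.create(), and any subset may be passed as trailing varargs. A minimal self-contained sketch, written for this page rather than taken from any project below (the path and size values are illustrative assumptions):

import java.util.EnumSet;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Options.CreateOpts;
import org.apache.hadoop.fs.Path;

public class CreateOptsDemo {
  public static void main(String[] args) throws Exception {
    FileContext fc = FileContext.getFileContext(new Configuration());
    FSDataOutputStream out = fc.create(
        new Path("/tmp/createopts-demo"),          // illustrative path
        EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
        CreateOpts.createParent(),                 // create missing parent dirs
        CreateOpts.bufferSize(4096),               // output buffer size
        CreateOpts.repFac((short) 1),              // replication factor
        CreateOpts.blockSize(64L * 1024 * 1024));  // block size in bytes
    try {
      out.write("hello".getBytes("UTF-8"));
    } finally {
      out.close();
    }
  }
}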

Example 1  Project: hadoop  File: SequenceFile.java

/**
 * Construct the preferred type of SequenceFile Writer.
 * @param fs The configured filesystem.
 * @param conf The configuration.
 * @param name The name of the file.
 * @param keyClass The 'key' type.
 * @param valClass The 'value' type.
 * @param bufferSize buffer size for the underlying output stream.
 * @param replication replication factor for the file.
 * @param blockSize block size for the file.
 * @param createParent create parent directory if non-existent
 * @param compressionType The compression type.
 * @param codec The compression codec.
 * @param metadata The metadata of the file.
 * @return Returns the handle to the constructed SequenceFile Writer.
 * @throws IOException
 */
@Deprecated
public static Writer
createWriter(FileSystem fs, Configuration conf, Path name,
             Class keyClass, Class valClass, int bufferSize,
             short replication, long blockSize, boolean createParent,
             CompressionType compressionType, CompressionCodec codec,
             Metadata metadata) throws IOException {
  return createWriter(FileContext.getFileContext(fs.getUri(), conf),
      conf, name, keyClass, valClass, compressionType, codec,
      metadata, EnumSet.of(CreateFlag.CREATE,CreateFlag.OVERWRITE),
      CreateOpts.bufferSize(bufferSize),
      createParent ? CreateOpts.createParent()
                   : CreateOpts.donotCreateParent(),
      CreateOpts.repFac(replication),
      CreateOpts.blockSize(blockSize)
    );
}
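
A caller-side sketch for the deprecated overload above; the key/value types, path, and size values are illustrative assumptions, not taken from the project:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;

public class DeprecatedWriterDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path name = new Path("/tmp/demo.seq");          // illustrative path
    SequenceFile.Writer writer = SequenceFile.createWriter(
        fs, conf, name,
        Text.class, IntWritable.class,
        4096,                                       // bufferSize
        (short) 1,                                  // replication
        fs.getDefaultBlockSize(name),               // blockSize
        true,                                       // createParent
        SequenceFile.CompressionType.NONE,          // compressionType
        null,                                       // codec (unused with NONE)
        new SequenceFile.Metadata());               // empty metadata
    writer.append(new Text("key"), new IntWritable(1));
    writer.close();
  }
}

Note how the method body above translates the flat parameters into CreateOpts values and delegates to the FileContext-based createWriter.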
 
Example 2  Project: hadoop

@Test
/*
 * Test method
 *  org.apache.hadoop.fs.FileContext.getFileContext(AbstractFileSystem)
 */
public void testGetFileContext1() throws IOException {
  final Path rootPath = getTestRootPath(fc, "test");
  AbstractFileSystem asf = fc.getDefaultFileSystem();
  // create FileContext using the protected #getFileContext(1) method:
  FileContext fc2 = FileContext.getFileContext(asf);
  // Now just check that this context can do something reasonable:
  final Path path = new Path(rootPath, "zoo");
  FSDataOutputStream out = fc2.create(path, EnumSet.of(CREATE),
      Options.CreateOpts.createParent());
  out.close();
  Path pathResolved = fc2.resolvePath(path);
  assertEquals(pathResolved.toUri().getPath(), path.toUri().getPath());
}
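
The test exercises an overload that takes an AbstractFileSystem directly. For reference, the more common public ways to obtain a FileContext (the HDFS URI is an illustrative assumption):

FileContext byDefault = FileContext.getFileContext();  // default FS from config
FileContext byConf = FileContext.getFileContext(new Configuration());
FileContext byUri = FileContext.getFileContext(
    java.net.URI.create("hdfs://namenode:8020"), new Configuration());
FileContext local = FileContext.getLocalFSFileContext();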
 
Example 3  Project: hadoop  File: SymlinkBaseTest.java

@Test(timeout=10000)
/** Try to create a file with parent that is a dangling link */
public void testCreateFileViaDanglingLinkParent() throws IOException {
  Path dir  = new Path(testBaseDir1()+"/dangling");
  Path file = new Path(testBaseDir1()+"/dangling/file");
  wrapper.createSymlink(new Path("/doesNotExist"), dir, false);
  FSDataOutputStream out;
  try {
    out = wrapper.create(file, EnumSet.of(CreateFlag.CREATE),
                    CreateOpts.repFac((short) 1),
                    CreateOpts.blockSize(blockSize));
    out.close();
    fail("Created a link with dangling link parent");
  } catch (FileNotFoundException e) {
    // Expected. The parent is dangling.
  }
}
 
Example 4  Project: hadoop  File: LoadGenerator.java

/** Create a file with a length of <code>fileSize</code>.
 * The file is filled with 'a'.
 */
private void genFile(Path file, long fileSize) throws IOException {
  long startTime = Time.now();
  FSDataOutputStream out = null;
  try {
    out = fc.create(file,
        EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
        CreateOpts.createParent(), CreateOpts.bufferSize(4096),
        CreateOpts.repFac((short) 3));
    executionTime[CREATE] += (Time.now() - startTime);
    numOfOps[CREATE]++;

    // Write in chunks, capping each chunk at the bytes still remaining so
    // the final chunk never overshoots fileSize.
    long i = fileSize;
    while (i > 0) {
      long s = Math.min(i, WRITE_CONTENTS.length);
      out.write(WRITE_CONTENTS, 0, (int) s);
      i -= s;
    }

    // Time the close so WRITE_CLOSE measures the actual flush-and-close cost.
    startTime = Time.now();
    out.close();
    executionTime[WRITE_CLOSE] += (Time.now() - startTime);
    numOfOps[WRITE_CLOSE]++;
  } finally {
    IOUtils.cleanup(LOG, out);
  }
}
 
Example 5  Project: gemfirexd-oss  File: SequenceFile.java

/**
 * Construct the preferred type of SequenceFile Writer.
 * @param fs The configured filesystem.
 * @param conf The configuration.
 * @param name The name of the file.
 * @param keyClass The 'key' type.
 * @param valClass The 'value' type.
 * @param bufferSize buffer size for the underlying output stream.
 * @param replication replication factor for the file.
 * @param blockSize block size for the file.
 * @param createParent create parent directory if non-existent
 * @param compressionType The compression type.
 * @param codec The compression codec.
 * @param metadata The metadata of the file.
 * @return Returns the handle to the constructed SequenceFile Writer.
 * @throws IOException
 */
@Deprecated
public static Writer
createWriter(FileSystem fs, Configuration conf, Path name,
             Class keyClass, Class valClass, int bufferSize,
             short replication, long blockSize, boolean createParent,
             CompressionType compressionType, CompressionCodec codec,
             Metadata metadata) throws IOException {
  return createWriter(FileContext.getFileContext(fs.getUri(), conf),
      conf, name, keyClass, valClass, compressionType, codec,
      metadata, EnumSet.of(CreateFlag.CREATE,CreateFlag.OVERWRITE),
      CreateOpts.bufferSize(bufferSize),
      createParent ? CreateOpts.createParent()
                   : CreateOpts.donotCreateParent(),
      CreateOpts.repFac(replication),
      CreateOpts.blockSize(blockSize)
    );
}
 
Example 6  Project: big-c  File: SequenceFile.java

/**
 * Construct the preferred type of SequenceFile Writer.
 * @param fs The configured filesystem.
 * @param conf The configuration.
 * @param name The name of the file.
 * @param keyClass The 'key' type.
 * @param valClass The 'value' type.
 * @param bufferSize buffer size for the underlying output stream.
 * @param replication replication factor for the file.
 * @param blockSize block size for the file.
 * @param createParent create parent directory if non-existent
 * @param compressionType The compression type.
 * @param codec The compression codec.
 * @param metadata The metadata of the file.
 * @return Returns the handle to the constructed SequenceFile Writer.
 * @throws IOException
 */
@Deprecated
public static Writer
createWriter(FileSystem fs, Configuration conf, Path name,
             Class keyClass, Class valClass, int bufferSize,
             short replication, long blockSize, boolean createParent,
             CompressionType compressionType, CompressionCodec codec,
             Metadata metadata) throws IOException {
  return createWriter(FileContext.getFileContext(fs.getUri(), conf),
      conf, name, keyClass, valClass, compressionType, codec,
      metadata, EnumSet.of(CreateFlag.CREATE,CreateFlag.OVERWRITE),
      CreateOpts.bufferSize(bufferSize),
      createParent ? CreateOpts.createParent()
                   : CreateOpts.donotCreateParent(),
      CreateOpts.repFac(replication),
      CreateOpts.blockSize(blockSize)
    );
}
 
Example 7  Project: big-c

@Test
/*
 * Test method
 *  org.apache.hadoop.fs.FileContext.getFileContext(AbstractFileSystem)
 */
public void testGetFileContext1() throws IOException {
  final Path rootPath = getTestRootPath(fc, "test");
  AbstractFileSystem asf = fc.getDefaultFileSystem();
  // create FileContext using the protected #getFileContext(1) method:
  FileContext fc2 = FileContext.getFileContext(asf);
  // Now just check that this context can do something reasonable:
  final Path path = new Path(rootPath, "zoo");
  FSDataOutputStream out = fc2.create(path, EnumSet.of(CREATE),
      Options.CreateOpts.createParent());
  out.close();
  Path pathResolved = fc2.resolvePath(path);
  assertEquals(pathResolved.toUri().getPath(), path.toUri().getPath());
}
 
Example 8  Project: big-c  File: SymlinkBaseTest.java

@Test(timeout=10000)
/** Try to create a file with parent that is a dangling link */
public void testCreateFileViaDanglingLinkParent() throws IOException {
  Path dir  = new Path(testBaseDir1()+"/dangling");
  Path file = new Path(testBaseDir1()+"/dangling/file");
  wrapper.createSymlink(new Path("/doesNotExist"), dir, false);
  FSDataOutputStream out;
  try {
    out = wrapper.create(file, EnumSet.of(CreateFlag.CREATE),
                    CreateOpts.repFac((short) 1),
                    CreateOpts.blockSize(blockSize));
    out.close();
    fail("Created a link with dangling link parent");
  } catch (FileNotFoundException e) {
    // Expected. The parent is dangling.
  }
}
 
Example 9  Project: big-c  File: LoadGenerator.java

/** Create a file with a length of <code>fileSize</code>.
 * The file is filled with 'a'.
 */
private void genFile(Path file, long fileSize) throws IOException {
  long startTime = Time.now();
  FSDataOutputStream out = null;
  try {
    out = fc.create(file,
        EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
        CreateOpts.createParent(), CreateOpts.bufferSize(4096),
        CreateOpts.repFac((short) 3));
    executionTime[CREATE] += (Time.now() - startTime);
    numOfOps[CREATE]++;

    // Write in chunks, capping each chunk at the bytes still remaining so
    // the final chunk never overshoots fileSize.
    long i = fileSize;
    while (i > 0) {
      long s = Math.min(i, WRITE_CONTENTS.length);
      out.write(WRITE_CONTENTS, 0, (int) s);
      i -= s;
    }

    // Time the close so WRITE_CLOSE measures the actual flush-and-close cost.
    startTime = Time.now();
    out.close();
    executionTime[WRITE_CLOSE] += (Time.now() - startTime);
    numOfOps[WRITE_CLOSE]++;
  } finally {
    IOUtils.cleanup(LOG, out);
  }
}
 
Example 10  Project: crate  File: HdfsBlobContainer.java

@Override
public void writeBlob(String blobName, InputStream inputStream, long blobSize, boolean failIfAlreadyExists) throws IOException {
    store.execute((Operation<Void>) fileContext -> {
        Path blob = new Path(path, blobName);
        // CREATE makes the call fail if the blob already exists; OVERWRITE is
        // added only when failIfAlreadyExists is false.
        EnumSet<CreateFlag> flags = failIfAlreadyExists ? EnumSet.of(CreateFlag.CREATE, CreateFlag.SYNC_BLOCK) :
            EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE, CreateFlag.SYNC_BLOCK);
        CreateOpts[] opts = {CreateOpts.bufferSize(bufferSize)};
        try (FSDataOutputStream stream = fileContext.create(blob, flags, opts)) {
            int bytesRead;
            byte[] buffer = new byte[bufferSize];
            while ((bytesRead = inputStream.read(buffer)) != -1) {
                stream.write(buffer, 0, bytesRead);
                //  For safety we also hsync each write, because of the
                //  SYNC_BLOCK docs: "to force closed blocks to the disk device.
                //  In addition Syncable.hsync() should be called after each
                //  write, if true synchronous behavior is required"
                stream.hsync();
            }
        } catch (org.apache.hadoop.fs.FileAlreadyExistsException faee) {
            throw new FileAlreadyExistsException(blob.toString(), null, faee.getMessage());
        }
        return null;
    });
}
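
A read-side counterpart, written for illustration only and not part of HdfsBlobContainer, showing how the same FileContext API streams a blob back:

// Sketch only: path and bufferSize are passed in rather than read from fields.
static byte[] readBlob(FileContext fileContext, Path path, String blobName,
                       int bufferSize) throws IOException {
    Path blob = new Path(path, blobName);
    java.io.ByteArrayOutputStream bytes = new java.io.ByteArrayOutputStream();
    try (FSDataInputStream stream = fileContext.open(blob, bufferSize)) {
        byte[] buffer = new byte[bufferSize];
        int read;
        while ((read = stream.read(buffer)) != -1) {
            bytes.write(buffer, 0, read);
        }
    }
    return bytes.toByteArray();
}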
 
Example 11  Project: hadoop  File: FileContextTestWrapper.java

public long createFile(Path path, int numBlocks, CreateOpts... options)
    throws IOException {
  BlockSize blockSizeOpt = CreateOpts.getOpt(CreateOpts.BlockSize.class, options);
  long blockSize = blockSizeOpt != null ? blockSizeOpt.getValue()
      : DEFAULT_BLOCK_SIZE;
  FSDataOutputStream out =
    fc.create(path, EnumSet.of(CreateFlag.CREATE), options);
  byte[] data = getFileData(numBlocks, blockSize);
  out.write(data, 0, data.length);
  out.close();
  return data.length;
}
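
The CreateOpts.getOpt lookup used above scans the varargs array for the first option of the requested class. A minimal sketch of the pattern (note that getOpt is package-private in many Hadoop versions, which is why only in-package helpers like this wrapper call it):

CreateOpts[] options = {
    CreateOpts.bufferSize(8192),
    CreateOpts.blockSize(128L * 1024 * 1024)
};
// Returns the BlockSize entry, or null if the caller did not pass one.
CreateOpts.BlockSize blockSizeOpt =
    CreateOpts.getOpt(CreateOpts.BlockSize.class, options);
long blockSize = blockSizeOpt != null ? blockSizeOpt.getValue()
    : 64L * 1024 * 1024;  // illustrative fallback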
 
Example 12  Project: hadoop  File: FileContextTestWrapper.java

public void appendToFile(Path path, int numBlocks, CreateOpts... options)
    throws IOException {
  BlockSize blockSizeOpt = CreateOpts.getOpt(CreateOpts.BlockSize.class, options);
  long blockSize = blockSizeOpt != null ? blockSizeOpt.getValue()
      : DEFAULT_BLOCK_SIZE;
  FSDataOutputStream out;
  out = fc.create(path, EnumSet.of(CreateFlag.APPEND));
  byte[] data = getFileData(numBlocks, blockSize);
  out.write(data, 0, data.length);
  out.close();
}
 
Example 13  Project: hadoop  File: FileContextTestWrapper.java

@Override
public FSDataOutputStream create(Path f, EnumSet<CreateFlag> createFlag,
    CreateOpts... opts) throws AccessControlException,
    FileAlreadyExistsException, FileNotFoundException,
    ParentNotDirectoryException, UnsupportedFileSystemException, IOException {
  return fc.create(f, createFlag, opts);
}
 
Example 14  Project: hadoop

private void writeReadAndDelete(int len) throws IOException {
  Path path = getTestRootPath(fc, "test/hadoop/file");
  
  fc.mkdir(path.getParent(), FsPermission.getDefault(), true);

  FSDataOutputStream out = fc.create(path, EnumSet.of(CREATE),
      CreateOpts.repFac((short) 1), CreateOpts
          .blockSize(getDefaultBlockSize()));
  out.write(data, 0, len);
  out.close();

  Assert.assertTrue("Exists", exists(fc, path));
  Assert.assertEquals("Length", len, fc.getFileStatus(path).getLen());

  FSDataInputStream in = fc.open(path);
  byte[] buf = new byte[len];
  in.readFully(0, buf);
  in.close();

  Assert.assertEquals(len, buf.length);
  for (int i = 0; i < buf.length; i++) {
    Assert.assertEquals("Position " + i, data[i], buf[i]);
  }
  
  Assert.assertTrue("Deleted", fc.delete(path, false));
  
  Assert.assertFalse("No longer exists", exists(fc, path));

}
 
Example 15  Project: hadoop

@Test
public void testOutputStreamClosedTwice() throws IOException {
  //HADOOP-4760 according to Closeable#close() closing already-closed 
  //streams should have no effect. 
  Path src = getTestRootPath(fc, "test/hadoop/file");
  FSDataOutputStream out = fc.create(src, EnumSet.of(CREATE),
          Options.CreateOpts.createParent());
  
  out.writeChar('H'); //write some data
  out.close();
  out.close();
}
 
Example 16  Project: hadoop

@Test
public void testSetVerifyChecksum() throws IOException {
  final Path rootPath = getTestRootPath(fc, "test");
  final Path path = new Path(rootPath, "zoo");

  FSDataOutputStream out = fc.create(path, EnumSet.of(CREATE),
      Options.CreateOpts.createParent());
  try {
    // instruct FS to verify checksum through the FileContext:
    fc.setVerifyChecksum(true, path);
    out.write(data, 0, data.length);
  } finally {
    out.close();
  }

  // NB: underlying FS may be different (this is an abstract test),
  // so we cannot assert .zoo.crc existence.
  // Instead, we check that the file is read correctly:
  FileStatus fileStatus = fc.getFileStatus(path);
  final long len = fileStatus.getLen();
  assertTrue(len == data.length);
  byte[] bb = new byte[(int)len];
  FSDataInputStream fsdis = fc.open(path);
  try {
    fsdis.read(bb);
  } finally {
    fsdis.close();
  }
  assertArrayEquals(data, bb);
}
 
Example 17  Project: hadoop  File: FileContextTestHelper.java

public static long createFile(FileContext fc, Path path, int numBlocks,
    CreateOpts... options) throws IOException {
  BlockSize blockSizeOpt = CreateOpts.getOpt(CreateOpts.BlockSize.class, options);
  long blockSize = blockSizeOpt != null ? blockSizeOpt.getValue()
      : DEFAULT_BLOCK_SIZE;
  FSDataOutputStream out = 
    fc.create(path, EnumSet.of(CreateFlag.CREATE), options);
  byte[] data = getFileData(numBlocks, blockSize);
  out.write(data, 0, data.length);
  out.close();
  return data.length;
}
 
Example 18  Project: hadoop  File: FileContextTestHelper.java

public static void appendToFile(FileContext fc, Path path, int numBlocks,
    CreateOpts... options) throws IOException {
  BlockSize blockSizeOpt = CreateOpts.getOpt(CreateOpts.BlockSize.class, options);
  long blockSize = blockSizeOpt != null ? blockSizeOpt.getValue()
      : DEFAULT_BLOCK_SIZE;
  FSDataOutputStream out;
  out = fc.create(path, EnumSet.of(CreateFlag.APPEND));
  byte[] data = getFileData(numBlocks, blockSize);
  out.write(data, 0, data.length);
  out.close();
}
 
Example 19  Project: hadoop  File: FileContextTestHelper.java

public static void writeFile(FileContext fc, Path path, byte b[])
    throws IOException {
  FSDataOutputStream out = 
    fc.create(path,EnumSet.of(CreateFlag.CREATE), CreateOpts.createParent());
  out.write(b);
  out.close();
}
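
A read-side companion to writeFile, a sketch written for this page rather than a helper guaranteed to exist in FileContextTestHelper:

public static byte[] readFile(FileContext fc, Path path) throws IOException {
  int len = (int) fc.getFileStatus(path).getLen();
  byte[] buffer = new byte[len];
  FSDataInputStream in = fc.open(path);
  try {
    in.readFully(0, buffer);  // positional read of the whole file
  } finally {
    in.close();
  }
  return buffer;
}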
 
Example 20  Project: hadoop  File: FileSystemTestWrapper.java

public long createFile(Path path, int numBlocks, CreateOpts... options)
    throws IOException {
  BlockSize blockSizeOpt = CreateOpts.getOpt(CreateOpts.BlockSize.class, options);
  long blockSize = blockSizeOpt != null ? blockSizeOpt.getValue()
      : DEFAULT_BLOCK_SIZE;
  FSDataOutputStream out =
    create(path, EnumSet.of(CreateFlag.CREATE), options);
  byte[] data = getFileData(numBlocks, blockSize);
  out.write(data, 0, data.length);
  out.close();
  return data.length;
}
 
Example 21  Project: hadoop  File: FileSystemTestWrapper.java

public void appendToFile(Path path, int numBlocks, CreateOpts... options)
    throws IOException {
  BlockSize blockSizeOpt = CreateOpts.getOpt(CreateOpts.BlockSize.class, options);
  long blockSize = blockSizeOpt != null ? blockSizeOpt.getValue()
      : DEFAULT_BLOCK_SIZE;
  FSDataOutputStream out;
  out = fs.append(path);
  byte[] data = getFileData(numBlocks, blockSize);
  out.write(data, 0, data.length);
  out.close();
}
 
Example 22  Project: hadoop  File: DataGenerator.java

/** Create a file with the name <code>file</code> and 
 * a length of <code>fileSize</code>. The file is filled with character 'a'.
 */
private void genFile(Path file, long fileSize) throws IOException {
  FSDataOutputStream out = fc.create(file,
      EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
      CreateOpts.createParent(), CreateOpts.bufferSize(4096),
      CreateOpts.repFac((short) 3));
  for(long i=0; i<fileSize; i++) {
    out.writeByte('a');
  }
  out.close();
}
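
The byte-at-a-time writeByte loop above is tolerable only because of the 4096-byte buffer. A chunked variant, a sketch not taken from DataGenerator, reduces the per-call overhead:

private void genFileChunked(Path file, long fileSize) throws IOException {
  byte[] chunk = new byte[4096];
  java.util.Arrays.fill(chunk, (byte) 'a');
  FSDataOutputStream out = fc.create(file,
      EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
      CreateOpts.createParent(), CreateOpts.bufferSize(4096),
      CreateOpts.repFac((short) 3));
  try {
    long remaining = fileSize;
    while (remaining > 0) {
      int s = (int) Math.min(remaining, chunk.length);  // never overshoot fileSize
      out.write(chunk, 0, s);
      remaining -= s;
    }
  } finally {
    out.close();
  }
}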
 
Example 23  Project: big-c  File: FileContextTestWrapper.java

public long createFile(Path path, int numBlocks, CreateOpts... options)
    throws IOException {
  BlockSize blockSizeOpt = CreateOpts.getOpt(CreateOpts.BlockSize.class, options);
  long blockSize = blockSizeOpt != null ? blockSizeOpt.getValue()
      : DEFAULT_BLOCK_SIZE;
  FSDataOutputStream out =
    fc.create(path, EnumSet.of(CreateFlag.CREATE), options);
  byte[] data = getFileData(numBlocks, blockSize);
  out.write(data, 0, data.length);
  out.close();
  return data.length;
}
 
Example 24  Project: big-c  File: FileContextTestWrapper.java

public void appendToFile(Path path, int numBlocks, CreateOpts... options)
    throws IOException {
  BlockSize blockSizeOpt = CreateOpts.getOpt(CreateOpts.BlockSize.class, options);
  long blockSize = blockSizeOpt != null ? blockSizeOpt.getValue()
      : DEFAULT_BLOCK_SIZE;
  FSDataOutputStream out;
  out = fc.create(path, EnumSet.of(CreateFlag.APPEND));
  byte[] data = getFileData(numBlocks, blockSize);
  out.write(data, 0, data.length);
  out.close();
}
 
Example 25  Project: big-c  File: FileContextTestWrapper.java

@Override
public FSDataOutputStream create(Path f, EnumSet<CreateFlag> createFlag,
    CreateOpts... opts) throws AccessControlException,
    FileAlreadyExistsException, FileNotFoundException,
    ParentNotDirectoryException, UnsupportedFileSystemException, IOException {
  return fc.create(f, createFlag, opts);
}
 
Example 26  Project: big-c

private void writeReadAndDelete(int len) throws IOException {
  Path path = getTestRootPath(fc, "test/hadoop/file");
  
  fc.mkdir(path.getParent(), FsPermission.getDefault(), true);

  FSDataOutputStream out = fc.create(path, EnumSet.of(CREATE),
      CreateOpts.repFac((short) 1), CreateOpts
          .blockSize(getDefaultBlockSize()));
  out.write(data, 0, len);
  out.close();

  Assert.assertTrue("Exists", exists(fc, path));
  Assert.assertEquals("Length", len, fc.getFileStatus(path).getLen());

  FSDataInputStream in = fc.open(path);
  byte[] buf = new byte[len];
  in.readFully(0, buf);
  in.close();

  Assert.assertEquals(len, buf.length);
  for (int i = 0; i < buf.length; i++) {
    Assert.assertEquals("Position " + i, data[i], buf[i]);
  }
  
  Assert.assertTrue("Deleted", fc.delete(path, false));
  
  Assert.assertFalse("No longer exists", exists(fc, path));

}
 
Example 27  Project: big-c

@Test
public void testOutputStreamClosedTwice() throws IOException {
  //HADOOP-4760 according to Closeable#close() closing already-closed 
  //streams should have no effect. 
  Path src = getTestRootPath(fc, "test/hadoop/file");
  FSDataOutputStream out = fc.create(src, EnumSet.of(CREATE),
          Options.CreateOpts.createParent());
  
  out.writeChar('H'); //write some data
  out.close();
  out.close();
}
 
Example 28  Project: big-c

@Test
public void testSetVerifyChecksum() throws IOException {
  final Path rootPath = getTestRootPath(fc, "test");
  final Path path = new Path(rootPath, "zoo");

  FSDataOutputStream out = fc.create(path, EnumSet.of(CREATE),
      Options.CreateOpts.createParent());
  try {
    // instruct FS to verify checksum through the FileContext:
    fc.setVerifyChecksum(true, path);
    out.write(data, 0, data.length);
  } finally {
    out.close();
  }

  // NB: underlying FS may be different (this is an abstract test),
  // so we cannot assert .zoo.crc existence.
  // Instead, we check that the file is read correctly:
  FileStatus fileStatus = fc.getFileStatus(path);
  final long len = fileStatus.getLen();
  assertTrue(len == data.length);
  byte[] bb = new byte[(int)len];
  FSDataInputStream fsdis = fc.open(path);
  try {
    fsdis.read(bb);
  } finally {
    fsdis.close();
  }
  assertArrayEquals(data, bb);
}
 
Example 29  Project: big-c  File: FileContextTestHelper.java

public static long createFile(FileContext fc, Path path, int numBlocks,
    CreateOpts... options) throws IOException {
  BlockSize blockSizeOpt = CreateOpts.getOpt(CreateOpts.BlockSize.class, options);
  long blockSize = blockSizeOpt != null ? blockSizeOpt.getValue()
      : DEFAULT_BLOCK_SIZE;
  FSDataOutputStream out = 
    fc.create(path, EnumSet.of(CreateFlag.CREATE), options);
  byte[] data = getFileData(numBlocks, blockSize);
  out.write(data, 0, data.length);
  out.close();
  return data.length;
}
 