org.apache.hadoop.fs.swift.util.SwiftTestUtils#dataset() code examples

Listed below are example usages of org.apache.hadoop.fs.swift.util.SwiftTestUtils#dataset(), collected from open-source projects; follow the project references to browse the full sources on GitHub.
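All of the tests below rely on the deterministic byte pattern that dataset() returns. As a point of reference, here is a minimal sketch of that behavior, inferred from how the assertions in these tests use the returned array rather than copied from the Hadoop sources: byte i carries the value base + (i % modulo).

/**
 * Minimal sketch (inferred, not the authoritative Hadoop implementation):
 * fill a byte array of length len so that the i-th byte is (base + i % modulo).
 * With base=0 and modulo=255 this yields the "offset => value" rule the
 * seek tests below depend on.
 */
public static byte[] dataset(int len, int base, int modulo) {
  byte[] data = new byte[len];
  for (int i = 0; i < len; i++) {
    data[i] = (byte) (base + (i % modulo));
  }
  return data;
}

For example, under this pattern dataset(65536, 0, 255)[32768] == (byte)(32768 % 255) == 128, so comparing block[32768] with instream.read() after seek(32768) checks both the data and the stream position.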

Example 1  Project: sahara-extra   File: TestSeek.java
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testSeekBigFile() throws Throwable {
  Path testSeekFile = new Path(testPath, "bigseekfile.txt");
  byte[] block = SwiftTestUtils.dataset(65536, 0, 255);
  createFile(testSeekFile, block);
  instream = fs.open(testSeekFile);
  assertEquals(0, instream.getPos());
  //expect that seek to 0 works
  instream.seek(0);
  int result = instream.read();
  assertEquals(0, result);
  assertEquals(1, instream.read());
  assertEquals(2, instream.read());

  //do seek 32KB ahead
  instream.seek(32768);
  assertEquals("@32768", block[32768], (byte) instream.read());
  instream.seek(40000);
  assertEquals("@40000", block[40000], (byte) instream.read());
  instream.seek(8191);
  assertEquals("@8191", block[8191], (byte) instream.read());
  instream.seek(0);
  assertEquals("@0", 0, (byte) instream.read());
}
 
Example 2  Project: hadoop   File: TestSeek.java
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testSeekBigFile() throws Throwable {
  Path testSeekFile = new Path(testPath, "bigseekfile.txt");
  byte[] block = SwiftTestUtils.dataset(65536, 0, 255);
  createFile(testSeekFile, block);
  instream = fs.open(testSeekFile);
  assertEquals(0, instream.getPos());
  //expect that seek to 0 works
  instream.seek(0);
  int result = instream.read();
  assertEquals(0, result);
  assertEquals(1, instream.read());
  assertEquals(2, instream.read());

  //do seek 32KB ahead
  instream.seek(32768);
  assertEquals("@32768", block[32768], (byte) instream.read());
  instream.seek(40000);
  assertEquals("@40000", block[40000], (byte) instream.read());
  instream.seek(8191);
  assertEquals("@8191", block[8191], (byte) instream.read());
  instream.seek(0);
  assertEquals("@0", 0, (byte) instream.read());
}
 
Example 3  Project: big-c   File: TestSeek.java
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testSeekBigFile() throws Throwable {
  Path testSeekFile = new Path(testPath, "bigseekfile.txt");
  byte[] block = SwiftTestUtils.dataset(65536, 0, 255);
  createFile(testSeekFile, block);
  instream = fs.open(testSeekFile);
  assertEquals(0, instream.getPos());
  //expect that seek to 0 works
  instream.seek(0);
  int result = instream.read();
  assertEquals(0, result);
  assertEquals(1, instream.read());
  assertEquals(2, instream.read());

  //do seek 32KB ahead
  instream.seek(32768);
  assertEquals("@32768", block[32768], (byte) instream.read());
  instream.seek(40000);
  assertEquals("@40000", block[40000], (byte) instream.read());
  instream.seek(8191);
  assertEquals("@8191", block[8191], (byte) instream.read());
  instream.seek(0);
  assertEquals("@0", 0, (byte) instream.read());
}
 
Example 4  Project: big-c   File: TestSeek.java
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testPositionedBulkReadDoesntChangePosition() throws Throwable {
  Path testSeekFile = new Path(testPath, "bigseekfile.txt");
  byte[] block = SwiftTestUtils.dataset(65536, 0, 255);
  createFile(testSeekFile, block);
  instream = fs.open(testSeekFile);
  instream.seek(39999);
  assertTrue(-1 != instream.read());
  assertEquals(40000, instream.getPos());

  byte[] readBuffer = new byte[256];
  instream.read(128, readBuffer, 0, readBuffer.length);
  //the positioned read has not moved the stream position
  assertEquals(40000, instream.getPos());
  //content is the same too
  assertEquals("@40000", block[40000], (byte) instream.read());
  //now verify the picked up data
  for (int i = 0; i < 256; i++) {
    assertEquals("@" + i, block[i + 128], readBuffer[i]);
  }
}
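The bulk read above uses the positioned-read form read(long position, byte[] buffer, int offset, int length) from Hadoop's PositionedReadable interface, which reads from an absolute offset without touching the stream's own cursor. A small illustration of that contract, reusing fs and testSeekFile from the test above:

// Illustration only: a positioned read pulls bytes from an absolute offset
// and leaves the seek position untouched, which is what the test asserts.
FSDataInputStream in = fs.open(testSeekFile);
in.seek(40000);                      // cursor now at 40000
byte[] buf = new byte[256];
in.read(128, buf, 0, buf.length);    // bulk read starting at absolute offset 128
assertEquals(40000, in.getPos());    // cursor unchanged by the positioned read
in.close();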
 
Example 5  Project: hadoop   File: TestReadPastBuffer.java
/**
 * Setup creates dirs under test/hadoop
 *
 * @throws Exception
 */
@Override
public void setUp() throws Exception {
  super.setUp();
  byte[] block = SwiftTestUtils.dataset(SEEK_FILE_LEN, 0, 255);

  //delete the test directory
  testPath = path("/test");
  readFile = new Path(testPath, "TestReadPastBuffer.txt");
  createFile(readFile, block);
}
 
/**
 * Test uploads a very large partitioned file and verifies that
 * it comes back unchanged.
 * @throws Throwable
 */
@Test(timeout = SWIFT_BULK_IO_TEST_TIMEOUT)
public void testManyPartitionedFile() throws Throwable {
  final Path path = new Path("/test/testManyPartitionedFile");

  int len = PART_SIZE_BYTES * 15;
  final byte[] src = SwiftTestUtils.dataset(len, 32, 144);
  FSDataOutputStream out = fs.create(path,
                                     false,
                                     getBufferSize(),
                                     (short) 1,
                                     BLOCK_SIZE);

  out.write(src, 0, src.length);
  int expected =
    getExpectedPartitionsWritten(len, PART_SIZE_BYTES, true);
  out.close();
  assertPartitionsWritten("write completed", out, expected);
  assertEquals("too few bytes written", len,
               SwiftNativeFileSystem.getBytesWritten(out));
  assertEquals("too few bytes uploaded", len,
               SwiftNativeFileSystem.getBytesUploaded(out));
  //now we verify that the data comes back. If it
  //doesn't, it means that the ordering of the partitions
  //isn't right
  byte[] dest = readDataset(fs, path, len);
  //compare data
  SwiftTestUtils.compareByteArrays(src, dest, len);
  //finally, check the data
  FileStatus[] stats = fs.listStatus(path);
  assertEquals("wrong entry count in "
               + SwiftTestUtils.dumpStats(path.toString(), stats),
               expected, stats.length);
}
 
Example 7  Project: sahara-extra   File: TestSeek.java
/**
 * Setup creates dirs under test/hadoop
 *
 * @throws Exception
 */
@Override
public void setUp() throws Exception {
  super.setUp();
  //delete the test directory
  testPath = path("/test");
  smallSeekFile = new Path(testPath, "seekfile.txt");
  zeroByteFile = new Path(testPath, "zero.txt");
  byte[] block = SwiftTestUtils.dataset(SMALL_SEEK_FILE_LEN, 0, 255);
  //this file now has a simple rule: offset => value
  createFile(smallSeekFile, block);
  createEmptyFile(zeroByteFile);
}
 
Example 8  Project: hadoop   File: TestSeek.java
/**
 * Setup creates dirs under test/hadoop
 *
 * @throws Exception
 */
@Override
public void setUp() throws Exception {
  super.setUp();
  //delete the test directory
  testPath = path("/test");
  smallSeekFile = new Path(testPath, "seekfile.txt");
  zeroByteFile = new Path(testPath, "zero.txt");
  byte[] block = SwiftTestUtils.dataset(SMALL_SEEK_FILE_LEN, 0, 255);
  //this file now has a simple rule: offset => value
  createFile(smallSeekFile, block);
  createEmptyFile(zeroByteFile);
}
 
/**
 * Test uploads a very large partitioned file and verifies that
 * it comes back unchanged.
 * @throws Throwable
 */
@Test(timeout = SWIFT_BULK_IO_TEST_TIMEOUT)
public void testManyPartitionedFile() throws Throwable {
  final Path path = new Path("/test/testManyPartitionedFile");

  int len = PART_SIZE_BYTES * 15;
  final byte[] src = SwiftTestUtils.dataset(len, 32, 144);
  FSDataOutputStream out = fs.create(path,
                                     false,
                                     getBufferSize(),
                                     (short) 1,
                                     BLOCK_SIZE);

  out.write(src, 0, src.length);
  int expected =
    getExpectedPartitionsWritten(len, PART_SIZE_BYTES, true);
  out.close();
  assertPartitionsWritten("write completed", out, expected);
  assertEquals("too few bytes written", len,
               SwiftNativeFileSystem.getBytesWritten(out));
  assertEquals("too few bytes uploaded", len,
               SwiftNativeFileSystem.getBytesUploaded(out));
  //now we verify that the data comes back. If it
  //doesn't, it means that the ordering of the partitions
  //isn't right
  byte[] dest = readDataset(fs, path, len);
  //compare data
  SwiftTestUtils.compareByteArrays(src, dest, len);
  //finally, check the data
  FileStatus[] stats = getStore().listSegments(fs.getFileStatus(path), true);
  assertEquals("wrong entry count in "
               + SwiftTestUtils.dumpStats(path.toString(), stats),
               expected, stats.length);
}
 
/**
 * Test that writing a partitioned file works when the path is fully qualified.
 * @throws Throwable
 */
@Test(timeout = SWIFT_BULK_IO_TEST_TIMEOUT)
public void testQualifiedPath() throws Throwable {
  final Path path = path("/test/qualifiedPath");
  int len = PART_SIZE_BYTES * 4;
  final byte[] src = SwiftTestUtils.dataset(len, 32, 144);
  FSDataOutputStream out = fs.create(path,
                                     false,
                                     getBufferSize(),
                                     (short) 1,
                                     BLOCK_SIZE);

  out.write(src, 0, src.length);
  int expected =
    getExpectedPartitionsWritten(len, PART_SIZE_BYTES, true);
  out.close();
  assertPartitionsWritten("write completed", out, expected);
  assertEquals("too few bytes written", len,
               SwiftNativeFileSystem.getBytesWritten(out));
  assertEquals("too few bytes uploaded", len,
               SwiftNativeFileSystem.getBytesUploaded(out));
  //now we verify that the data comes back. If it
  //doesn't, it means that the ordering of the partitions
  //isn't right
  byte[] dest = readDataset(fs, path, len);
  //compare data
  SwiftTestUtils.compareByteArrays(src, dest, len);
  //finally, check the data
  FileStatus[] stats = getStore().listSegments(fs.getFileStatus(path), true);
  assertEquals("wrong entry count in "
               + SwiftTestUtils.dumpStats(path.toString(), stats),
               expected, stats.length);
}
 
/**
 * Test that when a partitioned file is overwritten by a smaller one,
 * all the old partitioned files go away
 * @throws Throwable
 */
@Test(timeout = SWIFT_BULK_IO_TEST_TIMEOUT)
public void testOverwritePartitionedFile() throws Throwable {
  final Path path = new Path("/test/testOverwritePartitionedFile");

  final int len1 = 8192;
  final byte[] src1 = SwiftTestUtils.dataset(len1, 'A', 'Z');
  FSDataOutputStream out = fs.create(path,
                                     false,
                                     getBufferSize(),
                                     (short) 1,
                                     1024);
  out.write(src1, 0, len1);
  out.close();
  long expected = getExpectedPartitionsWritten(len1,
                                               PART_SIZE_BYTES,
                                               false);
  assertPartitionsWritten("initial upload", out, expected);
  assertExists("Exists", path);
  FileStatus status = fs.getFileStatus(path);
  assertEquals("Length", len1, status.getLen());
  //now write a shorter file with a different dataset
  final int len2 = 4095;
  final byte[] src2 = SwiftTestUtils.dataset(len2, 'a', 'z');
  out = fs.create(path,
                  true,
                  getBufferSize(),
                  (short) 1,
                  1024);
  out.write(src2, 0, len2);
  out.close();
  status = fs.getFileStatus(path);
  assertEquals("Length", len2, status.getLen());
  byte[] dest = readDataset(fs, path, len2);
  //compare data
  SwiftTestUtils.compareByteArrays(src2, dest, len2);
}
 
Example 14  Project: sahara-extra   File: TestReadPastBuffer.java
/**
 * Setup creates dirs under test/hadoop
 *
 * @throws Exception
 */
@Override
public void setUp() throws Exception {
  super.setUp();
  byte[] block = SwiftTestUtils.dataset(SEEK_FILE_LEN, 0, 255);

  //delete the test directory
  testPath = path("/test");
  readFile = new Path(testPath, "TestReadPastBuffer.txt");
  createFile(readFile, block);
}
 
/**
 * Tests the partitioned-upload functionality used for big (> 5GB) files.
 */
@Test(timeout = SWIFT_BULK_IO_TEST_TIMEOUT)
public void testFilePartUpload() throws Throwable {

  final Path path = new Path("/test/testFilePartUpload");

  int len = 8192;
  final byte[] src = SwiftTestUtils.dataset(len, 32, 144);
  FSDataOutputStream out = fs.create(path,
                                     false,
                                     getBufferSize(),
                                     (short) 1,
                                     BLOCK_SIZE);

  try {
    int totalPartitionsToWrite = len / PART_SIZE_BYTES;
    assertPartitionsWritten("Startup", out, 0);
    //write 2048
    int firstWriteLen = 2048;
    out.write(src, 0, firstWriteLen);
    //assert
    long expected = getExpectedPartitionsWritten(firstWriteLen,
                                                 PART_SIZE_BYTES,
                                                 false);
    SwiftUtils.debug(LOG, "First write: predict %d partitions written",
                     expected);
    assertPartitionsWritten("First write completed", out, expected);
    //write the rest
    int remainder = len - firstWriteLen;
    SwiftUtils.debug(LOG, "remainder: writing: %d bytes", remainder);

    out.write(src, firstWriteLen, remainder);
    expected =
      getExpectedPartitionsWritten(len, PART_SIZE_BYTES, false);
    assertPartitionsWritten("Remaining data", out, expected);
    out.close();
    expected =
      getExpectedPartitionsWritten(len, PART_SIZE_BYTES, true);
    assertPartitionsWritten("Stream closed", out, expected);

    Header[] headers = fs.getStore().getObjectHeaders(path, true);
    for (Header header : headers) {
      LOG.info(header.toString());
    }

    byte[] dest = readDataset(fs, path, len);
    LOG.info("Read dataset from " + path + ": data length =" + len);
    //compare data
    SwiftTestUtils.compareByteArrays(src, dest, len);
    FileStatus status;

    final Path qualifiedPath = path.makeQualified(fs);
    status = fs.getFileStatus(qualifiedPath);
    //now see what block location info comes back.
    //This will vary depending on the Swift version, so the results
    //aren't checked -merely that the test actually worked
    BlockLocation[] locations = fs.getFileBlockLocations(status, 0, len);
    assertNotNull("Null getFileBlockLocations()", locations);
    assertTrue("empty array returned for getFileBlockLocations()",
               locations.length > 0);

    //last bit of the test -this seems to play up on partitioned files,
    //so an assertion failure here is downgraded to a skip
    try {
      validatePathLen(path, len);
    } catch (AssertionError e) {
      //downgrade to a skip
      throw new AssumptionViolatedException(e, null);
    }

  } finally {
    IOUtils.closeStream(out);
  }
}
 
/**
 * Tests the partitioned-upload functionality used for big (> 5GB) files.
 */
@Test(timeout = SWIFT_BULK_IO_TEST_TIMEOUT)
public void testFilePartUploadNoLengthCheck() throws IOException, URISyntaxException {

  final Path path = new Path("/test/testFilePartUploadLengthCheck");

  int len = 8192;
  final byte[] src = SwiftTestUtils.dataset(len, 32, 144);
  FSDataOutputStream out = fs.create(path,
                                     false,
                                     getBufferSize(),
                                     (short) 1,
                                     BLOCK_SIZE);

  try {
    int totalPartitionsToWrite = len / PART_SIZE_BYTES;
    assertPartitionsWritten("Startup", out, 0);
    //write 2048
    int firstWriteLen = 2048;
    out.write(src, 0, firstWriteLen);
    //assert
    long expected = getExpectedPartitionsWritten(firstWriteLen,
                                                 PART_SIZE_BYTES,
                                                 false);
    SwiftUtils.debug(LOG, "First write: predict %d partitions written",
                     expected);
    assertPartitionsWritten("First write completed", out, expected);
    //write the rest
    int remainder = len - firstWriteLen;
    SwiftUtils.debug(LOG, "remainder: writing: %d bytes", remainder);

    out.write(src, firstWriteLen, remainder);
    expected =
      getExpectedPartitionsWritten(len, PART_SIZE_BYTES, false);
    assertPartitionsWritten("Remaining data", out, expected);
    out.close();
    expected =
      getExpectedPartitionsWritten(len, PART_SIZE_BYTES, true);
    assertPartitionsWritten("Stream closed", out, expected);

    Header[] headers = fs.getStore().getObjectHeaders(path, true);
    for (Header header : headers) {
      LOG.info(header.toString());
    }

    byte[] dest = readDataset(fs, path, len);
    LOG.info("Read dataset from " + path + ": data length =" + len);
    //compare data
    SwiftTestUtils.compareByteArrays(src, dest, len);
    FileStatus status = fs.getFileStatus(path);

    //now see what block location info comes back.
    //This will vary depending on the Swift version, so the results
    //aren't checked -merely that the test actually worked
    BlockLocation[] locations = fs.getFileBlockLocations(status, 0, len);
    assertNotNull("Null getFileBlockLocations()", locations);
    assertTrue("empty array returned for getFileBlockLocations()",
               locations.length > 0);
  } finally {
    IOUtils.closeStream(out);
  }
}
 