org.apache.hadoop.io.file.tfile.TFile.Reader#close ( )源码实例Demo

下面列出了org.apache.hadoop.io.file.tfile.TFile.Reader#close ( ) 实例代码,或者点击链接到github查看源代码,也可以在右侧发表评论。

源代码1 项目: hadoop-gpu   文件: TestTFileByteArrays.java
/**
 * Verifies that Scanner.lowerBound rejects a negative key offset (-1).
 * The expected failure is any exception from lowerBound; reaching
 * Assert.fail means the bad offset was silently accepted.
 */
public void testFailureNegativeOffset_2() throws IOException {
  if (skip)
    return;
  closeOutput();

  Reader reader = new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf);
  Scanner scanner = reader.createScanner();
  try {
    scanner.lowerBound("keyX".getBytes(), -1, 4);
    Assert.fail("Error on handling negative offset.");
  }
  catch (Exception e) {
    // noop, expecting exceptions
  }
  finally {
    // Fixed: close the scanner before the reader it was created from;
    // the original closed the reader first, leaving the scanner on a
    // dead stream.
    scanner.close();
    reader.close();
  }
  closeOutput();
}
 
源代码2 项目: hadoop-gpu   文件: TestTFileStreams.java
/**
 * Verifies that Entry.getKey(buf, -1) rejects a negative buffer offset.
 * Fixed: the original had an empty finally block and closed the scanner
 * and reader outside it, leaking both if the assertion failure propagated.
 */
public void testFailureNegativeOffset() throws IOException {
  if (skip)
    return;
  writeRecords(2, true, true);

  Reader reader = new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf);
  Scanner scanner = reader.createScanner();
  byte[] buf = new byte[K];
  try {
    scanner.entry().getKey(buf, -1);
    Assert.fail("Failed to handle key negative offset.");
  }
  catch (Exception e) {
    // noop, expecting exceptions
  }
  finally {
    // Always release resources, even when Assert.fail throws.
    scanner.close();
    reader.close();
  }
}
 
源代码3 项目: RDFS   文件: TestTFileByteArrays.java
/**
 * A meta block that was written ("testX") must be retrievable; asking for
 * one that was never written ("testY") must throw.
 * Fixed: reader.close() now runs in a finally block so the reader is
 * released even if an assertion above it fails; the unused local for the
 * bad meta block was dropped.
 */
@Test
public void testFailureGetNonExistentMetaBlock() throws IOException {
  if (skip)
    return;
  writer.append("keyX".getBytes(), "valueX".getBytes());

  // create a new metablock
  DataOutputStream outMeta =
      writer.prepareMetaBlock("testX", Compression.Algorithm.GZ.getName());
  outMeta.write(123);
  outMeta.write("foo".getBytes());
  outMeta.close();
  closeOutput();

  Reader reader = new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf);
  try {
    DataInputStream mb = reader.getMetaBlock("testX");
    Assert.assertNotNull(mb);
    mb.close();
    try {
      reader.getMetaBlock("testY");
      Assert.fail("Error on handling non-existent metablocks.");
    } catch (Exception e) {
      // noop, expecting exceptions
    }
  } finally {
    reader.close();
  }
}
 
源代码4 项目: big-c   文件: TestTFileUnsortedByteArrays.java
/**
 * Creating a keyed scanner over an unsorted TFile must throw a
 * RuntimeException.
 * Fixed: if creation unexpectedly succeeds (the test-failure path), the
 * scanner is now closed in the finally block instead of leaking; the
 * assertEquals arguments were also put in JUnit's (expected, actual) order.
 */
public void testFailureScannerWithKeys() throws IOException {
  Reader reader =
      new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf);
  Assert.assertFalse(reader.isSorted());
  Assert.assertEquals(4, (int) reader.getEntryCount());

  Scanner scanner = null;
  try {
    scanner = reader.createScannerByKey("aaa".getBytes(), "zzz".getBytes());
    Assert
        .fail("Failed to catch creating scanner with keys on unsorted file.");
  }
  catch (RuntimeException e) {
    // expected: keyed scanners are unsupported on unsorted files
  }
  finally {
    if (scanner != null) {
      // only reached when the expected exception was NOT thrown
      scanner.close();
    }
    reader.close();
  }
}
 
源代码5 项目: hadoop-gpu   文件: TestTFileUnsortedByteArrays.java
/**
 * Creating a keyed scanner over an unsorted TFile must throw a
 * RuntimeException.
 * Fixed: if creation unexpectedly succeeds (the test-failure path), the
 * scanner is now closed in the finally block instead of leaking; the
 * assertEquals arguments were also put in JUnit's (expected, actual) order.
 */
public void testFailureScannerWithKeys() throws IOException {
  Reader reader =
      new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf);
  Assert.assertFalse(reader.isSorted());
  Assert.assertEquals(4, (int) reader.getEntryCount());

  Scanner scanner = null;
  try {
    scanner = reader.createScanner("aaa".getBytes(), "zzz".getBytes());
    Assert
        .fail("Failed to catch creating scanner with keys on unsorted file.");
  }
  catch (RuntimeException e) {
    // expected: keyed scanners are unsupported on unsorted files
  }
  finally {
    if (scanner != null) {
      // only reached when the expected exception was NOT thrown
      scanner.close();
    }
    reader.close();
  }
}
 
源代码6 项目: hadoop   文件: TestTFileByteArrays.java
/**
 * Verifies that Scanner.lowerBound rejects a negative key offset (-1).
 * The expected failure is any exception from lowerBound; reaching
 * Assert.fail means the bad offset was silently accepted.
 */
@Test
public void testFailureNegativeOffset_2() throws IOException {
  if (skip)
    return;
  closeOutput();

  Reader reader = new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf);
  Scanner scanner = reader.createScanner();
  try {
    scanner.lowerBound("keyX".getBytes(), -1, 4);
    Assert.fail("Error on handling negative offset.");
  } catch (Exception e) {
    // noop, expecting exceptions
  } finally {
    // Fixed: close the scanner before the reader it was created from;
    // the original closed the reader first.
    scanner.close();
    reader.close();
  }
  closeOutput();
}
 
源代码7 项目: hadoop   文件: TestTFileByteArrays.java
/**
 * Verifies that Scanner.lowerBound rejects a negative key length (-1).
 * Any exception from lowerBound is the expected outcome; falling through
 * to Assert.fail means the bad length was silently accepted.
 */
@Test
public void testFailureNegativeLength_2() throws IOException {
  if (skip) {
    return;
  }
  closeOutput();

  Reader reader =
      new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf);
  Scanner scanner = reader.createScanner();
  try {
    scanner.lowerBound("keyX".getBytes(), 0, -1);
    Assert.fail("Error on handling negative length.");
  } catch (Exception expected) {
    // the negative length must be rejected; nothing further to verify
  } finally {
    scanner.close();
    reader.close();
  }
  closeOutput();
}
 
源代码8 项目: big-c   文件: TestTFileByteArrays.java
/**
 * Positions a scanner at the entry keyed by recordIndex, reads its VALUE
 * first and then its KEY, and checks both against the expected composed
 * strings.
 */
private void readValueBeforeKey(int recordIndex)
    throws IOException {
  Reader reader =
      new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf);
  Scanner scanner =
      reader.createScannerByKey(
          composeSortedKey(KEY, recordIndex).getBytes(), null);

  try {
    // Fetch the value before ever touching the key.
    byte[] valueBuf = new byte[BUF_SIZE];
    int valueLen = scanner.entry().getValueLength();
    scanner.entry().getValue(valueBuf);
    Assert.assertEquals(new String(valueBuf, 0, valueLen),
        VALUE + recordIndex);

    // Now read the key of the same entry.
    byte[] keyBuf = new byte[BUF_SIZE];
    int keyLen = scanner.entry().getKeyLength();
    scanner.entry().getKey(keyBuf);
    Assert.assertEquals(new String(keyBuf, 0, keyLen),
        composeSortedKey(KEY, recordIndex));
  } finally {
    scanner.close();
    reader.close();
  }
}
 
源代码9 项目: hadoop-gpu   文件: TestTFileByteArrays.java
/**
 * Positions a scanner at the entry keyed by (count, recordIndex), reads
 * its VALUE first and then its KEY, and checks both against the expected
 * composed strings.
 */
private void readValueBeforeKey(int count, int recordIndex)
    throws IOException {
  Reader reader =
      new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf);
  Scanner scanner =
      reader.createScanner(
          composeSortedKey(KEY, count, recordIndex).getBytes(), null);

  try {
    // Fetch the value before ever touching the key.
    byte[] valueBuf = new byte[BUF_SIZE];
    int valueLen = scanner.entry().getValueLength();
    scanner.entry().getValue(valueBuf);
    Assert.assertEquals(new String(valueBuf, 0, valueLen),
        VALUE + recordIndex);

    // Now read the key of the same entry.
    byte[] keyBuf = new byte[BUF_SIZE];
    int keyLen = scanner.entry().getKeyLength();
    scanner.entry().getKey(keyBuf);
    Assert.assertEquals(new String(keyBuf, 0, keyLen),
        composeSortedKey(KEY, count, recordIndex));
  } finally {
    scanner.close();
    reader.close();
  }
}
 
源代码10 项目: RDFS   文件: TestTFileByteArrays.java
/**
 * Scans to the entry keyed by recordIndex and verifies that reading the
 * value before the key works: value is read and checked first, then the
 * key of the same entry.
 */
private void readValueBeforeKey(int recordIndex)
    throws IOException {
  Reader reader =
      new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf);
  Scanner scanner =
      reader.createScannerByKey(
          composeSortedKey(KEY, recordIndex).getBytes(), null);

  try {
    byte[] valueBuf = new byte[BUF_SIZE];
    int valueLen = scanner.entry().getValueLength();
    scanner.entry().getValue(valueBuf);
    Assert.assertEquals(new String(valueBuf, 0, valueLen),
        VALUE + recordIndex);

    byte[] keyBuf = new byte[BUF_SIZE];
    int keyLen = scanner.entry().getKeyLength();
    scanner.entry().getKey(keyBuf);
    Assert.assertEquals(new String(keyBuf, 0, keyLen),
        composeSortedKey(KEY, recordIndex));
  } finally {
    scanner.close();
    reader.close();
  }
}
 
源代码11 项目: big-c   文件: TestTFileByteArrays.java
/**
 * A meta block that was written ("testX") must be retrievable; asking for
 * one that was never written ("testY") must throw.
 * Fixed: reader.close() now runs in a finally block so the reader is
 * released even if an assertion above it fails; the unused local for the
 * bad meta block was dropped.
 */
@Test
public void testFailureGetNonExistentMetaBlock() throws IOException {
  if (skip)
    return;
  writer.append("keyX".getBytes(), "valueX".getBytes());

  // create a new metablock
  DataOutputStream outMeta =
      writer.prepareMetaBlock("testX", Compression.Algorithm.GZ.getName());
  outMeta.write(123);
  outMeta.write("foo".getBytes());
  outMeta.close();
  closeOutput();

  Reader reader = new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf);
  try {
    DataInputStream mb = reader.getMetaBlock("testX");
    Assert.assertNotNull(mb);
    mb.close();
    try {
      reader.getMetaBlock("testY");
      Assert.fail("Error on handling non-existent metablocks.");
    } catch (Exception e) {
      // noop, expecting exceptions
    }
  } finally {
    reader.close();
  }
}
 
源代码12 项目: big-c   文件: TestTFileByteArrays.java
/**
 * Seeks to the entry keyed by recordIndex and asserts the scanner landed
 * in the expected TFile block.
 * Fixed: scanner and reader are now closed in a finally block so they are
 * released even when seekTo or the assertion fails.
 */
private void checkBlockIndex(int recordIndex, int blockIndexExpected) throws IOException {
  Reader reader = new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf);
  Scanner scanner = reader.createScanner();
  try {
    scanner.seekTo(composeSortedKey(KEY, recordIndex).getBytes());
    Assert.assertEquals(blockIndexExpected, scanner.currentLocation
        .getBlockIndex());
  } finally {
    scanner.close();
    reader.close();
  }
}
 
源代码13 项目: big-c   文件: TestTFile.java
/**
 * Round-trips meta blocks: writes them with an uncompressed ("none")
 * writer, then reads them back.
 * Fixed: the original deleted the file while the reader and input stream
 * were still open; the delete now happens after both are closed, and the
 * closes run in a finally block so a read failure cannot leak them.
 */
public void testMetaBlocks() throws IOException {
  Path mFile = new Path(ROOT, "meta.tfile");
  FSDataOutputStream fout = createFSOutput(mFile);
  Writer writer = new Writer(fout, minBlockSize, "none", null, conf);
  someTestingWithMetaBlock(writer, "none");
  writer.close();
  fout.close();
  FSDataInputStream fin = fs.open(mFile);
  Reader reader = new Reader(fin, fs.getFileStatus(mFile).getLen(), conf);
  try {
    someReadingWithMetaBlock(reader);
  } finally {
    reader.close();
    fin.close();
  }
  fs.delete(mFile, true);
}
 
源代码14 项目: hadoop   文件: TestTFileByteArrays.java
/**
 * Exercises locate() on keys inside the first block, at the block
 * boundary, and on a key ("keyX") beyond every record, which must resolve
 * to the scanner's end location.
 * Fixed: scanner and reader are now closed in a finally block so a failed
 * locate or assertion cannot leak them.
 */
@Test
public void testLocate() throws IOException {
  if (skip)
    return;
  writeRecords(3 * records1stBlock);
  Reader reader = new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf);
  Scanner scanner = reader.createScanner();
  try {
    locate(scanner, composeSortedKey(KEY, 2).getBytes());
    locate(scanner, composeSortedKey(KEY, records1stBlock - 1).getBytes());
    locate(scanner, composeSortedKey(KEY, records1stBlock).getBytes());
    Location locX = locate(scanner, "keyX".getBytes());
    Assert.assertEquals(scanner.endLocation, locX);
  } finally {
    scanner.close();
    reader.close();
  }
}
 
源代码15 项目: hadoop-gpu   文件: TestTFileByteArrays.java
/**
 * Reads keys only (values are never touched): first the key the scanner
 * was positioned at, then — if another entry exists — the key that
 * follows it, verifying both against the expected composed keys.
 */
private void readKeyWithoutValue(int count, int recordIndex)
    throws IOException {
  Reader reader = new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf);
  Scanner scanner =
      reader.createScanner(
          composeSortedKey(KEY, count, recordIndex).getBytes(), null);

  try {
    // Key of the entry the scanner was positioned at.
    byte[] firstKeyBuf = new byte[BUF_SIZE];
    int firstKeyLen = scanner.entry().getKeyLength();
    scanner.entry().getKey(firstKeyBuf);
    Assert.assertEquals(new String(firstKeyBuf, 0, firstKeyLen),
        composeSortedKey(KEY, count, recordIndex));

    if (scanner.advance() && !scanner.atEnd()) {
      // Key of the entry immediately after the indexed one.
      byte[] nextKeyBuf = new byte[BUF_SIZE];
      int nextKeyLen = scanner.entry().getKeyLength();
      scanner.entry().getKey(nextKeyBuf);
      Assert.assertEquals(new String(nextKeyBuf, 0, nextKeyLen),
          composeSortedKey(KEY, count, recordIndex + 1));
    }
  } finally {
    scanner.close();
    reader.close();
  }
}
 
源代码16 项目: hadoop-gpu   文件: TestTFileByteArrays.java
/**
 * Verifies that re-reading the key of the same entry is idempotent: three
 * consecutive reads must all yield the expected composed key.
 * Fixed: the original had no try/finally, leaking the scanner and reader
 * whenever an assertion failed; the thrice-duplicated read sequence is
 * now a loop.
 */
private void readKeyManyTimes(int count, int recordIndex) throws IOException {
  Reader reader = new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf);

  Scanner scanner =
      reader.createScanner(composeSortedKey(KEY, count, recordIndex)
          .getBytes(), null);
  try {
    String expectedKey = composeSortedKey(KEY, count, recordIndex);
    byte[] kbuf = new byte[BUF_SIZE];
    // Each iteration re-reads the same entry's key; the result must not
    // change between reads.
    for (int i = 0; i < 3; ++i) {
      int klen = scanner.entry().getKeyLength();
      scanner.entry().getKey(kbuf);
      Assert.assertEquals(new String(kbuf, 0, klen), expectedKey);
    }
  } finally {
    scanner.close();
    reader.close();
  }
}
 
源代码17 项目: hadoop-gpu   文件: TestTFileUnsortedByteArrays.java
/**
 * Scans the first two entries of an unsorted TFile: for the first entry
 * the key is read before the value, for the second the value is read
 * before the key — both orders must work.
 */
public void testScanRange() throws IOException {
  Reader reader =
      new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf);
  Assert.assertFalse(reader.isSorted());
  Assert.assertEquals((int) reader.getEntryCount(), 4);

  Scanner scanner = reader.createScanner();

  try {
    // First entry: key before value.
    byte[] keyBuf = new byte[BUF_SIZE];
    int keyLen = scanner.entry().getKeyLength();
    scanner.entry().getKey(keyBuf);
    Assert.assertEquals(new String(keyBuf, 0, keyLen), "keyZ");

    byte[] valueBuf = new byte[BUF_SIZE];
    int valueLen = scanner.entry().getValueLength();
    scanner.entry().getValue(valueBuf);
    Assert.assertEquals(new String(valueBuf, 0, valueLen), "valueZ");

    scanner.advance();

    // Second entry: value before key.
    valueBuf = new byte[BUF_SIZE];
    valueLen = scanner.entry().getValueLength();
    scanner.entry().getValue(valueBuf);
    Assert.assertEquals(new String(valueBuf, 0, valueLen), "valueM");

    keyBuf = new byte[BUF_SIZE];
    keyLen = scanner.entry().getKeyLength();
    scanner.entry().getKey(keyBuf);
    Assert.assertEquals(new String(keyBuf, 0, keyLen), "keyM");
  } finally {
    scanner.close();
    reader.close();
  }
}
 
源代码18 项目: hadoop   文件: TestTFileByteArrays.java
/**
 * Verifies that re-reading the key of the same entry is idempotent: three
 * consecutive reads must all yield the expected composed key.
 * Fixed: the original had no try/finally, leaking the scanner and reader
 * whenever an assertion failed; the thrice-duplicated read sequence is
 * now a loop.
 */
private void readKeyManyTimes(int recordIndex) throws IOException {
  Reader reader = new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf);

  Scanner scanner =
      reader.createScannerByKey(composeSortedKey(KEY, recordIndex)
          .getBytes(), null);
  try {
    String expectedKey = composeSortedKey(KEY, recordIndex);
    byte[] kbuf = new byte[BUF_SIZE];
    // Each iteration re-reads the same entry's key; the result must not
    // change between reads.
    for (int i = 0; i < 3; ++i) {
      int klen = scanner.entry().getKeyLength();
      scanner.entry().getKey(kbuf);
      Assert.assertEquals(new String(kbuf, 0, klen), expectedKey);
    }
  } finally {
    scanner.close();
    reader.close();
  }
}
 
源代码19 项目: big-c   文件: TestTFileByteArrays.java
/**
 * Verifies that re-reading the key of the same entry is idempotent: three
 * consecutive reads must all yield the expected composed key.
 * Fixed: the original had no try/finally, leaking the scanner and reader
 * whenever an assertion failed; the thrice-duplicated read sequence is
 * now a loop.
 */
private void readKeyManyTimes(int recordIndex) throws IOException {
  Reader reader = new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf);

  Scanner scanner =
      reader.createScannerByKey(composeSortedKey(KEY, recordIndex)
          .getBytes(), null);
  try {
    String expectedKey = composeSortedKey(KEY, recordIndex);
    byte[] kbuf = new byte[BUF_SIZE];
    // Each iteration re-reads the same entry's key; the result must not
    // change between reads.
    for (int i = 0; i < 3; ++i) {
      int klen = scanner.entry().getKeyLength();
      scanner.entry().getKey(kbuf);
      Assert.assertEquals(new String(kbuf, 0, klen), expectedKey);
    }
  } finally {
    scanner.close();
    reader.close();
  }
}
 
源代码20 项目: big-c   文件: TestTFile.java
/**
 * Exercises the basic read paths (sequential scan, seekTo, lowerBound,
 * upperBound, ranged scanner) of a TFile written with the given codec.
 *
 * Fixed: the original opened the file twice — it created {@code fin} and
 * then passed a second {@code fs.open(ncTFile)} to the Reader, leaking
 * one stream; the Reader now reuses {@code fin}. The "locaton" typo in an
 * assertion message is corrected, and the scanner/reader/stream are
 * closed in a finally block so an assertion failure cannot leak them.
 *
 * @param codec compression algorithm name passed to the TFile writer
 */
void basicWithSomeCodec(String codec) throws IOException {
  Path ncTFile = new Path(ROOT, "basic.tfile");
  FSDataOutputStream fout = createFSOutput(ncTFile);
  Writer writer = new Writer(fout, minBlockSize, codec, "memcmp", conf);
  // NOTE(review): writer is not closed here; presumably writeRecords()
  // closes it before fout.close() — confirm against that helper.
  writeRecords(writer);
  fout.close();
  FSDataInputStream fin = fs.open(ncTFile);
  Reader reader =
      new Reader(fin, fs.getFileStatus(ncTFile).getLen(), conf);

  Scanner scanner = reader.createScanner();
  try {
    readAllRecords(scanner);
    scanner.seekTo(getSomeKey(50));
    assertTrue("location lookup failed", scanner.seekTo(getSomeKey(50)));
    // read the key and see if it matches
    byte[] readKey = readKey(scanner);
    assertTrue("seeked key does not match", Arrays.equals(getSomeKey(50),
        readKey));

    // seeking to the empty key must land on the first record both times
    scanner.seekTo(new byte[0]);
    byte[] val1 = readValue(scanner);
    scanner.seekTo(new byte[0]);
    byte[] val2 = readValue(scanner);
    assertTrue(Arrays.equals(val1, val2));

    // check for lowerBound
    scanner.lowerBound(getSomeKey(50));
    assertTrue("location lookup failed", scanner.currentLocation
        .compareTo(reader.end()) < 0);
    readKey = readKey(scanner);
    assertTrue("seeked key does not match", Arrays.equals(readKey,
        getSomeKey(50)));

    // check for upper bound
    scanner.upperBound(getSomeKey(50));
    assertTrue("location lookup failed", scanner.currentLocation
        .compareTo(reader.end()) < 0);
    readKey = readKey(scanner);
    assertTrue("seeked key does not match", Arrays.equals(readKey,
        getSomeKey(51)));

    scanner.close();
    // test for a range of scanner
    scanner = reader.createScannerByKey(getSomeKey(10), getSomeKey(60));
    readAndCheckbytes(scanner, 10, 50);
    assertFalse(scanner.advance());
  } finally {
    scanner.close();
    reader.close();
    fin.close();
  }
  fs.delete(ncTFile, true);
}