Source code examples for the class org.apache.hadoop.io.file.tfile.TFile.Writer

The examples below show how the org.apache.hadoop.io.file.tfile.TFile.Writer API is used in practice; each snippet is taken from the project and file named above it, and the full source can be viewed on GitHub.
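For orientation, here is a minimal, self-contained sketch of a typical write-then-read round trip with TFile.Writer. It is not taken from any of the projects below; the output path, block size, key format, and record count are illustrative assumptions. The constructor takes the same arguments used throughout the excerpts: a raw FSDataOutputStream, a minimum block size, a compression name ("none", "lzo", or "gz"), a comparator ("memcmp" for a byte-sorted file, null for unsorted), and a Configuration.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.file.tfile.TFile;

public class TFileWriterDemo {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    Path file = new Path("/tmp/demo.tfile");          // illustrative path, not from the excerpts
    FileSystem fs = file.getFileSystem(conf);

    // Write a byte-sorted TFile ("memcmp" comparator) with gzip-compressed blocks.
    FSDataOutputStream out = fs.create(file);
    TFile.Writer writer = new TFile.Writer(out, 64 * 1024, "gz", "memcmp", conf);
    for (int i = 0; i < 10; i++) {
      String key = String.format("key%05d", i);       // keys must be appended in sorted order
      writer.append(key.getBytes(), ("value" + i).getBytes());
    }
    writer.close();
    out.close();

    // Read every record back with a sequential scanner.
    FSDataInputStream in = fs.open(file);
    TFile.Reader reader =
        new TFile.Reader(in, fs.getFileStatus(file).getLen(), conf);
    TFile.Reader.Scanner scanner = reader.createScanner();
    BytesWritable key = new BytesWritable();
    BytesWritable value = new BytesWritable();
    while (!scanner.atEnd()) {
      scanner.entry().get(key, value);
      System.out.println(new String(key.getBytes(), 0, key.getLength()) + " -> "
          + new String(value.getBytes(), 0, value.getLength()));
      scanner.advance();
    }
    scanner.close();
    reader.close();
    in.close();

    fs.delete(file, true);
  }
}

Passing null as the comparator, as the unsorted examples below do, creates an unsorted TFile whose records are simply read back in insertion order.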

Example 1 (project: hadoop, file: TestTFile.java)
private int writePrepWithKnownLength(Writer writer, int start, int n)
    throws IOException {
  // get the length of the key
  String key = String.format(localFormatter, start);
  int keyLen = key.getBytes().length;
  String value = "value" + key;
  int valueLen = value.getBytes().length;
  for (int i = start; i < (start + n); i++) {
    DataOutputStream out = writer.prepareAppendKey(keyLen);
    String localKey = String.format(localFormatter, i);
    out.write(localKey.getBytes());
    out.close();
    out = writer.prepareAppendValue(valueLen);
    String localValue = "value" + localKey;
    out.write(localValue.getBytes());
    out.close();
  }
  return (start + n);
}
 
Example 2 (project: hadoop, file: TestTFile.java)
void unsortedWithSomeCodec(String codec) throws IOException {
  Path uTfile = new Path(ROOT, "unsorted.tfile");
  FSDataOutputStream fout = createFSOutput(uTfile);
  Writer writer = new Writer(fout, minBlockSize, codec, null, conf);
  writeRecords(writer);
  writer.close();
  fout.close();
  FSDataInputStream fin = fs.open(uTfile);
  Reader reader =
      new Reader(fs.open(uTfile), fs.getFileStatus(uTfile).getLen(), conf);

  Scanner scanner = reader.createScanner();
  readAllRecords(scanner);
  scanner.close();
  reader.close();
  fin.close();
  fs.delete(uTfile, true);
}
 
Example 3 (project: hadoop, file: TestTFileSplit.java)
void createFile(int count, String compress) throws IOException {
  conf = new Configuration();
  path = new Path(ROOT, outputFile + "." + compress);
  fs = path.getFileSystem(conf);
  FSDataOutputStream out = fs.create(path);
  Writer writer = new Writer(out, BLOCK_SIZE, compress, comparator, conf);

  int nx;
  for (nx = 0; nx < count; nx++) {
    byte[] key = composeSortedKey(KEY, count, nx).getBytes();
    byte[] value = (VALUE + nx).getBytes();
    writer.append(key, value);
  }
  writer.close();
  out.close();
}
 
Example 4 (project: hadoop, file: TestTFileByteArrays.java)
@Test
public void testFailureFileWriteNotAt0Position() throws IOException {
  if (skip)
    return;
  closeOutput();
  out = fs.create(path);
  out.write(123);

  try {
    writer = new Writer(out, BLOCK_SIZE, compression, comparator, conf);
    Assert.fail("Failed to catch file write not at position 0.");
  } catch (Exception e) {
    // noop, expecting exceptions
  }
  closeOutput();
}
 
Example 5 (project: big-c, file: TestTFile.java)
private int writePrepWithKnownLength(Writer writer, int start, int n)
    throws IOException {
  // get the length of the key
  String key = String.format(localFormatter, start);
  int keyLen = key.getBytes().length;
  String value = "value" + key;
  int valueLen = value.getBytes().length;
  for (int i = start; i < (start + n); i++) {
    DataOutputStream out = writer.prepareAppendKey(keyLen);
    String localKey = String.format(localFormatter, i);
    out.write(localKey.getBytes());
    out.close();
    out = writer.prepareAppendValue(valueLen);
    String localValue = "value" + localKey;
    out.write(localValue.getBytes());
    out.close();
  }
  return (start + n);
}
 
Example 6 (project: big-c, file: TestTFile.java)
void unsortedWithSomeCodec(String codec) throws IOException {
  Path uTfile = new Path(ROOT, "unsorted.tfile");
  FSDataOutputStream fout = createFSOutput(uTfile);
  Writer writer = new Writer(fout, minBlockSize, codec, null, conf);
  writeRecords(writer);
  writer.close();
  fout.close();
  FSDataInputStream fin = fs.open(uTfile);
  Reader reader =
      new Reader(fs.open(uTfile), fs.getFileStatus(uTfile).getLen(), conf);

  Scanner scanner = reader.createScanner();
  readAllRecords(scanner);
  scanner.close();
  reader.close();
  fin.close();
  fs.delete(uTfile, true);
}
 
Example 7 (project: big-c, file: TestTFileSplit.java)
void createFile(int count, String compress) throws IOException {
  conf = new Configuration();
  path = new Path(ROOT, outputFile + "." + compress);
  fs = path.getFileSystem(conf);
  FSDataOutputStream out = fs.create(path);
  Writer writer = new Writer(out, BLOCK_SIZE, compress, comparator, conf);

  int nx;
  for (nx = 0; nx < count; nx++) {
    byte[] key = composeSortedKey(KEY, count, nx).getBytes();
    byte[] value = (VALUE + nx).getBytes();
    writer.append(key, value);
  }
  writer.close();
  out.close();
}
 
Example 8 (project: big-c, file: TestTFileByteArrays.java)
@Test
public void testFailureFileWriteNotAt0Position() throws IOException {
  if (skip)
    return;
  closeOutput();
  out = fs.create(path);
  out.write(123);

  try {
    writer = new Writer(out, BLOCK_SIZE, compression, comparator, conf);
    Assert.fail("Failed to catch file write not at position 0.");
  } catch (Exception e) {
    // noop, expecting exceptions
  }
  closeOutput();
}
 
Example 9 (project: hadoop-gpu, file: TestTFileByteArrays.java)
public void testFailureFileWriteNotAt0Position() throws IOException {
  if (skip)
    return;
  closeOutput();
  out = fs.create(path);
  out.write(123);

  try {
    writer = new Writer(out, BLOCK_SIZE, compression, comparator, conf);
    Assert.fail("Failed to catch file write not at position 0.");
  }
  catch (Exception e) {
    // noop, expecting exceptions
  }
  closeOutput();
}
 
Example 10 (project: RDFS, file: TestTFile.java)
void unsortedWithSomeCodec(String codec) throws IOException {
  Path uTfile = new Path(ROOT, "unsorted.tfile");
  FSDataOutputStream fout = createFSOutput(uTfile);
  Writer writer = new Writer(fout, minBlockSize, codec, null, conf);
  writeRecords(writer);
  writer.close();
  fout.close();
  FSDataInputStream fin = fs.open(uTfile);
  Reader reader =
      new Reader(fs.open(uTfile), fs.getFileStatus(uTfile).getLen(), conf);

  Scanner scanner = reader.createScanner();
  readAllRecords(scanner);
  scanner.close();
  reader.close();
  fin.close();
  fs.delete(uTfile, true);
}
 
Example 11 (project: RDFS, file: TestTFileSplit.java)
void createFile(int count, String compress) throws IOException {
  conf = new Configuration();
  path = new Path(ROOT, outputFile + "." + compress);
  fs = path.getFileSystem(conf);
  FSDataOutputStream out = fs.create(path);
  Writer writer = new Writer(out, BLOCK_SIZE, compress, comparator, conf);

  int nx;
  for (nx = 0; nx < count; nx++) {
    byte[] key = composeSortedKey(KEY, count, nx).getBytes();
    byte[] value = (VALUE + nx).getBytes();
    writer.append(key, value);
  }
  writer.close();
  out.close();
}
 
Example 12 (project: RDFS, file: TestTFileByteArrays.java)
@Test
public void testFailureFileWriteNotAt0Position() throws IOException {
  if (skip)
    return;
  closeOutput();
  out = fs.create(path);
  out.write(123);

  try {
    writer = new Writer(out, BLOCK_SIZE, compression, comparator, conf);
    Assert.fail("Failed to catch file write not at position 0.");
  } catch (Exception e) {
    // noop, expecting exceptions
  }
  closeOutput();
}
 
Example 13 (project: hadoop-gpu, file: TestTFile.java)
private int writePrepWithKnownLength(Writer writer, int start, int n)
    throws IOException {
  // get the length of the key
  String key = String.format(localFormatter, start);
  int keyLen = key.getBytes().length;
  String value = "value" + key;
  int valueLen = value.getBytes().length;
  for (int i = start; i < (start + n); i++) {
    DataOutputStream out = writer.prepareAppendKey(keyLen);
    String localKey = String.format(localFormatter, i);
    out.write(localKey.getBytes());
    out.close();
    out = writer.prepareAppendValue(valueLen);
    String localValue = "value" + localKey;
    out.write(localValue.getBytes());
    out.close();
  }
  return (start + n);
}
 
Example 14 (project: hadoop-gpu, file: TestTFileSplit.java)
void createFile(int count, String compress) throws IOException {
  conf = new Configuration();
  path = new Path(ROOT, outputFile + "." + compress);
  fs = path.getFileSystem(conf);
  FSDataOutputStream out = fs.create(path);
  Writer writer = new Writer(out, BLOCK_SIZE, compress, comparator, conf);

  int nx;
  for (nx = 0; nx < count; nx++) {
    byte[] key = composeSortedKey(KEY, count, nx).getBytes();
    byte[] value = (VALUE + nx).getBytes();
    writer.append(key, value);
  }
  writer.close();
  out.close();
}
 
Example 15 (project: hadoop-gpu, file: TestTFile.java)
void unsortedWithSomeCodec(String codec) throws IOException {
  Path uTfile = new Path(ROOT, "unsorted.tfile");
  FSDataOutputStream fout = createFSOutput(uTfile);
  Writer writer = new Writer(fout, minBlockSize, codec, null, conf);
  writeRecords(writer);
  writer.close();
  fout.close();
  FSDataInputStream fin = fs.open(uTfile);
  Reader reader =
      new Reader(fs.open(uTfile), fs.getFileStatus(uTfile).getLen(), conf);

  Scanner scanner = reader.createScanner();
  readAllRecords(scanner);
  scanner.close();
  reader.close();
  fin.close();
  fs.delete(uTfile, true);
}
 
Example 16 (project: hadoop, file: TestTFile.java)
private int writeSomeRecords(Writer writer, int start, int n)
    throws IOException {
  String value = "value";
  for (int i = start; i < (start + n); i++) {
    String key = String.format(localFormatter, i);
    writer.append(key.getBytes(), (value + key).getBytes());
    writer.append(key.getBytes(), (value + key).getBytes());
  }
  return (start + n);
}
 
Example 17 (project: hadoop, file: TestTFile.java)
private int writeLargeRecords(Writer writer, int start, int n)
    throws IOException {
  byte[] value = new byte[largeVal];
  for (int i = start; i < (start + n); i++) {
    String key = String.format(localFormatter, i);
    writer.append(key.getBytes(), value);
    writer.append(key.getBytes(), value);
  }
  return (start + n);
}
 
Example 18 (project: hadoop, file: TestTFile.java)
private void writeEmptyRecords(Writer writer, int n) throws IOException {
  byte[] key = new byte[0];
  byte[] value = new byte[0];
  for (int i = 0; i < n; i++) {
    writer.append(key, value);
  }
}
 
Example 19 (project: hadoop, file: TestTFile.java)
private int writePrepWithUnkownLength(Writer writer, int start, int n)
    throws IOException {
  for (int i = start; i < (start + n); i++) {
    DataOutputStream out = writer.prepareAppendKey(-1);
    String localKey = String.format(localFormatter, i);
    out.write(localKey.getBytes());
    out.close();
    String value = "value" + localKey;
    out = writer.prepareAppendValue(-1);
    out.write(value.getBytes());
    out.close();
  }
  return (start + n);
}
 
Example 20 (project: hadoop, file: TestTFile.java)
private void writeRecords(Writer writer) throws IOException {
  writeEmptyRecords(writer, 10);
  int ret = writeSomeRecords(writer, 0, 100);
  ret = writeLargeRecords(writer, ret, 1);
  ret = writePrepWithKnownLength(writer, ret, 40);
  ret = writePrepWithUnkownLength(writer, ret, 50);
  writer.close();
}
 
Example 21 (project: hadoop-gpu, file: TestTFileStreams.java)
@Override
public void setUp() throws IOException {
  conf = new Configuration();
  path = new Path(ROOT, outputFile);
  fs = path.getFileSystem(conf);
  out = fs.create(path);
  writer = new Writer(out, BLOCK_SIZE, compression, comparator, conf);
}
 
Example 22 (project: hadoop, file: TestTFile.java)
private void writeNumMetablocks(Writer writer, String compression, int n)
    throws IOException {
  for (int i = 0; i < n; i++) {
    DataOutputStream dout =
        writer.prepareMetaBlock("TfileMeta" + i, compression);
    byte[] b = ("something to test" + i).getBytes();
    dout.write(b);
    dout.close();
  }
}
 
Example 23 (project: hadoop, file: TestTFile.java)
private void someTestingWithMetaBlock(Writer writer, String compression)
    throws IOException {
  DataOutputStream dout = null;
  writeNumMetablocks(writer, compression, 10);
  try {
    dout = writer.prepareMetaBlock("TfileMeta1", compression);
    assertTrue(false);
  }
  catch (MetaBlockAlreadyExists me) {
    // avoid this exception
  }
  dout = writer.prepareMetaBlock("TFileMeta100", compression);
  dout.close();
}
 
Example 24 (project: hadoop, file: TestTFile.java)
public void testMetaBlocks() throws IOException {
  Path mFile = new Path(ROOT, "meta.tfile");
  FSDataOutputStream fout = createFSOutput(mFile);
  Writer writer = new Writer(fout, minBlockSize, "none", null, conf);
  someTestingWithMetaBlock(writer, "none");
  writer.close();
  fout.close();
  FSDataInputStream fin = fs.open(mFile);
  Reader reader = new Reader(fin, fs.getFileStatus(mFile).getLen(), conf);
  someReadingWithMetaBlock(reader);
  fs.delete(mFile, true);
  reader.close();
  fin.close();
}
 
Example 25 (project: hadoop, file: TestTFileStreams.java)
@Override
public void setUp() throws IOException {
  conf = new Configuration();
  path = new Path(ROOT, outputFile);
  fs = path.getFileSystem(conf);
  out = fs.create(path);
  writer = new Writer(out, BLOCK_SIZE, compression, comparator, conf);
}
 
Example 26 (project: hadoop, file: TestTFileByteArrays.java)
@Before
public void setUp() throws IOException {
  path = new Path(ROOT, outputFile);
  fs = path.getFileSystem(conf);
  out = fs.create(path);
  writer = new Writer(out, BLOCK_SIZE, compression, comparator, conf);
}
 
Example 27 (project: hadoop, file: TestTFileByteArrays.java)
@Test
public void testFailureBadCompressionCodec() throws IOException {
  if (skip)
    return;
  closeOutput();
  out = fs.create(path);
  try {
    writer = new Writer(out, BLOCK_SIZE, "BAD", comparator, conf);
    Assert.fail("Error on handling invalid compression codecs.");
  } catch (Exception e) {
    // noop, expecting exceptions
    // e.printStackTrace();
  }
}
 
Example 28 (project: hadoop, file: TestTFileByteArrays.java)
static long writeRecords(Writer writer, int count) throws IOException {
  long rawDataSize = 0;
  int nx;
  for (nx = 0; nx < count; nx++) {
    byte[] key = composeSortedKey(KEY, nx).getBytes();
    byte[] value = (VALUE + nx).getBytes();
    writer.append(key, value);
    rawDataSize +=
        WritableUtils.getVIntSize(key.length) + key.length
            + WritableUtils.getVIntSize(value.length) + value.length;
  }
  return rawDataSize;
}
 
Example 29 (project: hadoop, file: TestTFileUnsortedByteArrays.java)
@Override
public void setUp() throws IOException {
  conf = new Configuration();
  path = new Path(ROOT, outputFile);
  fs = path.getFileSystem(conf);
  out = fs.create(path);
  writer = new Writer(out, BLOCK_SIZE, compression, null, conf);
  writer.append("keyZ".getBytes(), "valueZ".getBytes());
  writer.append("keyM".getBytes(), "valueM".getBytes());
  writer.append("keyN".getBytes(), "valueN".getBytes());
  writer.append("keyA".getBytes(), "valueA".getBytes());
  closeOutput();
}
 
Example 30 (project: hadoop, file: TestTFileComparators.java)
public void testFailureBadComparatorNames() throws IOException {
  try {
    writer = new Writer(out, BLOCK_SIZE, compression, "badcmp", conf);
    Assert.fail("Failed to catch unsupported comparator names");
  }
  catch (Exception e) {
    // noop, expecting exceptions
    e.printStackTrace();
  }
}
 