org.apache.hadoop.io.BytesWritable#getSize() Code Examples

The following examples show how org.apache.hadoop.io.BytesWritable#getSize() is used in open-source projects; each snippet is excerpted from the project's source on GitHub.
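A note before the examples: a BytesWritable pairs a backing byte array with a valid-length field. get() returns the whole backing buffer, which may be longer than the stored data, so reads must be bounded by getSize(). In current Hadoop releases both methods are deprecated in favor of the identically behaving getBytes() and getLength(); the snippets below come from older codebases that still use the original names. A minimal sketch of the semantics:

import java.util.Arrays;
import org.apache.hadoop.io.BytesWritable;

public class GetSizeDemo {
  public static void main(String[] args) {
    BytesWritable w = new BytesWritable(new byte[] {1, 2, 3});
    System.out.println(w.getSize());  // 3: number of valid bytes
    w.setSize(2);                     // shrink the valid region; capacity is kept
    System.out.println(w.getSize());  // 2
    // get() returns the backing buffer, which may be longer than the data,
    // so always bound reads with getSize():
    byte[] valid = Arrays.copyOf(w.get(), w.getSize());
    System.out.println(valid.length); // 2
  }
}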

Example 1 (project: RDFS, file: TFile.java)
/**
 * Copy the value into BytesWritable. The input BytesWritable will be
 * automatically resized to the actual value size. The implementation
 * directly uses the buffer inside BytesWritable for storing the value.
 * The call does not require the value length to be known.
 * 
 * @param value
 *          BytesWritable to hold the value.
 * @throws IOException
 */
public long getValue(BytesWritable value) throws IOException {
  DataInputStream dis = getValueStream();
  int size = 0;
  try {
    int remain;
    while ((remain = valueBufferInputStream.getRemain()) > 0) {
      value.setSize(size + remain);
      dis.readFully(value.get(), size, remain);
      size += remain;
    }
    return value.getSize();
  } finally {
    dis.close();
  }
}
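A caller-side sketch of how this method is typically used. The readCurrentValue helper is illustrative, not part of TFile; it assumes a TFile.Reader.Scanner positioned on a valid entry:

import java.io.IOException;
import java.util.Arrays;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.file.tfile.TFile;

// Illustrative helper: copy the scanner's current value into a tight array.
static byte[] readCurrentValue(TFile.Reader.Scanner scanner) throws IOException {
  BytesWritable valBuf = new BytesWritable();
  long len = scanner.entry().getValue(valBuf); // grows valBuf to the value size
  // The backing buffer may be larger than the value; trim to the valid bytes.
  return Arrays.copyOf(valBuf.get(), (int) len);
}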
 
Example 2 (project: hadoop-gpu, file: TFile.java)
/**
 * Copy the value into BytesWritable. The input BytesWritable will be
 * automatically resized to the actual value size. The implementation
 * directly uses the buffer inside BytesWritable for storing the value.
 * The call does not require the value length to be known.
 * 
 * @param value
 *          BytesWritable to hold the value.
 * @throws IOException
 */
public long getValue(BytesWritable value) throws IOException {
  DataInputStream dis = getValueStream();
  int size = 0;
  try {
    int remain;
    while ((remain = valueBufferInputStream.getRemain()) > 0) {
      value.setSize(size + remain);
      dis.readFully(value.get(), size, remain);
      size += remain;
    }
    return value.getSize();
  } finally {
    dis.close();
  }
}
 
Example 3 (project: hadoop, file: TestTFileSeek.java)
public void seekTFile() throws IOException {
  int miss = 0;
  long totalBytes = 0;
  FSDataInputStream fsdis = fs.open(path);
  Reader reader =
    new Reader(fsdis, fs.getFileStatus(path).getLen(), conf);
  KeySampler kSampler =
      new KeySampler(rng, reader.getFirstKey(), reader.getLastKey(),
          keyLenGen);
  Scanner scanner = reader.createScanner();
  BytesWritable key = new BytesWritable();
  BytesWritable val = new BytesWritable();
  timer.reset();
  timer.start();
  for (int i = 0; i < options.seekCount; ++i) {
    kSampler.next(key);
    scanner.lowerBound(key.get(), 0, key.getSize());
    if (!scanner.atEnd()) {
      scanner.entry().get(key, val);
      totalBytes += key.getSize();
      totalBytes += val.getSize();
    }
    else {
      ++miss;
    }
  }
  timer.stop();
  double duration = (double) timer.read() / 1000; // in us.
  System.out.printf(
      "time: %s...avg seek: %s...%d hit...%d miss...avg I/O size: %.2fKB\n",
      timer.toString(), NanoTimer.nanoTimeToString(timer.read()
          / options.seekCount), options.seekCount - miss, miss,
      (double) totalBytes / 1024 / (options.seekCount - miss));

}
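Note the (key.get(), 0, key.getSize()) triple passed to lowerBound: the array returned by get() can be longer than the key, so using key.get().length as the length would include stale trailing bytes and seek to the wrong position. A small sketch of the distinction (the capacity figure assumes BytesWritable's usual 1.5x growth policy):

BytesWritable key = new BytesWritable();
key.setSize(16);  // grows the backing buffer past 16 bytes (to 24 here)
key.setSize(8);   // only the first 8 bytes are now valid
// key.getSize() == 8, but key.get().length == 24, so:
// correct: scanner.lowerBound(key.get(), 0, key.getSize());
// wrong:   scanner.lowerBound(key.get(), 0, key.get().length);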
 
Example 4 (project: big-c, file: TestTFileSeek.java)
public void seekTFile() throws IOException {
  int miss = 0;
  long totalBytes = 0;
  FSDataInputStream fsdis = fs.open(path);
  Reader reader =
    new Reader(fsdis, fs.getFileStatus(path).getLen(), conf);
  KeySampler kSampler =
      new KeySampler(rng, reader.getFirstKey(), reader.getLastKey(),
          keyLenGen);
  Scanner scanner = reader.createScanner();
  BytesWritable key = new BytesWritable();
  BytesWritable val = new BytesWritable();
  timer.reset();
  timer.start();
  for (int i = 0; i < options.seekCount; ++i) {
    kSampler.next(key);
    scanner.lowerBound(key.get(), 0, key.getSize());
    if (!scanner.atEnd()) {
      scanner.entry().get(key, val);
      totalBytes += key.getSize();
      totalBytes += val.getSize();
    }
    else {
      ++miss;
    }
  }
  timer.stop();
  double duration = (double) timer.read() / 1000; // in us.
  System.out.printf(
      "time: %s...avg seek: %s...%d hit...%d miss...avg I/O size: %.2fKB\n",
      timer.toString(), NanoTimer.nanoTimeToString(timer.read()
          / options.seekCount), options.seekCount - miss, miss,
      (double) totalBytes / 1024 / (options.seekCount - miss));

}
 
Example 5 (project: attic-apex-malhar, file: TestTFileSeek.java)
public void seekTFile() throws IOException {
  int miss = 0;
  long totalBytes = 0;
  FSDataInputStream fsdis = fs.open(path);
  Reader reader =
    new Reader(fsdis, fs.getFileStatus(path).getLen(), conf);
  KeySampler kSampler =
      new KeySampler(rng, reader.getFirstKey(), reader.getLastKey(),
          keyLenGen);
  Scanner scanner = reader.createScanner();
  BytesWritable key = new BytesWritable();
  BytesWritable val = new BytesWritable();
  timer.reset();
  timer.start();
  for (int i = 0; i < options.seekCount; ++i) {
    kSampler.next(key);
    scanner.lowerBound(key.get(), 0, key.getSize());
    if (!scanner.atEnd()) {
      scanner.entry().get(key, val);
      totalBytes += key.getSize();
      totalBytes += val.getSize();
    }
    else {
      ++miss;
    }
  }
  timer.stop();
  double duration = (double) timer.read() / 1000; // in us.
  System.out.printf(
      "time: %s...avg seek: %s...%d hit...%d miss...avg I/O size: %.2fKB\n",
      timer.toString(), NanoTimer.nanoTimeToString(timer.read()
          / options.seekCount), options.seekCount - miss, miss,
      (double) totalBytes / 1024 / (options.seekCount - miss));

}
 
Example 6 (project: RDFS, file: TestTFileSeek.java)
public void seekTFile() throws IOException {
  int miss = 0;
  long totalBytes = 0;
  FSDataInputStream fsdis = fs.open(path);
  Reader reader =
    new Reader(fsdis, fs.getFileStatus(path).getLen(), conf);
  KeySampler kSampler =
      new KeySampler(rng, reader.getFirstKey(), reader.getLastKey(),
          keyLenGen);
  Scanner scanner = reader.createScanner();
  BytesWritable key = new BytesWritable();
  BytesWritable val = new BytesWritable();
  timer.reset();
  timer.start();
  for (int i = 0; i < options.seekCount; ++i) {
    kSampler.next(key);
    scanner.lowerBound(key.get(), 0, key.getSize());
    if (!scanner.atEnd()) {
      scanner.entry().get(key, val);
      totalBytes += key.getSize();
      totalBytes += val.getSize();
    }
    else {
      ++miss;
    }
  }
  timer.stop();
  double duration = (double) timer.read() / 1000; // in us.
  System.out.printf(
      "time: %s...avg seek: %s...%d hit...%d miss...avg I/O size: %.2fKB\n",
      timer.toString(), NanoTimer.nanoTimeToString(timer.read()
          / options.seekCount), options.seekCount - miss, miss,
      (double) totalBytes / 1024 / (options.seekCount - miss));

}
 
Example 7 (project: emr-sample-apps, file: MerRecord.java)
public void fromBytes(BytesWritable t)
{
	byte [] raw = t.get();
	int rawlen = t.getSize();
	
	//sbuffer[0] = (byte) ((isReference ? 0x01 : 0x00) | (isRC ? 0x10 : 0x00));
	
	isReference = (raw[0] & 0x01) == 0x01;
	isRC        = (raw[0] & 0x10) == 0x10;
	
	offset = (raw[1] & 0xFF) << 24 
	       | (raw[2] & 0xFF) << 16
	       | (raw[3] & 0xFF) << 8
	       | (raw[4] & 0xFF);
	
	id = (raw[5] & 0xFF) << 24 
	   | (raw[6] & 0xFF) << 16
	   | (raw[7] & 0xFF) << 8
	   | (raw[8] & 0xFF);

	int fieldstart = 9;
	
	for (int i = fieldstart; i < rawlen; i++)
	{
		if (raw[i] == DNAString.hardstop)
		{
			//leftFlank = DNAString.dnaToArr(raw, fieldstart, i-fieldstart);
			leftFlank = new byte[i-fieldstart];
			System.arraycopy(raw, fieldstart, leftFlank, 0, i-fieldstart);
			
			fieldstart = i+1; // skip the hardstop
			break;
		}
	}
	
	rightFlank = new byte[rawlen - fieldstart];
	System.arraycopy(raw, fieldstart, rightFlank, 0, rawlen-fieldstart);
	//rightFlank = DNAString.dnaToArr(raw, fieldstart, rawlen-fieldstart);
}
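Two things worth noting here. First, the snippet correctly bounds its loop with rawlen = t.getSize() rather than raw.length, since t.get() may return a padded buffer. Second, the shift-and-mask lines decode big-endian ints by hand; an equivalent formulation (a sketch, not part of the original code) uses java.nio.ByteBuffer, whose default byte order is big-endian:

import java.nio.ByteBuffer;

ByteBuffer buf = ByteBuffer.wrap(raw);  // big-endian by default
byte flags = buf.get();                 // raw[0]: isReference / isRC bits
int offset = buf.getInt();              // raw[1..4]
int id     = buf.getInt();              // raw[5..8]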
 
Example 8 (project: emr-sample-apps, file: FastaRecord.java)
public void fromBytes(BytesWritable t)
{
	byte [] raw = t.get();
	int rawlen = t.getSize();
	
	m_lastChunk   = raw[0] == 1;
	
	m_offset =   (raw[1] & 0xFF) << 24 
	           | (raw[2] & 0xFF) << 16
	           | (raw[3] & 0xFF) << 8
	           | (raw[4] & 0xFF);
	
	int sl = rawlen - 5;
	m_sequence = DNAString.dnaToArr(raw, 5, sl);
}
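For symmetry, a sketch of the encoding this method reverses. Everything here is hypothetical: lastChunk, offset, and seq stand for the record's fields (with seq already in its on-wire byte encoding, since the snippet's DNAString.dnaToArr implies a decoding step that is out of scope), and t is the target BytesWritable:

// Layout decoded above: [0] lastChunk flag, [1..4] big-endian offset,
// [5..] sequence bytes.
byte[] packed = new byte[5 + seq.length];
packed[0] = (byte) (lastChunk ? 1 : 0);
packed[1] = (byte) (offset >> 24);
packed[2] = (byte) (offset >> 16);
packed[3] = (byte) (offset >> 8);
packed[4] = (byte) offset;
System.arraycopy(seq, 0, packed, 5, seq.length);
t.set(packed, 0, packed.length);  // BytesWritable.set copies the bytes into t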
 
Example 9 (project: hadoop-gpu, file: TestTFileSeek.java)
public void seekTFile() throws IOException {
  int miss = 0;
  long totalBytes = 0;
  FSDataInputStream fsdis = fs.open(path);
  Reader reader =
    new Reader(fsdis, fs.getFileStatus(path).getLen(), conf);
  KeySampler kSampler =
      new KeySampler(rng, reader.getFirstKey(), reader.getLastKey(),
          keyLenGen);
  Scanner scanner = reader.createScanner();
  BytesWritable key = new BytesWritable();
  BytesWritable val = new BytesWritable();
  timer.reset();
  timer.start();
  for (int i = 0; i < options.seekCount; ++i) {
    kSampler.next(key);
    scanner.lowerBound(key.get(), 0, key.getSize());
    if (!scanner.atEnd()) {
      scanner.entry().get(key, val);
      totalBytes += key.getSize();
      totalBytes += val.getSize();
    }
    else {
      ++miss;
    }
  }
  timer.stop();
  double duration = (double) timer.read() / 1000; // in us.
  System.out.printf(
      "time: %s...avg seek: %s...%d hit...%d miss...avg I/O size: %.2fKB\n",
      timer.toString(), NanoTimer.nanoTimeToString(timer.read()
          / options.seekCount), options.seekCount - miss, miss,
      (double) totalBytes / 1024 / (options.seekCount - miss));

}
 
Example 10 (project: hadoop, file: TestTFileSeek.java)
private void createTFile() throws IOException {
  long totalBytes = 0;
  FSDataOutputStream fout = createFSOutput(path, fs);
  try {
    Writer writer =
        new Writer(fout, options.minBlockSize, options.compress, "memcmp",
            conf);
    try {
      BytesWritable key = new BytesWritable();
      BytesWritable val = new BytesWritable();
      timer.start();
      for (long i = 0; true; ++i) {
        if (i % 1000 == 0) { // test the size for every 1000 rows.
          if (fs.getFileStatus(path).getLen() >= options.fileSize) {
            break;
          }
        }
        kvGen.next(key, val, false);
        writer.append(key.get(), 0, key.getSize(), val.get(), 0, val
            .getSize());
        totalBytes += key.getSize();
        totalBytes += val.getSize();
      }
      timer.stop();
    }
    finally {
      writer.close();
    }
  }
  finally {
    fout.close();
  }
  double duration = (double)timer.read()/1000; // in us.
  long fsize = fs.getFileStatus(path).getLen();

  System.out.printf(
      "time: %s...uncompressed: %.2fMB...raw thrpt: %.2fMB/s\n",
      timer.toString(), (double) totalBytes / 1024 / 1024, totalBytes
          / duration);
  System.out.printf("time: %s...file size: %.2fMB...disk thrpt: %.2fMB/s\n",
      timer.toString(), (double) fsize / 1024 / 1024, fsize / duration);
}
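Each append call passes (buffer, offset, length) triples, with getSize() bounding the valid region of the possibly larger backing arrays. On Hadoop releases where get()/getSize() are deprecated, the identical call reads as follows (a sketch; behavior is unchanged):

writer.append(key.getBytes(), 0, key.getLength(),
              val.getBytes(), 0, val.getLength());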
 
Example 11 (project: big-c, file: TestTFileSeek.java)
private void createTFile() throws IOException {
  long totalBytes = 0;
  FSDataOutputStream fout = createFSOutput(path, fs);
  try {
    Writer writer =
        new Writer(fout, options.minBlockSize, options.compress, "memcmp",
            conf);
    try {
      BytesWritable key = new BytesWritable();
      BytesWritable val = new BytesWritable();
      timer.start();
      for (long i = 0; true; ++i) {
        if (i % 1000 == 0) { // test the size for every 1000 rows.
          if (fs.getFileStatus(path).getLen() >= options.fileSize) {
            break;
          }
        }
        kvGen.next(key, val, false);
        writer.append(key.get(), 0, key.getSize(), val.get(), 0, val
            .getSize());
        totalBytes += key.getSize();
        totalBytes += val.getSize();
      }
      timer.stop();
    }
    finally {
      writer.close();
    }
  }
  finally {
    fout.close();
  }
  double duration = (double)timer.read()/1000; // in us.
  long fsize = fs.getFileStatus(path).getLen();

  System.out.printf(
      "time: %s...uncompressed: %.2fMB...raw thrpt: %.2fMB/s\n",
      timer.toString(), (double) totalBytes / 1024 / 1024, totalBytes
          / duration);
  System.out.printf("time: %s...file size: %.2fMB...disk thrpt: %.2fMB/s\n",
      timer.toString(), (double) fsize / 1024 / 1024, fsize / duration);
}
 
Example 12 (project: attic-apex-malhar, file: TestTFileSeek.java)
private void createTFile() throws IOException {
  long totalBytes = 0;
  FSDataOutputStream fout = createFSOutput(path, fs);
  try {
    Writer writer =
        new Writer(fout, options.minBlockSize, options.compress, "memcmp",
            conf);
    try {
      BytesWritable key = new BytesWritable();
      BytesWritable val = new BytesWritable();
      timer.start();
      for (long i = 0; true; ++i) {
        if (i % 1000 == 0) { // test the size for every 1000 rows.
          if (fs.getFileStatus(path).getLen() >= options.fileSize) {
            break;
          }
        }
        kvGen.next(key, val, false);
        writer.append(key.get(), 0, key.getSize(), val.get(), 0, val
            .getSize());
        totalBytes += key.getSize();
        totalBytes += val.getSize();
      }
      timer.stop();
    }
    finally {
      writer.close();
    }
  }
  finally {
    fout.close();
  }
  double duration = (double)timer.read()/1000; // in us.
  long fsize = fs.getFileStatus(path).getLen();

  System.out.printf(
      "time: %s...uncompressed: %.2fMB...raw thrpt: %.2fMB/s\n",
      timer.toString(), (double) totalBytes / 1024 / 1024, totalBytes
          / duration);
  System.out.printf("time: %s...file size: %.2fMB...disk thrpt: %.2fMB/s\n",
      timer.toString(), (double) fsize / 1024 / 1024, fsize / duration);
}
 
Example 13 (project: RDFS, file: TestTFileSeek.java)
private void createTFile() throws IOException {
  long totalBytes = 0;
  FSDataOutputStream fout = createFSOutput(path, fs);
  try {
    Writer writer =
        new Writer(fout, options.minBlockSize, options.compress, "memcmp",
            conf);
    try {
      BytesWritable key = new BytesWritable();
      BytesWritable val = new BytesWritable();
      timer.start();
      for (long i = 0; true; ++i) {
        if (i % 1000 == 0) { // test the size for every 1000 rows.
          if (fs.getFileStatus(path).getLen() >= options.fileSize) {
            break;
          }
        }
        kvGen.next(key, val, false);
        writer.append(key.get(), 0, key.getSize(), val.get(), 0, val
            .getSize());
        totalBytes += key.getSize();
        totalBytes += val.getSize();
      }
      timer.stop();
    }
    finally {
      writer.close();
    }
  }
  finally {
    fout.close();
  }
  double duration = (double)timer.read()/1000; // in us.
  long fsize = fs.getFileStatus(path).getLen();

  System.out.printf(
      "time: %s...uncompressed: %.2fMB...raw thrpt: %.2fMB/s\n",
      timer.toString(), (double) totalBytes / 1024 / 1024, totalBytes
          / duration);
  System.out.printf("time: %s...file size: %.2fMB...disk thrpt: %.2fMB/s\n",
      timer.toString(), (double) fsize / 1024 / 1024, fsize / duration);
}
 
Example 14 (project: hadoop-gpu, file: TestTFileSeek.java)
private void createTFile() throws IOException {
  long totalBytes = 0;
  FSDataOutputStream fout = createFSOutput(path, fs);
  try {
    Writer writer =
        new Writer(fout, options.minBlockSize, options.compress, "memcmp",
            conf);
    try {
      BytesWritable key = new BytesWritable();
      BytesWritable val = new BytesWritable();
      timer.start();
      for (long i = 0; true; ++i) {
        if (i % 1000 == 0) { // test the size for every 1000 rows.
          if (fs.getFileStatus(path).getLen() >= options.fileSize) {
            break;
          }
        }
        kvGen.next(key, val, false);
        writer.append(key.get(), 0, key.getSize(), val.get(), 0, val
            .getSize());
        totalBytes += key.getSize();
        totalBytes += val.getSize();
      }
      timer.stop();
    }
    finally {
      writer.close();
    }
  }
  finally {
    fout.close();
  }
  double duration = (double)timer.read()/1000; // in us.
  long fsize = fs.getFileStatus(path).getLen();

  System.out.printf(
      "time: %s...uncompressed: %.2fMB...raw thrpt: %.2fMB/s\n",
      timer.toString(), (double) totalBytes / 1024 / 1024, totalBytes
          / duration);
  System.out.printf("time: %s...file size: %.2fMB...disk thrpt: %.2fMB/s\n",
      timer.toString(), (double) fsize / 1024 / 1024, fsize / duration);
}
 
Example 15 (project: RDFS, file: TFile.java)
/**
 * Copy the key into BytesWritable. The input BytesWritable will be
 * automatically resized to the actual key size.
 * 
 * @param key
 *          BytesWritable to hold the key.
 * @throws IOException
 */
public int getKey(BytesWritable key) throws IOException {
  key.setSize(getKeyLength());
  getKey(key.get());
  return key.getSize();
}
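A caller-side sketch (entry and otherKey are assumed to exist; WritableComparator.compareBytes is Hadoop's standard memcmp-style helper). The returned length equals key.getSize() after the resize, so either value can bound subsequent reads:

import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.WritableComparator;

BytesWritable keyBuf = new BytesWritable();
int klen = entry.getKey(keyBuf);  // grows keyBuf to the key length
// klen == keyBuf.getSize(); either bounds the valid bytes of keyBuf.get().
int cmp = WritableComparator.compareBytes(keyBuf.get(), 0, klen,
                                          otherKey.get(), 0, otherKey.getSize());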
 
Example 16 (project: RDFS, file: ByteArray.java)
/**
 * Constructing a ByteArray from a {@link BytesWritable}.
 * 
 * @param other
 */
public ByteArray(BytesWritable other) {
  this(other.get(), 0, other.getSize());
}
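Because this delegates to the (buffer, offset, length) constructor, the ByteArray appears to wrap the writable's backing buffer rather than copy it, and getSize() (not the raw buffer length) fixes the view's extent. A usage sketch under that assumption:

BytesWritable w = new BytesWritable(new byte[] {10, 20, 30});
ByteArray view = new ByteArray(w);  // view spans exactly w.getSize() bytes
// If no copy is made, later mutations of w's buffer are visible through view.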
 
Example 17 (project: hadoop-gpu, file: TFile.java)
/**
 * Copy the key into BytesWritable. The input BytesWritable will be
 * automatically resized to the actual key size.
 * 
 * @param key
 *          BytesWritable to hold the key.
 * @throws IOException
 */
public int getKey(BytesWritable key) throws IOException {
  key.setSize(getKeyLength());
  getKey(key.get());
  return key.getSize();
}
 
Example 18 (project: hadoop-gpu, file: ByteArray.java)
/**
 * Constructing a ByteArray from a {@link BytesWritable}.
 * 
 * @param other
 */
public ByteArray(BytesWritable other) {
  this(other.get(), 0, other.getSize());
}