org.apache.hadoop.io.BytesWritable#get()源码实例Demo

下面列出了org.apache.hadoop.io.BytesWritable#get()实例代码,或者点击链接到GitHub查看源代码,也可以在右侧发表评论。

源代码1 项目: RDFS   文件: BCFile.java
/**
 * Initialize the write-state for one compressed block: record the start
 * offset, size the shared output buffer, and open a compression stream on
 * top of the buffered file-system stream.
 *
 * @param compressionAlgo
 *          The compression algorithm to be used for compression.
 * @param fsOut
 *          Underlying file-system output stream; its current position is
 *          recorded as the block's start offset.
 * @param fsOutputBuffer
 *          Reusable buffer backing the buffered output stream; resized here
 *          to the configured TFile FS output buffer size.
 * @param conf
 *          Configuration consulted for the output buffer size.
 * @throws IOException
 *           if the compression stream cannot be created.
 */
public WBlockState(Algorithm compressionAlgo, FSDataOutputStream fsOut,
    BytesWritable fsOutputBuffer, Configuration conf) throws IOException {
  this.compressAlgo = compressionAlgo;
  this.fsOut = fsOut;
  this.posStart = fsOut.getPos();

  fsOutputBuffer.setCapacity(TFile.getFSOutputBufferSize(conf));

  this.fsBufferedOutput =
      new SimpleBufferedOutputStream(this.fsOut, fsOutputBuffer.get());
  this.compressor = compressAlgo.getCompressor();

  try {
    // Consistently use the field (same object as the parameter) from here on.
    this.out =
        compressAlgo.createCompressionStream(fsBufferedOutput,
            compressor, 0);
  } catch (IOException e) {
    // Return the compressor to its pool before propagating the failure so
    // it is not leaked.
    compressAlgo.returnCompressor(compressor);
    throw e;
  }
}
 
源代码2 项目: emr-sample-apps   文件: AlignmentRecord.java
/**
 * Deserialize this alignment record from its raw byte layout:
 * [0] = reverse-complement flag, [1..4] = refID, [5..8] = refStart,
 * [9..12] = refEnd, [13..16] = differences (each int big-endian).
 */
public void fromBytes(BytesWritable t)
{
	byte[] data = t.get();

	m_isRC        = data[0] == 1;
	m_refID       = bigEndianInt(data, 1);
	m_refStart    = bigEndianInt(data, 5);
	m_refEnd      = bigEndianInt(data, 9);
	m_differences = bigEndianInt(data, 13);
}

/** Decode a big-endian 32-bit integer starting at {@code off}. */
private static int bigEndianInt(byte[] buf, int off)
{
	return (buf[off]     & 0xFF) << 24
	     | (buf[off + 1] & 0xFF) << 16
	     | (buf[off + 2] & 0xFF) << 8
	     | (buf[off + 3] & 0xFF);
}
 
源代码3 项目: hadoop-gpu   文件: BCFile.java
/**
 * Initialize the write-state for one compressed block: record the start
 * offset, size the shared output buffer, and open a compression stream on
 * top of the buffered file-system stream.
 *
 * @param compressionAlgo
 *          The compression algorithm to be used for compression.
 * @param fsOut
 *          Underlying file-system output stream; its current position is
 *          recorded as the block's start offset.
 * @param fsOutputBuffer
 *          Reusable buffer backing the buffered output stream; resized here
 *          to the configured TFile FS output buffer size.
 * @param conf
 *          Configuration consulted for the output buffer size.
 * @throws IOException
 *           if the compression stream cannot be created.
 */
public WBlockState(Algorithm compressionAlgo, FSDataOutputStream fsOut,
    BytesWritable fsOutputBuffer, Configuration conf) throws IOException {
  this.compressAlgo = compressionAlgo;
  this.fsOut = fsOut;
  this.posStart = fsOut.getPos();

  fsOutputBuffer.setCapacity(TFile.getFSOutputBufferSize(conf));

  this.fsBufferedOutput =
      new SimpleBufferedOutputStream(this.fsOut, fsOutputBuffer.get());
  this.compressor = compressAlgo.getCompressor();

  try {
    this.out =
        compressionAlgo.createCompressionStream(fsBufferedOutput,
            compressor, 0);
  } catch (IOException e) {
    // Return the compressor to its pool before propagating the failure so
    // it is not leaked.
    compressAlgo.returnCompressor(compressor);
    throw e;
  }
}
 
源代码4 项目: hadoop   文件: KeySampler.java
/**
 * Fill {@code key} with a random key of random length (at least
 * MIN_KEY_LEN), then stamp its first four bytes with a big-endian value
 * drawn from [min, max).
 */
public void next(BytesWritable key) {
  key.setSize(Math.max(MIN_KEY_LEN, keyLenRNG.nextInt()));
  random.nextBytes(key.get());
  // Guard the empty range: Random.nextInt(0) throws
  // IllegalArgumentException when max == min. Matches the hbase variant.
  int n = min;
  if (max != min) {
    n += random.nextInt(max - min);
  }
  byte[] b = key.get();
  b[0] = (byte) (n >> 24);
  b[1] = (byte) (n >> 16);
  b[2] = (byte) (n >> 8);
  b[3] = (byte) n;
}
 
源代码5 项目: big-c   文件: KeySampler.java
/**
 * Fill {@code key} with a random key of random length (at least
 * MIN_KEY_LEN), then stamp its first four bytes with a big-endian value
 * drawn from [min, max).
 */
public void next(BytesWritable key) {
  key.setSize(Math.max(MIN_KEY_LEN, keyLenRNG.nextInt()));
  random.nextBytes(key.get());
  // Guard the empty range: Random.nextInt(0) throws
  // IllegalArgumentException when max == min. Matches the hbase variant.
  int n = min;
  if (max != min) {
    n += random.nextInt(max - min);
  }
  byte[] b = key.get();
  b[0] = (byte) (n >> 24);
  b[1] = (byte) (n >> 16);
  b[2] = (byte) (n >> 8);
  b[3] = (byte) n;
}
 
源代码6 项目: hbase   文件: KeySampler.java
/**
 * Fill {@code key} with a random key of random length (at least
 * MIN_KEY_LEN), then stamp its first four bytes with a big-endian value
 * drawn from [min, max); an empty range yields min itself.
 */
public void next(BytesWritable key) {
  key.setSize(Math.max(MIN_KEY_LEN, keyLenRNG.nextInt()));
  random.nextBytes(key.get());
  // Random.nextInt(0) would throw, so an empty range falls back to min.
  int n = (max == min) ? min : min + random.nextInt(max - min);
  // Overwrite the first four bytes with n, most significant byte first.
  byte[] buf = key.get();
  for (int i = 0; i < 4; i++) {
    buf[i] = (byte) (n >>> (24 - 8 * i));
  }
}
 
源代码7 项目: RDFS   文件: KeySampler.java
/**
 * Fill {@code key} with a random key of random length (at least
 * MIN_KEY_LEN), then stamp its first four bytes with a big-endian value
 * drawn from [min, max).
 */
public void next(BytesWritable key) {
  key.setSize(Math.max(MIN_KEY_LEN, keyLenRNG.nextInt()));
  random.nextBytes(key.get());
  // Guard the empty range: Random.nextInt(0) throws
  // IllegalArgumentException when max == min. Matches the hbase variant.
  int n = min;
  if (max != min) {
    n += random.nextInt(max - min);
  }
  byte[] b = key.get();
  b[0] = (byte) (n >> 24);
  b[1] = (byte) (n >> 16);
  b[2] = (byte) (n >> 8);
  b[3] = (byte) n;
}
 
源代码8 项目: emr-sample-apps   文件: MerRecord.java
/**
 * Deserialize this mer record from its raw byte layout:
 * [0] = packed flags (0x01 reference, 0x10 reverse-complement),
 * [1..4] = offset, [5..8] = id (big-endian ints),
 * [9..] = leftFlank bytes, a DNAString.hardstop sentinel, then
 * rightFlank bytes to the end of the record.
 */
public void fromBytes(BytesWritable t)
{
	byte [] raw = t.get();
	int rawlen = t.getSize();
	
	//sbuffer[0] = (byte) ((isReference ? 0x01 : 0x00) | (isRC ? 0x10 : 0x00));
	
	isReference = (raw[0] & 0x01) == 0x01;
	isRC        = (raw[0] & 0x10) == 0x10;
	
	// Big-endian 32-bit decode of bytes 1..4.
	offset = (raw[1] & 0xFF) << 24 
	       | (raw[2] & 0xFF) << 16
	       | (raw[3] & 0xFF) << 8
	       | (raw[4] & 0xFF);
	
	// Big-endian 32-bit decode of bytes 5..8.
	id = (raw[5] & 0xFF) << 24 
          | (raw[6] & 0xFF) << 16
          | (raw[7] & 0xFF) << 8
          | (raw[8] & 0xFF);

	// Variable-length fields start after the fixed 9-byte header.
	int fieldstart = 9;
	
	// Scan for the hardstop sentinel separating leftFlank from rightFlank.
	// NOTE(review): if no hardstop is present, leftFlank keeps its previous
	// value and the whole tail becomes rightFlank — presumably the format
	// guarantees a sentinel; confirm against the serializer.
	for (int i = fieldstart; i < rawlen; i++)
	{
		if (raw[i] == DNAString.hardstop)
		{
			//leftFlank = DNAString.dnaToArr(raw, fieldstart, i-fieldstart);
			leftFlank = new byte[i-fieldstart];
			System.arraycopy(raw, fieldstart, leftFlank, 0, i-fieldstart);
			
			fieldstart = i+1; // skip the hardstop
			break;
		}
	}
	
	// Everything after the sentinel (or the header, if none found).
	rightFlank = new byte[rawlen - fieldstart];
	System.arraycopy(raw, fieldstart, rightFlank, 0, rawlen-fieldstart);
	//rightFlank = DNAString.dnaToArr(raw, fieldstart, rawlen-fieldstart);
}
 
源代码9 项目: emr-sample-apps   文件: FastaRecord.java
/**
 * Deserialize this FASTA record from its raw byte layout:
 * [0] = last-chunk flag, [1..4] = chunk offset (big-endian int),
 * [5..] = packed DNA sequence bytes.
 */
public void fromBytes(BytesWritable t)
{
	byte[] buf = t.get();
	int len = t.getSize();
	
	m_lastChunk = (buf[0] == 1);
	
	// Accumulate bytes 1..4 as a big-endian 32-bit integer.
	int off = 0;
	for (int i = 1; i <= 4; i++) {
		off = (off << 8) | (buf[i] & 0xFF);
	}
	m_offset = off;
	
	// The remainder of the record is the packed sequence.
	m_sequence = DNAString.dnaToArr(buf, 5, len - 5);
}
 
源代码10 项目: hadoop-gpu   文件: KeySampler.java
/**
 * Fill {@code key} with a random key of random length (at least
 * MIN_KEY_LEN), then stamp its first four bytes with a big-endian value
 * drawn from [min, max).
 */
public void next(BytesWritable key) {
  key.setSize(Math.max(MIN_KEY_LEN, keyLenRNG.nextInt()));
  random.nextBytes(key.get());
  // Guard the empty range: Random.nextInt(0) throws
  // IllegalArgumentException when max == min. Matches the hbase variant.
  int n = min;
  if (max != min) {
    n += random.nextInt(max - min);
  }
  byte[] b = key.get();
  b[0] = (byte) (n >> 24);
  b[1] = (byte) (n >> 16);
  b[2] = (byte) (n >> 8);
  b[3] = (byte) n;
}
 
源代码11 项目: RDFS   文件: ByteArray.java
/**
 * Constructing a ByteArray from a {@link BytesWritable}.
 * Wraps the writable's backing array (no copy) from position 0 through its
 * current size; later mutation of the writable is visible through this view.
 * 
 * @param other
 *          the BytesWritable whose backing buffer and length are adopted.
 */
public ByteArray(BytesWritable other) {
  this(other.get(), 0, other.getSize());
}
 
源代码12 项目: hadoop-gpu   文件: ByteArray.java
/**
 * Constructing a ByteArray from a {@link BytesWritable}.
 * Wraps the writable's backing array (no copy) from position 0 through its
 * current size; later mutation of the writable is visible through this view.
 * 
 * @param other
 *          the BytesWritable whose backing buffer and length are adopted.
 */
public ByteArray(BytesWritable other) {
  this(other.get(), 0, other.getSize());
}