org.apache.hadoop.io.WritableUtils#writeVInt() code examples

Listed below are example usages of org.apache.hadoop.io.WritableUtils#writeVInt(), drawn from open-source projects; the full source of each snippet can be found in the named project on GitHub.
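For orientation: writeVInt stores an int in Hadoop's variable-length format, taking 1 byte for values in [-112, 127] and up to 5 bytes otherwise; readVInt decodes it and getVIntSize reports the encoded width. A minimal, self-contained round trip (a sketch for illustration, not drawn from any project below):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.io.WritableUtils;

public class VIntRoundTrip {
    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bytes);

        WritableUtils.writeVInt(out, 42);                // 1 byte on the wire
        WritableUtils.writeVInt(out, Integer.MAX_VALUE); // 5 bytes on the wire
        out.flush();

        // getVIntSize predicts the encoded width without writing anything.
        System.out.println(WritableUtils.getVIntSize(42));                // 1
        System.out.println(WritableUtils.getVIntSize(Integer.MAX_VALUE)); // 5

        DataInputStream in = new DataInputStream(
                new ByteArrayInputStream(bytes.toByteArray()));
        System.out.println(WritableUtils.readVInt(in)); // 42
        System.out.println(WritableUtils.readVInt(in)); // 2147483647
    }
}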

Example 1 (project: presto, file: TestRcFileDecoderUtils.java)
private static Slice writeVintOld(SliceOutput output, long value)
        throws IOException
{
    output.reset();
    WritableUtils.writeVLong(output, value);
    Slice vLongOld = Slices.copyOf(output.slice());

    output.reset();
    RcFileDecoderUtils.writeVLong(output, value);
    Slice vLongNew = Slices.copyOf(output.slice());
    assertEquals(vLongNew, vLongOld);

    if (value == (int) value) {
        output.reset();
        WritableUtils.writeVInt(output, (int) value);
        Slice vIntOld = Slices.copyOf(output.slice());
        assertEquals(vIntOld, vLongOld);

        output.reset();
        RcFileDecoderUtils.writeVInt(output, (int) value);
        Slice vIntNew = Slices.copyOf(output.slice());
        assertEquals(vIntNew, vLongOld);
    }
    return vLongOld;
}
 
Example 2 (project: RDFS, file: IFile.java)
public void append(byte[] kvBuffer, int offset, int keyLength,
    int valueLength)
    throws IOException {
  int realKeyLen = keyLength + WritableUtils.INT_LENGTH_BYTES;
  int realValLen = valueLength + WritableUtils.INT_LENGTH_BYTES;

  WritableUtils.writeVInt(buffer, realKeyLen);
  WritableUtils.writeVInt(buffer, realValLen);
  // real key: 4-byte keyLength followed by the key bytes
  buffer.writeInt(keyLength);
  buffer.write(kvBuffer, offset, keyLength);
  // real value: 4-byte valueLength followed by the value bytes
  buffer.writeInt(valueLength);
  buffer.write(kvBuffer, offset + keyLength, valueLength);

  out.write(buffer.getData(), 0, buffer.getLength());
  buffer.reset();

  // Update bytes written
  decompressedBytesWritten += realKeyLen + realValLen
      + WritableUtils.getVIntSize(realKeyLen)
      + WritableUtils.getVIntSize(realValLen);
  ++numRecordsWritten;
}
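Each record this append emits therefore has the layout vint(realKeyLen), vint(realValLen), int(keyLength), key bytes, int(valueLength), value bytes. A minimal decoding sketch under that assumption (the method name is illustrative, not RDFS's actual reader):

static void readRecord(DataInput in) throws IOException {
  int realKeyLen = WritableUtils.readVInt(in); // keyLength + 4
  int realValLen = WritableUtils.readVInt(in); // valueLength + 4
  int keyLength = in.readInt();
  byte[] key = new byte[keyLength];
  in.readFully(key);
  int valueLength = in.readInt();
  byte[] value = new byte[valueLength];
  in.readFully(value);
  // key and value now hold the raw record bytes
}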
 
Example 3 (project: hadoop, file: FSImageSerialization.java)
/**
 * Write an array of blocks as compactly as possible. This uses
 * delta-encoding for the generation stamp and size, following
 * the principle that genstamp increases relatively slowly,
 * and size is equal for all but the last block of a file.
 */
public static void writeCompactBlockArray(
    Block[] blocks, DataOutputStream out) throws IOException {
  WritableUtils.writeVInt(out, blocks.length);
  Block prev = null;
  for (Block b : blocks) {
    long szDelta = b.getNumBytes() -
        (prev != null ? prev.getNumBytes() : 0);
    long gsDelta = b.getGenerationStamp() -
        (prev != null ? prev.getGenerationStamp() : 0);
    out.writeLong(b.getBlockId()); // blockid is random
    WritableUtils.writeVLong(out, szDelta);
    WritableUtils.writeVLong(out, gsDelta);
    prev = b;
  }
}
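The reader inverts the two deltas by adding each decoded value to the previous block's size and generation stamp. HDFS ships a matching readCompactBlockArray; the sketch below is a simplified, illustrative version rather than the exact code:

public static Block[] readCompactBlockArray(DataInput in) throws IOException {
  int num = WritableUtils.readVInt(in);
  Block prev = null;
  Block[] ret = new Block[num];
  for (int i = 0; i < num; i++) {
    long id = in.readLong();
    long sz = WritableUtils.readVLong(in) +
        (prev != null ? prev.getNumBytes() : 0);
    long gs = WritableUtils.readVLong(in) +
        (prev != null ? prev.getGenerationStamp() : 0);
    ret[i] = new Block(id, sz, gs);
    prev = ret[i];
  }
  return ret;
}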
 
Example 4 (project: big-c, file: AbstractCounterGroup.java)
/**
 * GenericGroup ::= displayName #counter counter*
 */
@Override
public synchronized void write(DataOutput out) throws IOException {
  Text.writeString(out, displayName);
  WritableUtils.writeVInt(out, counters.size());
  for(Counter counter: counters.values()) {
    counter.write(out);
  }
}
 
Example 5 (project: flink, file: Configuration.java)
@Override
public void write(DataOutput out) throws IOException {
  Properties props = getProps();
  WritableUtils.writeVInt(out, props.size());
  for(Entry<Object, Object> item: props.entrySet()) {
    org.apache.hadoop.io.Text.writeString(out, (String) item.getKey());
    org.apache.hadoop.io.Text.writeString(out, (String) item.getValue());
    WritableUtils.writeCompressedStringArray(out,
        updatingResource.get(item.getKey()));
  }
}
 
Example 6 (project: phoenix, file: OrderByExpression.java)
@Override
public void write(DataOutput output) throws IOException {
    output.writeBoolean(isNullsLast);
    output.writeBoolean(isAscending);
    WritableUtils.writeVInt(output, ExpressionType.valueOf(expression).ordinal());
    expression.write(output);
}
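The read side recovers the concrete Expression from the serialized ordinal; Phoenix's usual pattern is ordinal, then ExpressionType, then newInstance(). A hedged counterpart would look roughly like this (treat it as a sketch, not the project's exact code):

@Override
public void readFields(DataInput input) throws IOException {
    isNullsLast = input.readBoolean();
    isAscending = input.readBoolean();
    // Rebuild the concrete Expression from its type ordinal,
    // then let it deserialize its own state.
    expression = ExpressionType.values()[WritableUtils.readVInt(input)].newInstance();
    expression.readFields(input);
}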
 
Example 7 (project: hadoop, file: InMemoryWriter.java)
public void close() throws IOException {
  // Write EOF_MARKER for key/value length
  WritableUtils.writeVInt(out, IFile.EOF_MARKER);
  WritableUtils.writeVInt(out, IFile.EOF_MARKER);
  
  // Close the stream 
  out.close();
  out = null;
}
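IFile.EOF_MARKER is -1, so a reader knows it has hit end-of-stream when both the key length and the value length decode to the marker. A hedged sketch of that check (illustrative method; a real reader would keep the decoded lengths for the record that follows):

static boolean hasNextRecord(DataInput in) throws IOException {
  int keyLen = WritableUtils.readVInt(in);
  int valLen = WritableUtils.readVInt(in);
  // Two EOF_MARKER lengths in a row terminate the stream.
  return !(keyLen == IFile.EOF_MARKER && valLen == IFile.EOF_MARKER);
}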
 
Example 8 (project: datawave, file: Attributes.java)
@Override
public void write(DataOutput out, boolean reducedResponse) throws IOException {
    WritableUtils.writeVInt(out, _count);
    out.writeBoolean(trackSizes);
    // Write out the number of Attributes we're going to store
    WritableUtils.writeVInt(out, this.attributes.size());
    
    for (Attribute<? extends Comparable<?>> attr : this.attributes) {
        // Write out the concrete Attribute class
        WritableUtils.writeString(out, attr.getClass().getName());
        
        // Defer to the concrete instance to write() itself
        attr.write(out, reducedResponse);
    }
}
 
Example 9 (project: tez, file: IFile.java)
protected void writeValue(byte[] data, int offset, int length) throws IOException {
  writeRLE(out);
  WritableUtils.writeVInt(out, length); // value length
  out.write(data, offset, length);
  // Update bytes written
  decompressedBytesWritten +=
      length + WritableUtils.getVIntSize(length);
  if (serializedUncompressedBytes != null) {
    serializedUncompressedBytes.increment(length);
  }
  totalKeySaving++;
}
 
Example 10 (project: gemfirexd-oss, file: SequenceFile.java)
/** Append a key/value pair. */
@Override
@SuppressWarnings("unchecked")
public synchronized void append(Object key, Object val)
  throws IOException {
  if (key.getClass() != keyClass)
    throw new IOException("wrong key class: "+key+" is not "+keyClass);
  if (val.getClass() != valClass)
    throw new IOException("wrong value class: "+val+" is not "+valClass);

  // Save key/value into respective buffers 
  int oldKeyLength = keyBuffer.getLength();
  keySerializer.serialize(key);
  int keyLength = keyBuffer.getLength() - oldKeyLength;
  if (keyLength < 0)
    throw new IOException("negative length keys not allowed: " + key);
  WritableUtils.writeVInt(keyLenBuffer, keyLength);

  int oldValLength = valBuffer.getLength();
  uncompressedValSerializer.serialize(val);
  int valLength = valBuffer.getLength() - oldValLength;
  WritableUtils.writeVInt(valLenBuffer, valLength);
  
  // Added another key/value pair
  ++noBufferedRecords;
  
  // Compress and flush?
  int currentBlockSize = keyBuffer.getLength() + valBuffer.getLength();
  if (currentBlockSize >= compressionBlockSize) {
    sync();
  }
}
 
Example 11 (project: incubator-tez, file: AbstractCounterGroup.java)
/**
 * GenericGroup ::= displayName #counter counter*
 */
@Override
public synchronized void write(DataOutput out) throws IOException {
  Text.writeString(out, displayName);
  WritableUtils.writeVInt(out, counters.size());
  for(TezCounter counter: counters.values()) {
    counter.write(out);
  }
}
 
Example 12 (project: hadoop, file: DelegationKey.java)
@Override
public void write(DataOutput out) throws IOException {
  WritableUtils.writeVInt(out, keyId);
  WritableUtils.writeVLong(out, expiryDate);
  if (keyBytes == null) {
    WritableUtils.writeVInt(out, -1);
  } else {
    WritableUtils.writeVInt(out, keyBytes.length);
    out.write(keyBytes);
  }
}
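The matching readFields mirrors the -1 null sentinel; a hedged sketch (close to Hadoop's actual DelegationKey, but treat it as illustrative):

@Override
public void readFields(DataInput in) throws IOException {
  keyId = WritableUtils.readVInt(in);
  expiryDate = WritableUtils.readVLong(in);
  int len = WritableUtils.readVInt(in);
  if (len == -1) {
    keyBytes = null;   // -1 was the null sentinel on the write side
  } else {
    keyBytes = new byte[len];
    in.readFully(keyBytes);
  }
}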
 
Example 13 (project: phoenix, file: ValueSchema.java)
@Override
public void write(DataOutput output) throws IOException {
    WritableUtils.writeVInt(output, (type.ordinal() + 1) * (this.isNullable ? -1 : 1));
    WritableUtils.writeVInt(output, count * (columnModifier == null ? 1 : -1));
    if (type.isFixedWidth() && type.getByteSize() == null) {
        WritableUtils.writeVInt(output, byteSize);
    }
}
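This write packs two facts into each vint's sign: the first vint's sign carries nullability (its magnitude minus one is the type ordinal), and the second's sign records whether a columnModifier is present. A hedged decode sketch (assumes type is an enum exposing values(), as the write side's ordinal() suggests; actual Phoenix code may differ):

int t = WritableUtils.readVInt(input);
boolean isNullable = t < 0;                     // sign bit carries nullability
PDataType type = PDataType.values()[Math.abs(t) - 1];

int c = WritableUtils.readVInt(input);
boolean hasColumnModifier = c < 0;              // sign bit carries the modifier flag
int count = Math.abs(c);

if (type.isFixedWidth() && type.getByteSize() == null) {
    int byteSize = WritableUtils.readVInt(input); // only present in this case
}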
 
Example 14 (project: phoenix, file: BaseSingleExpression.java)
@Override
public void write(DataOutput output) throws IOException {
    WritableUtils.writeVInt(output, ExpressionType.valueOf(children.get(0)).ordinal());
    children.get(0).write(output);
}
 
Example 15 (project: hadoop, file: PipeReducerStub.java)
public void binaryProtocolStub() {
  try {

    initSoket();

    //should be 5
    //RUN_REDUCE boolean 
    WritableUtils.readVInt(dataInput);
    WritableUtils.readVInt(dataInput);
    int intValue = WritableUtils.readVInt(dataInput);
    System.out.println("getIsJavaRecordWriter:" + intValue);

    // reduce key
    WritableUtils.readVInt(dataInput);
    // value of reduce key
    BooleanWritable value = new BooleanWritable();
    readObject(value, dataInput);
    System.out.println("reducer key :" + value);
    // reduce value code:

    // reduce values
    while ((intValue = WritableUtils.readVInt(dataInput)) == 7) {
      Text txt = new Text();
      // value
      readObject(txt, dataInput);
      System.out.println("reduce value  :" + txt);
    }


    // done
    WritableUtils.writeVInt(dataOut, 54);

    dataOut.flush();
    dataOut.close();

  } catch (Exception x) {
    x.printStackTrace();
  } finally {
    closeSoket();

  }
}
 
Example 16 (project: 163-bigdate-note, file: LogGenericWritable.java)
public void write(DataOutput out) throws IOException {
    WritableUtils.writeVInt(out, name.length);
    for (int i = 0; i < name.length; i++) {
        datum[i].write(out);
    }
}
 
Example 17 (project: hadoop, file: ShuffleHeader.java)
public void write(DataOutput out) throws IOException {
  Text.writeString(out, mapId);
  WritableUtils.writeVLong(out, compressedLength);
  WritableUtils.writeVLong(out, uncompressedLength);
  WritableUtils.writeVInt(out, forReduce);
}
 
Example 18 (project: RDFS, file: TestMiniMRLocalFS.java)
public void write(DataOutput out) throws IOException {
  WritableUtils.writeVInt(out, first);
  WritableUtils.writeVInt(out, length);
}
 
Example 19 (project: hadoop, file: BinaryProtocol.java)
public void abort() throws IOException {
  WritableUtils.writeVInt(stream, MessageType.ABORT.code);
  LOG.debug("Sent abort command");
}