Source code examples for the class org.apache.hadoop.io.file.tfile.Chunk.ChunkEncoder

The following examples show how the org.apache.hadoop.io.file.tfile.Chunk.ChunkEncoder API is used in several open-source projects; the full source files can also be viewed on GitHub.

Example 1  Project: hadoop   File: TFile.java

/**
 * Obtain an output stream for writing a value into TFile. This may only be
 * called right after a key appending operation (the key append stream must
 * be closed).
 * 
 * @param length
 *          The expected length of the value. If length of the value is not
 *          known, set length = -1. Otherwise, the application must write
 *          exactly as many bytes as specified here before calling close on
 *          the returned output stream. Advertising the value size up-front
 *          guarantees that the value is encoded in one chunk, and avoids
 *          intermediate chunk buffering.
 * @throws IOException
 * 
 */
public DataOutputStream prepareAppendValue(int length) throws IOException {
  if (state != State.END_KEY) {
    throw new IllegalStateException(
        "Incorrect state to start a new value: " + state.name());
  }

  DataOutputStream ret;

  // unknown length
  if (length < 0) {
    if (valueBuffer == null) {
      valueBuffer = new byte[getChunkBufferSize(conf)];
    }
    ret = new ValueRegister(new ChunkEncoder(blkAppender, valueBuffer));
  } else {
    ret =
        new ValueRegister(new Chunk.SingleChunkEncoder(blkAppender, length));
  }

  state = State.IN_VALUE;
  return ret;
}
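The method above is the writer-side internals. Below is a minimal, hedged usage sketch (not taken from any of the listed projects) showing how an application can reach the Chunk.SingleChunkEncoder branch by advertising the exact value length up front; the file path, block size, and key/value bytes are illustrative assumptions.

import java.io.DataOutputStream;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.file.tfile.TFile;

public class TFileKnownLengthWrite {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.getLocal(conf);
    Path path = new Path("/tmp/example.tfile");   // hypothetical output path

    try (FSDataOutputStream fsdos = fs.create(path)) {
      TFile.Writer writer = new TFile.Writer(
          fsdos, 64 * 1024, TFile.COMPRESSION_NONE, TFile.COMPARATOR_MEMCMP, conf);

      byte[] value = "hello-tfile".getBytes(StandardCharsets.UTF_8);

      // Append the key first; the key stream must be closed before the value stream.
      try (DataOutputStream keyOut = writer.prepareAppendKey(-1)) {
        keyOut.write("key-0".getBytes(StandardCharsets.UTF_8));
      }

      // Advertising the exact value length takes the Chunk.SingleChunkEncoder
      // branch shown above, so no intermediate chunk buffering is needed.
      try (DataOutputStream valOut = writer.prepareAppendValue(value.length)) {
        valOut.write(value);
      }

      writer.close();
    }
  }
}

Because the length is declared, the application must write exactly that many bytes before closing the returned stream, as the javadoc above requires.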
 
Example 2  Project: big-c   File: TFile.java

/**
 * Obtain an output stream for writing a value into TFile. This may only be
 * called right after a key appending operation (the key append stream must
 * be closed).
 * 
 * @param length
 *          The expected length of the value. If length of the value is not
 *          known, set length = -1. Otherwise, the application must write
 *          exactly as many bytes as specified here before calling close on
 *          the returned output stream. Advertising the value size up-front
 *          guarantees that the value is encoded in one chunk, and avoids
 *          intermediate chunk buffering.
 * @throws IOException
 * 
 */
public DataOutputStream prepareAppendValue(int length) throws IOException {
  if (state != State.END_KEY) {
    throw new IllegalStateException(
        "Incorrect state to start a new value: " + state.name());
  }

  DataOutputStream ret;

  // unknown length
  if (length < 0) {
    if (valueBuffer == null) {
      valueBuffer = new byte[getChunkBufferSize(conf)];
    }
    ret = new ValueRegister(new ChunkEncoder(blkAppender, valueBuffer));
  } else {
    ret =
        new ValueRegister(new Chunk.SingleChunkEncoder(blkAppender, length));
  }

  state = State.IN_VALUE;
  return ret;
}
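For completeness, here is a hedged sketch of the other branch: passing length = -1 routes the write through a ChunkEncoder backed by valueBuffer, so the value is buffered and emitted as a sequence of chunks. The class and method names below are hypothetical, and the writer is assumed to be an open TFile.Writer whose key stream has just been closed.

import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.io.file.tfile.TFile;

public class UnknownLengthValue {
  // Hypothetical helper: append a value whose length is not known in advance.
  // Passing -1 selects the ChunkEncoder path shown in the source above.
  static void appendValueOfUnknownLength(TFile.Writer writer, byte[] payload)
      throws IOException {
    try (DataOutputStream valOut = writer.prepareAppendValue(-1)) {
      valOut.write(payload);   // any number of bytes may be written before close()
    }
  }
}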
 
Example 3  Project: attic-apex-malhar   File: DTFile.java

/**
 * Obtain an output stream for writing a value into TFile. This may only be
 * called right after a key appending operation (the key append stream must
 * be closed).
 *
 * @param length
 *          The expected length of the value. If length of the value is not
 *          known, set length = -1. Otherwise, the application must write
 *          exactly as many bytes as specified here before calling close on
 *          the returned output stream. Advertising the value size up-front
 *          guarantees that the value is encoded in one chunk, and avoids
 *          intermediate chunk buffering.
 * @throws IOException
 *
 */
public DataOutputStream prepareAppendValue(int length) throws IOException {
  if (state != State.END_KEY) {
    throw new IllegalStateException(
        "Incorrect state to start a new value: " + state.name());
  }

  DataOutputStream ret;

  // unknown length
  if (length < 0) {
    if (valueBuffer == null) {
      valueBuffer = new byte[getChunkBufferSize(conf)];
    }
    ret = new ValueRegister(new ChunkEncoder(blkAppender, valueBuffer));
  } else {
    ret =
        new ValueRegister(new Chunk.SingleChunkEncoder(blkAppender, length));
  }

  state = State.IN_VALUE;
  return ret;
}
 
Example 4  Project: RDFS   File: TFile.java

/**
 * Obtain an output stream for writing a value into TFile. This may only be
 * called right after a key appending operation (the key append stream must
 * be closed).
 * 
 * @param length
 *          The expected length of the value. If length of the value is not
 *          known, set length = -1. Otherwise, the application must write
 *          exactly as many bytes as specified here before calling close on
 *          the returned output stream. Advertising the value size up-front
 *          guarantees that the value is encoded in one chunk, and avoids
 *          intermediate chunk buffering.
 * @throws IOException
 * 
 */
public DataOutputStream prepareAppendValue(int length) throws IOException {
  if (state != State.END_KEY) {
    throw new IllegalStateException(
        "Incorrect state to start a new value: " + state.name());
  }

  DataOutputStream ret;

  // unknown length
  if (length < 0) {
    if (valueBuffer == null) {
      valueBuffer = new byte[getChunkBufferSize(conf)];
    }
    ret = new ValueRegister(new ChunkEncoder(blkAppender, valueBuffer));
  } else {
    ret =
        new ValueRegister(new Chunk.SingleChunkEncoder(blkAppender, length));
  }

  state = State.IN_VALUE;
  return ret;
}
 
Example 5  Project: hadoop-gpu   File: TFile.java

/**
 * Obtain an output stream for writing a value into TFile. This may only be
 * called right after a key appending operation (the key append stream must
 * be closed).
 * 
 * @param length
 *          The expected length of the value. If length of the value is not
 *          known, set length = -1. Otherwise, the application must write
 *          exactly as many bytes as specified here before calling close on
 *          the returned output stream. Advertising the value size up-front
 *          guarantees that the value is encoded in one chunk, and avoids
 *          intermediate chunk buffering.
 * @throws IOException
 * 
 */
public DataOutputStream prepareAppendValue(int length) throws IOException {
  if (state != State.END_KEY) {
    throw new IllegalStateException(
        "Incorrect state to start a new value: " + state.name());
  }

  DataOutputStream ret;

  // unknown length
  if (length < 0) {
    if (valueBuffer == null) {
      valueBuffer = new byte[getChunkBufferSize(conf)];
    }
    ret = new ValueRegister(new ChunkEncoder(blkAppender, valueBuffer));
  } else {
    ret =
        new ValueRegister(new Chunk.SingleChunkEncoder(blkAppender, length));
  }

  state = State.IN_VALUE;
  return ret;
}
 