org.apache.hadoop.io.BytesWritable#setSize() Source Code Examples

Listed below are example usages of org.apache.hadoop.io.BytesWritable#setSize(), collected from several open-source projects on GitHub; each snippet is taken from the project and file named in its heading.
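Before the project snippets, here is a minimal hedged sketch of what setSize() does: it sets the number of valid bytes, enlarging the backing buffer if necessary while preserving existing content; getBytes() exposes that buffer, which may be longer than getLength(). The class name SetSizeDemo is only for illustration.

import org.apache.hadoop.io.BytesWritable;

public class SetSizeDemo {
  public static void main(String[] args) {
    BytesWritable bw = new BytesWritable();
    bw.setSize(4);                        // grow: the backing buffer is enlarged as needed
    byte[] buf = bw.getBytes();           // buffer may be longer than getLength()
    buf[0] = (byte) 'T';
    buf[1] = (byte) 'F';
    buf[2] = (byte) 'i';
    buf[3] = (byte) 'l';
    System.out.println(bw.getLength());   // 4
    bw.setSize(2);                        // shrink: only the logical length changes
    System.out.println(bw.getLength());   // 2
  }
}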

Example 1  Project: hadoop   File: TFile.java
/**
 * Copy the value into BytesWritable. The input BytesWritable will be
 * automatically resized to the actual value size. The implementation
 * directly uses the buffer inside BytesWritable for storing the value.
 * The call does not require the value length to be known.
 * 
 * @param value
 * @throws IOException
 */
public long getValue(BytesWritable value) throws IOException {
  DataInputStream dis = getValueStream();
  int size = 0;
  try {
    int remain;
    while ((remain = valueBufferInputStream.getRemain()) > 0) {
      value.setSize(size + remain);
      dis.readFully(value.getBytes(), size, remain);
      size += remain;
    }
    return value.getLength();
  } finally {
    dis.close();
  }
}
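The method above is part of the TFile scanner entry implementation; it grows the value chunk by chunk with setSize() so the caller's BytesWritable ends up exactly the value's size. A hedged sketch of how a caller might drive it, reusing one key and one value object across records (the method signature and setup are assumptions for illustration):

static void dumpEntries(FileSystem fs, Path path, Configuration conf) throws IOException {
  FSDataInputStream in = fs.open(path);
  TFile.Reader reader = new TFile.Reader(in, fs.getFileStatus(path).getLen(), conf);
  TFile.Reader.Scanner scanner = reader.createScanner();
  BytesWritable key = new BytesWritable();
  BytesWritable value = new BytesWritable();
  while (!scanner.atEnd()) {
    TFile.Reader.Scanner.Entry entry = scanner.entry();
    entry.getKey(key);      // resizes key via setSize() internally (see Example 18)
    entry.getValue(value);  // grows value via setSize(), as in the loop above
    scanner.advance();
  }
  scanner.close();
  reader.close();
}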
 
Example 2  Project: hadoop   File: KVGenerator.java
private void fillKey(BytesWritable o) {
  int len = keyLenRNG.nextInt();
  if (len < MIN_KEY_LEN) len = MIN_KEY_LEN;
  o.setSize(len);
  int n = MIN_KEY_LEN;
  while (n < len) {
    byte[] word = dict[random.nextInt(dict.length)];
    int l = Math.min(word.length, len - n);
    System.arraycopy(word, 0, o.get(), n, l);
    n += l;
  }
  if (sorted
      && WritableComparator.compareBytes(lastKey.get(), MIN_KEY_LEN, lastKey
          .getSize()
          - MIN_KEY_LEN, o.get(), MIN_KEY_LEN, o.getSize() - MIN_KEY_LEN) > 0) {
    incrementPrefix();
  }

  System.arraycopy(prefix, 0, o.get(), 0, MIN_KEY_LEN);
  lastKey.set(o);
}
 
Example 3  Project: hadoop-gpu   File: TFile.java
/**
 * Copy the value into BytesWritable. The input BytesWritable will be
 * automatically resized to the actual value size. The implementation
 * directly uses the buffer inside BytesWritable for storing the value.
 * The call does not require the value length to be known.
 * 
 * @param value
 * @throws IOException
 */
public long getValue(BytesWritable value) throws IOException {
  DataInputStream dis = getValueStream();
  int size = 0;
  try {
    int remain;
    while ((remain = valueBufferInputStream.getRemain()) > 0) {
      value.setSize(size + remain);
      dis.readFully(value.get(), size, remain);
      size += remain;
    }
    return value.getSize();
  } finally {
    dis.close();
  }
}
 
Example 4  Project: big-c   File: TFile.java
/**
 * Copy the value into BytesWritable. The input BytesWritable will be
 * automatically resized to the actual value size. The implementation
 * directly uses the buffer inside BytesWritable for storing the value.
 * The call does not require the value length to be known.
 * 
 * @param value
 * @throws IOException
 */
public long getValue(BytesWritable value) throws IOException {
  DataInputStream dis = getValueStream();
  int size = 0;
  try {
    int remain;
    while ((remain = valueBufferInputStream.getRemain()) > 0) {
      value.setSize(size + remain);
      dis.readFully(value.getBytes(), size, remain);
      size += remain;
    }
    return value.getLength();
  } finally {
    dis.close();
  }
}
 
Example 5  Project: big-c   File: KVGenerator.java
private void fillKey(BytesWritable o) {
  int len = keyLenRNG.nextInt();
  if (len < MIN_KEY_LEN) len = MIN_KEY_LEN;
  o.setSize(len);
  int n = MIN_KEY_LEN;
  while (n < len) {
    byte[] word = dict[random.nextInt(dict.length)];
    int l = Math.min(word.length, len - n);
    System.arraycopy(word, 0, o.get(), n, l);
    n += l;
  }
  if (sorted
      && WritableComparator.compareBytes(lastKey.get(), MIN_KEY_LEN, lastKey
          .getSize()
          - MIN_KEY_LEN, o.get(), MIN_KEY_LEN, o.getSize() - MIN_KEY_LEN) > 0) {
    incrementPrefix();
  }

  System.arraycopy(prefix, 0, o.get(), 0, MIN_KEY_LEN);
  lastKey.set(o);
}
 
Example 6  Project: attic-apex-malhar   File: DTFile.java
/**
 * Copy the value into BytesWritable. The input BytesWritable will be
 * automatically resized to the actual value size. The implementation
 * directly uses the buffer inside BytesWritable for storing the value.
 * The call does not require the value length to be known.
 *
 * @param value
 * @throws IOException
 */
public long getValue(BytesWritable value) throws IOException {
  DataInputStream dis = getValueStream();
  int size = 0;
  try {
    int remain;
    while ((remain = valueBufferInputStream.getRemain()) > 0) {
      value.setSize(size + remain);
      dis.readFully(value.getBytes(), size, remain);
      size += remain;
    }
    return value.getLength();
  } finally {
    dis.close();
  }
}
 
Example 7  Project: RDFS   File: TFile.java
/**
 * Copy the value into BytesWritable. The input BytesWritable will be
 * automatically resized to the actual value size. The implementation
 * directly uses the buffer inside BytesWritable for storing the value.
 * The call does not require the value length to be known.
 * 
 * @param value
 * @throws IOException
 */
public long getValue(BytesWritable value) throws IOException {
  DataInputStream dis = getValueStream();
  int size = 0;
  try {
    int remain;
    while ((remain = valueBufferInputStream.getRemain()) > 0) {
      value.setSize(size + remain);
      dis.readFully(value.get(), size, remain);
      size += remain;
    }
    return value.getSize();
  } finally {
    dis.close();
  }
}
 
Example 8  Project: RDFS   File: KVGenerator.java
private void fillKey(BytesWritable o) {
  int len = keyLenRNG.nextInt();
  if (len < MIN_KEY_LEN) len = MIN_KEY_LEN;
  o.setSize(len);
  int n = MIN_KEY_LEN;
  while (n < len) {
    byte[] word = dict[random.nextInt(dict.length)];
    int l = Math.min(word.length, len - n);
    System.arraycopy(word, 0, o.get(), n, l);
    n += l;
  }
  if (sorted
      && WritableComparator.compareBytes(lastKey.get(), MIN_KEY_LEN, lastKey
          .getSize()
          - MIN_KEY_LEN, o.get(), MIN_KEY_LEN, o.getSize() - MIN_KEY_LEN) > 0) {
    incrementPrefix();
  }

  System.arraycopy(prefix, 0, o.get(), 0, MIN_KEY_LEN);
  lastKey.set(o);
}
 
Example 9  Project: hadoop   File: KVGenerator.java
private void fillValue(BytesWritable o) {
  int len = valLenRNG.nextInt();
  o.setSize(len);
  int n = 0;
  while (n < len) {
    byte[] word = dict[random.nextInt(dict.length)];
    int l = Math.min(word.length, len - n);
    System.arraycopy(word, 0, o.get(), n, l);
    n += l;
  }
}
 
Example 10  Project: hadoop   File: KeySampler.java
public void next(BytesWritable key) {
  key.setSize(Math.max(MIN_KEY_LEN, keyLenRNG.nextInt()));
  random.nextBytes(key.get());
  int n = random.nextInt(max - min) + min;
  byte[] b = key.get();
  b[0] = (byte) (n >> 24);
  b[1] = (byte) (n >> 16);
  b[2] = (byte) (n >> 8);
  b[3] = (byte) n;
}
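The sampler writes a random int n from [min, max) into the first four bytes of the key in big-endian order, so sampled keys sort by n. A small hedged sketch of decoding that prefix back (the helper name is only for illustration):

static int decodeKeyPrefix(BytesWritable key) {
  byte[] b = key.getBytes();          // only the first four bytes form the prefix
  return ((b[0] & 0xff) << 24)
       | ((b[1] & 0xff) << 16)
       | ((b[2] & 0xff) << 8)
       |  (b[3] & 0xff);
}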
 
Example 11  Project: RDFS   File: KeySampler.java
public void next(BytesWritable key) {
  key.setSize(Math.max(MIN_KEY_LEN, keyLenRNG.nextInt()));
  random.nextBytes(key.get());
  int n = random.nextInt(max - min) + min;
  byte[] b = key.get();
  b[0] = (byte) (n >> 24);
  b[1] = (byte) (n >> 16);
  b[2] = (byte) (n >> 8);
  b[3] = (byte) n;
}
 
Example 12  Project: big-c   File: KeySampler.java
public void next(BytesWritable key) {
  key.setSize(Math.max(MIN_KEY_LEN, keyLenRNG.nextInt()));
  random.nextBytes(key.get());
  int n = random.nextInt(max - min) + min;
  byte[] b = key.get();
  b[0] = (byte) (n >> 24);
  b[1] = (byte) (n >> 16);
  b[2] = (byte) (n >> 8);
  b[3] = (byte) n;
}
 
Example 13  Project: hadoop-gpu   File: KeySampler.java
public void next(BytesWritable key) {
  key.setSize(Math.max(MIN_KEY_LEN, keyLenRNG.nextInt()));
  random.nextBytes(key.get());
  int n = random.nextInt(max - min) + min;
  byte[] b = key.get();
  b[0] = (byte) (n >> 24);
  b[1] = (byte) (n >> 16);
  b[2] = (byte) (n >> 8);
  b[3] = (byte) n;
}
 
Example 14  Project: hbase   File: KVGenerator.java
private void fillValue(BytesWritable o) {
  int len = valLenRNG.nextInt();
  o.setSize(len);
  int n = 0;
  while (n < len) {
    byte[] word = dict[random.nextInt(dict.length)];
    int l = Math.min(word.length, len - n);
    System.arraycopy(word, 0, o.get(), n, l);
    n += l;
  }
}
 
Example 15  Project: pxf   File: CustomWritable.java
@Override
public void write(DataOutput out) throws IOException {
    // 0. Timestamp
    Text tms_text = new Text(tms);
    tms_text.write(out);

    // 1. num, int1, int2
    IntWritable intw = new IntWritable();

    for (int i = 0; i < num.length; i++) {
        intw.set(num[i]);
        intw.write(out);
    }

    intw.set(int1);
    intw.write(out);

    intw.set(int2);
    intw.write(out);

    // 2. st1
    Text txt = new Text();

    for (int i = 0; i < strings.length; i++) {
        txt.set(strings[i]);
        txt.write(out);
    }

    txt.set(st1);
    txt.write(out);

    // 3. doubles
    DoubleWritable dw = new DoubleWritable();
    for (int i = 0; i < dubs.length; i++) {
        dw.set(dubs[i]);
        dw.write(out);
    }

    dw.set(db);
    dw.write(out);

    // 4. floats
    FloatWritable fw = new FloatWritable();
    for (int i = 0; i < fts.length; i++) {
        fw.set(fts[i]);
        fw.write(out);
    }

    fw.set(ft);
    fw.write(out);

    // 5. longs
    LongWritable lw = new LongWritable();
    for (int i = 0; i < lngs.length; i++) {
        lw.set(lngs[i]);
        lw.write(out);
    }
    lw.set(lng);
    lw.write(out);

    // 6. booleans
    BooleanWritable bw = new BooleanWritable();
    for (int i = 0; i < bools.length; ++i) {
        bw.set(bools[i]);
        bw.write(out);
    }
    bw.set(bool);
    bw.write(out);

    // 7. shorts
    ShortWritable sw = new ShortWritable();
    for (int i = 0; i < shrts.length; ++i) {
        sw.set(shrts[i]);
        sw.write(out);
    }
    sw.set(shrt);
    sw.write(out);

    // 8. bytes
    // BytesWritable btsw = new BytesWritable(bts);
    // btsw.write(out);
    BytesWritable btsw = new BytesWritable();
    btsw.setCapacity(bts.length);
    btsw.setSize(bts.length);
    btsw.set(bts, 0, bts.length);
    btsw.write(out);
}
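A hedged simplification of the bytes block above: BytesWritable.set(byte[], int, int) copies the data and adjusts the size itself, so the explicit setCapacity()/setSize() calls are most likely redundant here and serve mainly to show setSize() in use. An equivalent shorter form would be:

BytesWritable btsw = new BytesWritable();
btsw.set(bts, 0, bts.length);  // set() handles capacity and size internally
btsw.write(out);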
 
Example 16  Project: hadoop   File: BigMapOutput.java
private static void createBigMapInputFile(Configuration conf, FileSystem fs, 
                                          Path dir, long fileSizeInMB) 
throws IOException {
  // Check if the input path exists and is non-empty
  if (fs.exists(dir)) {
    FileStatus[] list = fs.listStatus(dir);
    if (list.length > 0) {
      throw new IOException("Input path: " + dir + " already exists... ");
    }
  }
  
  Path file = new Path(dir, "part-0");
  SequenceFile.Writer writer = 
    SequenceFile.createWriter(fs, conf, file, 
                              BytesWritable.class, BytesWritable.class,
                              CompressionType.NONE);
  long numBytesToWrite = fileSizeInMB * 1024 * 1024;
  int minKeySize = conf.getInt(MIN_KEY, 10);
  int keySizeRange = 
    conf.getInt(MAX_KEY, 1000) - minKeySize;
  int minValueSize = conf.getInt(MIN_VALUE, 0);
  int valueSizeRange = 
    conf.getInt(MAX_VALUE, 20000) - minValueSize;
  BytesWritable randomKey = new BytesWritable();
  BytesWritable randomValue = new BytesWritable();

  LOG.info("Writing " + numBytesToWrite + " bytes to " + file + " with " +
           "minKeySize: " + minKeySize + " keySizeRange: " + keySizeRange +
           " minValueSize: " + minValueSize + " valueSizeRange: " + valueSizeRange);
  long start = System.currentTimeMillis();
  while (numBytesToWrite > 0) {
    int keyLength = minKeySize + 
      (keySizeRange != 0 ? random.nextInt(keySizeRange) : 0);
    randomKey.setSize(keyLength);
    randomizeBytes(randomKey.getBytes(), 0, randomKey.getLength());
    int valueLength = minValueSize +
      (valueSizeRange != 0 ? random.nextInt(valueSizeRange) : 0);
    randomValue.setSize(valueLength);
    randomizeBytes(randomValue.getBytes(), 0, randomValue.getLength());
    writer.append(randomKey, randomValue);
    numBytesToWrite -= keyLength + valueLength;
  }
  writer.close();
  long end = System.currentTimeMillis();

  LOG.info("Created " + file + " of size: " + fileSizeInMB + "MB in " + 
           (end-start)/1000 + "secs");
}
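randomizeBytes() is a private helper of BigMapOutput that is not shown above; a plausible minimal sketch of it (an assumption, not the project's exact code) that fills only the first getLength() bytes of the reused buffer:

private static void randomizeBytes(byte[] data, int offset, int length) {
  for (int i = offset + length - 1; i >= offset; --i) {
    data[i] = (byte) random.nextInt(256);  // 'random' is the class's shared Random instance
  }
}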
 
Example 17  Project: big-c   File: BigMapOutput.java
private static void createBigMapInputFile(Configuration conf, FileSystem fs, 
                                          Path dir, long fileSizeInMB) 
throws IOException {
  // Check if the input path exists and is non-empty
  if (fs.exists(dir)) {
    FileStatus[] list = fs.listStatus(dir);
    if (list.length > 0) {
      throw new IOException("Input path: " + dir + " already exists... ");
    }
  }
  
  Path file = new Path(dir, "part-0");
  SequenceFile.Writer writer = 
    SequenceFile.createWriter(fs, conf, file, 
                              BytesWritable.class, BytesWritable.class,
                              CompressionType.NONE);
  long numBytesToWrite = fileSizeInMB * 1024 * 1024;
  int minKeySize = conf.getInt(MIN_KEY, 10);
  int keySizeRange = 
    conf.getInt(MAX_KEY, 1000) - minKeySize;
  int minValueSize = conf.getInt(MIN_VALUE, 0);
  int valueSizeRange = 
    conf.getInt(MAX_VALUE, 20000) - minValueSize;
  BytesWritable randomKey = new BytesWritable();
  BytesWritable randomValue = new BytesWritable();

  LOG.info("Writing " + numBytesToWrite + " bytes to " + file + " with " +
           "minKeySize: " + minKeySize + " keySizeRange: " + keySizeRange +
           " minValueSize: " + minValueSize + " valueSizeRange: " + valueSizeRange);
  long start = System.currentTimeMillis();
  while (numBytesToWrite > 0) {
    int keyLength = minKeySize + 
      (keySizeRange != 0 ? random.nextInt(keySizeRange) : 0);
    randomKey.setSize(keyLength);
    randomizeBytes(randomKey.getBytes(), 0, randomKey.getLength());
    int valueLength = minValueSize +
      (valueSizeRange != 0 ? random.nextInt(valueSizeRange) : 0);
    randomValue.setSize(valueLength);
    randomizeBytes(randomValue.getBytes(), 0, randomValue.getLength());
    writer.append(randomKey, randomValue);
    numBytesToWrite -= keyLength + valueLength;
  }
  writer.close();
  long end = System.currentTimeMillis();

  LOG.info("Created " + file + " of size: " + fileSizeInMB + "MB in " + 
           (end-start)/1000 + "secs");
}
 
Example 18  Project: hadoop   File: TFile.java
/**
 * Copy the key into BytesWritable. The input BytesWritable will be
 * automatically resized to the actual key size.
 * 
 * @param key
 *          BytesWritable to hold the key.
 * @throws IOException
 */
public int getKey(BytesWritable key) throws IOException {
  key.setSize(getKeyLength());
  getKey(key.getBytes());
  return key.getLength();
}
 
Example 19  Project: attic-apex-malhar   File: DTFile.java
/**
 * Copy the key into BytesWritable. The input BytesWritable will be
 * automatically resized to the actual key size.
 *
 * @param key
 *          BytesWritable to hold the key.
 * @throws IOException
 */
public int getKey(BytesWritable key) throws IOException {
  key.setSize(getKeyLength());
  getKey(key.getBytes());
  return key.getLength();
}
 
Example 20  Project: RDFS   File: TFile.java
/**
 * Copy the key into BytesWritable. The input BytesWritable will be
 * automatically resized to the actual key size.
 * 
 * @param key
 *          BytesWritable to hold the key.
 * @throws IOException
 */
public int getKey(BytesWritable key) throws IOException {
  key.setSize(getKeyLength());
  getKey(key.get());
  return key.getSize();
}
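A closing note on the API differences visible across the snippets: the older projects (hadoop-gpu, RDFS) call get() and getSize(), which later Hadoop releases deprecate in favor of getBytes() and getLength(). For any BytesWritable value the pairs behave the same, but the returned array may be longer than the logical size, so it should always be read together with getLength():

byte[] raw = value.get();        // deprecated: same buffer as getBytes()
int    n   = value.getSize();    // deprecated: same as getLength()
byte[] buf = value.getBytes();   // backing buffer; may be longer than the valid data
int    len = value.getLength();  // number of valid bytes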