org.apache.hadoop.io.UTF8#toString() source code examples

The following are example usages of org.apache.hadoop.io.UTF8#toString(); follow each project link to view the full source on GitHub.
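
For context: UTF8 is Hadoop's legacy Writable string type (long deprecated in favor of org.apache.hadoop.io.Text), and toString() decodes its internal byte buffer back into a java.lang.String. Below is a minimal, self-contained round-trip sketch; the class name UTF8ToStringDemo is our own, for illustration only.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.io.UTF8;

public class UTF8ToStringDemo {
  public static void main(String[] args) throws IOException {
    // Serialize a UTF8 value the same way Hadoop would on the wire.
    UTF8 original = new UTF8("hello, hadoop");
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    original.write(new DataOutputStream(bytes));

    // Deserialize into a fresh instance and decode with toString().
    UTF8 restored = new UTF8();
    restored.readFields(new DataInputStream(
        new ByteArrayInputStream(bytes.toByteArray())));
    System.out.println(restored.toString());  // prints: hello, hadoop
  }
}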

Example 1. Project: hadoop-gpu, File: IOMapperBase.java
/**
 * Map file name and offset into statistical data.
 * <p>
 * The map task is to get the 
 * <tt>key</tt>, which contains the file name, and the 
 * <tt>value</tt>, which is the offset within the file.
 * 
 * The parameters are passed to the abstract method 
 * {@link #doIO(Reporter,String,long)}, which performs the I/O operation 
 * (usually reading or writing data), and then 
 * {@link #collectStats(OutputCollector,String,long,Object)} 
 * is called to prepare stat data for a subsequent reducer.
 */
public void map(UTF8 key, 
                LongWritable value,
                OutputCollector<UTF8, UTF8> output, 
                Reporter reporter) throws IOException {
  String name = key.toString();
  long longValue = value.get();
  
  reporter.setStatus("starting " + name + " ::host = " + hostName);
  
  long tStart = System.currentTimeMillis();
  Object statValue = doIO(reporter, name, longValue);
  long tEnd = System.currentTimeMillis();
  long execTime = tEnd - tStart;
  collectStats(output, name, execTime, statValue);
  
  reporter.setStatus("finished " + name + " ::host = " + hostName);
}
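
The map() body above delegates to two abstract hooks whose parameter types are given in the Javadoc's {@link} references. As a hedged sketch only, a minimal subclass might look like the following; the class name NoOpIOMapper and the "l:time_" stat key are hypothetical, but the "l:" prefix matches the convention AccumulatingReducer (Example 4 below) dispatches on.

import java.io.IOException;

import org.apache.hadoop.io.UTF8;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;

// Hypothetical IOMapperBase subclass: does no real I/O, just reports timing.
public class NoOpIOMapper extends IOMapperBase {

  @Override
  Object doIO(Reporter reporter, String name, long offset) throws IOException {
    // A real benchmark mapper would read or write the file `name` here.
    return Long.valueOf(offset);
  }

  @Override
  void collectStats(OutputCollector<UTF8, UTF8> output, String name,
                    long execTime, Object ioResult) throws IOException {
    // "l:" keys are summed as longs by AccumulatingReducer (Example 4).
    output.collect(new UTF8("l:time_" + name),
                   new UTF8(String.valueOf(execTime)));
  }
}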
 
Example 2. Project: RDFS, File: TestFileSystem.java
public void map(UTF8 key, LongWritable value,
                OutputCollector<UTF8, LongWritable> collector,
                Reporter reporter)
  throws IOException {
  
  String name = key.toString();
  long size = value.get();
  long seed = Long.parseLong(name);

  random.setSeed(seed);
  reporter.setStatus("creating " + name);

  // write to temp file initially to permit parallel execution
  Path tempFile = new Path(DATA_DIR, name+suffix);
  OutputStream out = fs.create(tempFile);

  long written = 0;
  try {
    while (written < size) {
      if (fastCheck) {
        Arrays.fill(buffer, (byte)random.nextInt(Byte.MAX_VALUE));
      } else {
        random.nextBytes(buffer);
      }
      long remains = size - written;
      int length = (remains<=buffer.length) ? (int)remains : buffer.length;
      out.write(buffer, 0, length);
      written += length;
      reporter.setStatus("writing "+name+"@"+written+"/"+size);
    }
  } finally {
    out.close();
  }
  // rename to final location
  fs.rename(tempFile, new Path(DATA_DIR, name));

  collector.collect(new UTF8("bytes"), new LongWritable(written));

  reporter.setStatus("wrote " + name);
}
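
Two details above are worth noting. The write goes to a temp file first and is renamed into place only when complete, so a partially written file is never visible under its final name. And the file name doubles as the RNG seed, so the read mapper (Example 5) can regenerate the exact byte stream for verification without storing it anywhere. A tiny standalone sketch of that seed trick:

import java.util.Arrays;
import java.util.Random;

public class SeedTrickDemo {
  public static void main(String[] args) {
    long seed = 12345L;            // in the benchmark: Long.parseLong(name)
    byte[] written = new byte[16];
    byte[] expected = new byte[16];

    Random random = new Random();
    random.setSeed(seed);
    random.nextBytes(written);     // what the write mapper produces

    random.setSeed(seed);
    random.nextBytes(expected);    // what the read mapper regenerates

    System.out.println(Arrays.equals(written, expected));  // prints: true
  }
}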
 
Example 3. Project: hadoop-gpu, File: TestFileSystem.java
public void map(UTF8 key, LongWritable value,
                OutputCollector<UTF8, LongWritable> collector,
                Reporter reporter)
  throws IOException {
  
  String name = key.toString();
  long size = value.get();
  long seed = Long.parseLong(name);

  random.setSeed(seed);
  reporter.setStatus("creating " + name);

  // write to temp file initially to permit parallel execution
  Path tempFile = new Path(DATA_DIR, name+suffix);
  OutputStream out = fs.create(tempFile);

  long written = 0;
  try {
    while (written < size) {
      if (fastCheck) {
        Arrays.fill(buffer, (byte)random.nextInt(Byte.MAX_VALUE));
      } else {
        random.nextBytes(buffer);
      }
      long remains = size - written;
      int length = (remains<=buffer.length) ? (int)remains : buffer.length;
      out.write(buffer, 0, length);
      written += length;
      reporter.setStatus("writing "+name+"@"+written+"/"+size);
    }
  } finally {
    out.close();
  }
  // rename to final location
  fs.rename(tempFile, new Path(DATA_DIR, name));

  collector.collect(new UTF8("bytes"), new LongWritable(written));

  reporter.setStatus("wrote " + name);
}
 
Example 4. Project: hadoop-gpu, File: AccumulatingReducer.java
public void reduce(UTF8 key, 
                   Iterator<UTF8> values,
                   OutputCollector<UTF8, UTF8> output, 
                   Reporter reporter
                   ) throws IOException {
  String field = key.toString();

  reporter.setStatus("starting " + field + " ::host = " + hostName);

  // concatenate strings
  if (field.startsWith("s:")) {
    String sSum = "";
    while (values.hasNext())
      sSum += values.next().toString() + ";";
    output.collect(key, new UTF8(sSum));
    reporter.setStatus("finished " + field + " ::host = " + hostName);
    return;
  }
  // sum float values
  if (field.startsWith("f:")) {
    float fSum = 0;
    while (values.hasNext())
      fSum += Float.parseFloat(values.next().toString());
    output.collect(key, new UTF8(String.valueOf(fSum)));
    reporter.setStatus("finished " + field + " ::host = " + hostName);
    return;
  }
  // sum long values
  if (field.startsWith("l:")) {
    long lSum = 0;
    while (values.hasNext()) {
      lSum += Long.parseLong(values.next().toString());
    }
    output.collect(key, new UTF8(String.valueOf(lSum)));
  }
  reporter.setStatus("finished " + field + " ::host = " + hostName);
}
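
The reducer dispatches on a key prefix: "s:" values are concatenated, "f:" values are summed as floats, and "l:" values are summed as longs. Below is a hedged harness sketch that drives one "l:" reduction in-process; it assumes AccumulatingReducer is on the classpath and that its no-arg constructor works outside a running cluster.

import java.io.IOException;
import java.util.Arrays;
import java.util.Iterator;

import org.apache.hadoop.io.UTF8;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;

public class AccumulatingReducerDemo {
  public static void main(String[] args) throws IOException {
    AccumulatingReducer reducer = new AccumulatingReducer();
    Iterator<UTF8> values =
        Arrays.asList(new UTF8("3"), new UTF8("4")).iterator();
    // OutputCollector has a single collect() method, so a lambda suffices.
    OutputCollector<UTF8, UTF8> collector =
        (key, value) -> System.out.println(key + " = " + value);
    // Reporter.NULL is the old mapred API's no-op reporter.
    reducer.reduce(new UTF8("l:bytes"), values, collector, Reporter.NULL);
    // prints: l:bytes = 7
  }
}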
 
Example 5. Project: RDFS, File: TestFileSystem.java
public void map(UTF8 key, LongWritable value,
                OutputCollector<UTF8, LongWritable> collector,
                Reporter reporter)
  throws IOException {
  
  String name = key.toString();
  long size = value.get();
  long seed = Long.parseLong(name);

  random.setSeed(seed);
  reporter.setStatus("opening " + name);

  DataInputStream in =
    new DataInputStream(fs.open(new Path(DATA_DIR, name)));

  long read = 0;
  try {
    while (read < size) {
      long remains = size - read;
      int n = (remains<=buffer.length) ? (int)remains : buffer.length;
      in.readFully(buffer, 0, n);
      read += n;
      if (fastCheck) {
        Arrays.fill(check, (byte)random.nextInt(Byte.MAX_VALUE));
      } else {
        random.nextBytes(check);
      }
      if (n != buffer.length) {
        Arrays.fill(buffer, n, buffer.length, (byte)0);
        Arrays.fill(check, n, check.length, (byte)0);
      }
      assertTrue(Arrays.equals(buffer, check));

      reporter.setStatus("reading "+name+"@"+read+"/"+size);

    }
  } finally {
    in.close();
  }

  collector.collect(new UTF8("bytes"), new LongWritable(read));

  reporter.setStatus("read " + name);
}
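
One subtlety above: on the final, partial read, n can be smaller than buffer.length, so both arrays are zero-padded past index n before Arrays.equals compares them; only the freshly read bytes participate in the check. A standalone illustration:

import java.util.Arrays;

public class PartialBufferCompareDemo {
  public static void main(String[] args) {
    byte[] buffer = {1, 2, 3, 9, 9};  // last read returned n = 3 fresh bytes
    byte[] check  = {1, 2, 3, 7, 7};  // expected bytes, with a stale tail
    int n = 3;
    Arrays.fill(buffer, n, buffer.length, (byte) 0);
    Arrays.fill(check, n, check.length, (byte) 0);
    System.out.println(Arrays.equals(buffer, check));  // prints: true
  }
}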
 
Example 6. Project: RDFS, File: FSImageSerialization.java
@SuppressWarnings("deprecation")
public static String readString(DataInputStream in) throws IOException {
  UTF8 ustr = TL_DATA.get().U_STR;
  ustr.readFields(in);
  return ustr.toString();
}
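
readString() reuses a thread-local UTF8 instance (TL_DATA.get().U_STR) so deserialization does not allocate per call. The matching writer would be the mirror image; the sketch below is inferred from the reader above rather than copied from the RDFS source, using UTF8's set() and write() methods.

@SuppressWarnings("deprecation")
public static void writeString(String str, DataOutputStream out) throws IOException {
  // Reuse the same thread-local UTF8 buffer to avoid per-call allocation.
  UTF8 ustr = TL_DATA.get().U_STR;
  ustr.set(str);
  ustr.write(out);
}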
 
Example 7. Project: hadoop-gpu, File: TestFileSystem.java
public void map(UTF8 key, LongWritable value,
                OutputCollector<UTF8, LongWritable> collector,
                Reporter reporter)
  throws IOException {
  
  String name = key.toString();
  long size = value.get();
  long seed = Long.parseLong(name);

  random.setSeed(seed);
  reporter.setStatus("opening " + name);

  DataInputStream in =
    new DataInputStream(fs.open(new Path(DATA_DIR, name)));

  long read = 0;
  try {
    while (read < size) {
      long remains = size - read;
      int n = (remains<=buffer.length) ? (int)remains : buffer.length;
      in.readFully(buffer, 0, n);
      read += n;
      if (fastCheck) {
        Arrays.fill(check, (byte)random.nextInt(Byte.MAX_VALUE));
      } else {
        random.nextBytes(check);
      }
      if (n != buffer.length) {
        Arrays.fill(buffer, n, buffer.length, (byte)0);
        Arrays.fill(check, n, check.length, (byte)0);
      }
      assertTrue(Arrays.equals(buffer, check));

      reporter.setStatus("reading "+name+"@"+read+"/"+size);

    }
  } finally {
    in.close();
  }

  collector.collect(new UTF8("bytes"), new LongWritable(read));

  reporter.setStatus("read " + name);
}