org.apache.hadoop.io.Text#writeString ( )源码实例Demo

下面列出了org.apache.hadoop.io.Text#writeString ( ) 实例代码,或者点击链接到github查看源代码,也可以在右侧发表评论。

源代码1 项目: anthelion   文件: Metadata.java
/**
 * Serializes this metadata to the given stream: the total entry count,
 * then for each name the name itself, the number of its non-null values,
 * and finally those non-null values in order.
 *
 * @param out stream to write to
 * @throws IOException if the underlying stream fails
 */
public final void write(DataOutput out) throws IOException {
  out.writeInt(size());
  for (String name : names()) {
    Text.writeString(out, name);
    String[] values = _getValues(name);
    // Count the non-null entries first so the reader knows how many follow.
    int nonNullCount = 0;
    for (String value : values) {
      if (value != null) {
        nonNullCount++;
      }
    }
    out.writeInt(nonNullCount);
    for (String value : values) {
      if (value != null) {
        Text.writeString(out, value);
      }
    }
  }
}
 
源代码2 项目: gemfirexd-oss   文件: SequenceFile.java
/** Write and flush the file header. */
private void writeFileHeader() 
  throws IOException {
  out.write(VERSION);
  Text.writeString(out, keyClass.getName());
  Text.writeString(out, valClass.getName());
  
  out.writeBoolean(this.isCompressed());
  out.writeBoolean(this.isBlockCompressed());
  
  if (this.isCompressed()) {
    Text.writeString(out, (codec.getClass()).getName());
  }
  this.metadata.write(out);
  out.write(sync);                       // write the sync bytes
  out.flush();                           // flush header
}
 
源代码3 项目: incubator-tez   文件: TezGroupedSplit.java
/**
 * Serializes this grouped split: the wrapped input-format name, the
 * concrete class of the wrapped splits (all wrapped splits share one
 * class, recorded once from the first element), each wrapped split,
 * the total length, and a count-prefixed list of preferred locations.
 *
 * @param out stream to write to
 * @throws IOException if the underlying stream fails
 */
@Override
public void write(DataOutput out) throws IOException {
  if (wrappedSplits == null) {
    throw new TezUncheckedException("Wrapped splits cannot be empty");
  }

  Text.writeString(out, wrappedInputFormatName);
  Text.writeString(out, wrappedSplits.get(0).getClass().getName());
  out.writeInt(wrappedSplits.size());
  for (InputSplit wrapped : wrappedSplits) {
    writeWrappedSplit(wrapped, out);
  }
  out.writeLong(length);

  // An absent location array is encoded identically to an empty one:
  // as a zero count with no entries.
  int locationCount = (locations == null) ? 0 : locations.length;
  out.writeInt(locationCount);
  for (int i = 0; i < locationCount; i++) {
    Text.writeString(out, locations[i]);
  }
}
 
源代码4 项目: hadoop   文件: TaskStatus.java
/**
 * Serializes this task status: task id, progress, slot count, run state,
 * diagnostics, state string, phase, start/finish times, counter flags,
 * output size, the counters themselves and the next record range.
 * Field order must match the corresponding readFields implementation.
 */
public void write(DataOutput out) throws IOException {
  taskid.write(out);
  out.writeFloat(progress);
  out.writeInt(numSlots);
  WritableUtils.writeEnum(out, runState);
  Text.writeString(out, diagnosticInfo);
  Text.writeString(out, stateString);
  WritableUtils.writeEnum(out, phase);
  out.writeLong(startTime);
  out.writeLong(finishTime);
  out.writeBoolean(includeAllCounters);
  out.writeLong(outputSize);
  counters.write(out);
  nextRecordRange.write(out);
}
 
源代码5 项目: RDFS   文件: JobProfile.java
/**
 * Serializes this job profile: job id followed by the job file path,
 * tracking URL, user, job name and queue name.
 * NOTE(review): presumably all string fields are non-null here —
 * confirm against the constructors before relying on it.
 */
public void write(DataOutput out) throws IOException {
  jobid.write(out);
  Text.writeString(out, jobFile);
  Text.writeString(out, url);
  Text.writeString(out, user);
  Text.writeString(out, name);
  Text.writeString(out, queueName);
}
 
源代码6 项目: hadoop   文件: JobProfile.java
/**
 * Serializes this job profile: job id followed by the job file path,
 * tracking URL, user, job name and queue name, in that fixed order.
 */
public void write(DataOutput out) throws IOException {
  jobid.write(out);
  Text.writeString(out, jobFile);
  Text.writeString(out, url);
  Text.writeString(out, user);
  Text.writeString(out, name);
  Text.writeString(out, queueName);
}
 
源代码7 项目: hadoop   文件: BinaryProtocol.java
/**
 * Sends an AUTHENTICATION_REQ message carrying the given digest and
 * challenge strings: the message-type code as a vint, then the two
 * strings in order.
 */
public void authenticate(String digest, String challenge)
    throws IOException {
  LOG.debug("Sending AUTHENTICATION_REQ, digest=" + digest + ", challenge="
      + challenge);
  WritableUtils.writeVInt(stream, MessageType.AUTHENTICATION_REQ.code);
  Text.writeString(stream, digest);
  Text.writeString(stream, challenge);
}
 
源代码8 项目: RDFS   文件: Counters.java
/**
 * Serializes this counter group: the display name, a vint count of
 * sub-counters, then each sub-counter in the map's iteration order.
 * Synchronized to get a consistent snapshot of {@code subcounters}.
 */
public synchronized void write(DataOutput out) throws IOException {
  Text.writeString(out, displayName);
  WritableUtils.writeVInt(out, subcounters.size());
  for (Counter sub : subcounters.values()) {
    sub.write(out);
  }
}
 
源代码9 项目: big-c   文件: JobSplitWriter.java
/**
 * Writes old-API (mapred) splits to the split file and returns the
 * per-split meta info (locations, byte offset within the file, and
 * split length) later used to build the split meta-info file.
 */
private static SplitMetaInfo[] writeOldSplits(
    org.apache.hadoop.mapred.InputSplit[] splits,
    FSDataOutputStream out, Configuration conf) throws IOException {
  SplitMetaInfo[] info = new SplitMetaInfo[splits.length];
  if (splits.length != 0) {
    int i = 0;
    long offset = out.getPos();
    int maxBlockLocations = conf.getInt(MRConfig.MAX_BLOCK_LOCATIONS_KEY,
        MRConfig.MAX_BLOCK_LOCATIONS_DEFAULT);
    for(org.apache.hadoop.mapred.InputSplit split: splits) {
      long prevLen = out.getPos();
      // Each record is the split's class name followed by its own data.
      Text.writeString(out, split.getClass().getName());
      split.write(out);
      long currLen = out.getPos();
      String[] locations = split.getLocations();
      // Cap the location list so the meta info stays bounded in size.
      if (locations.length > maxBlockLocations) {
        LOG.warn("Max block location exceeded for split: "
            + split + " splitsize: " + locations.length +
            " maxsize: " + maxBlockLocations);
        locations = Arrays.copyOf(locations,maxBlockLocations);
      }
      info[i++] = new JobSplit.SplitMetaInfo( 
          locations, offset,
          split.getLength());
      // Advance the logical offset by the bytes this split occupied.
      offset += currLen - prevLen;
    }
  }
  return info;
}
 
/**
 * Serializes this split: a count of file splits followed by each
 * split's path string, start offset and length, then the total length.
 * Locations are deliberately not serialized — see FileSplit.write().
 */
public void write(DataOutput out) throws IOException {
    // splits
    out.writeInt(splits.size());
    for (FileSplit fileSplit : splits) {
        Text.writeString(out, fileSplit.getPath().toString());
        out.writeLong(fileSplit.getStart());
        out.writeLong(fileSplit.getLength());
    }
    // length
    out.writeLong(length);
    // locations: not needed for serialization.  See FileSplit.write().
}
 
源代码11 项目: nutch-htmlunit   文件: LinkDatum.java
/**
 * Serializes this link datum: URL, anchor text (the empty string when
 * the anchor is null), score, timestamp and link type.
 */
public void write(DataOutput out)
  throws IOException {
  Text.writeString(out, url);
  // Normalize a null anchor to "" so readers never observe null.
  String anchorText = (anchor == null) ? "" : anchor;
  Text.writeString(out, anchorText);
  out.writeFloat(score);
  out.writeLong(timestamp);
  out.writeByte(linkType);
}
 
源代码12 项目: big-c   文件: JobSplit.java
/**
 * Serializes this split meta info: a vint-prefixed list of location
 * strings, then the start offset and input data length as vlongs.
 */
public void write(DataOutput out) throws IOException {
  WritableUtils.writeVInt(out, locations.length);
  for (String location : locations) {
    Text.writeString(out, location);
  }
  WritableUtils.writeVLong(out, startOffset);
  WritableUtils.writeVLong(out, inputDataLength);
}
 
源代码13 项目: hadoop   文件: DataDrivenDBInputFormat.java
/** {@inheritDoc} */
public void write(DataOutput output) throws IOException {
  // Serialize the two WHERE-clause bounds that define this DB split.
  // NOTE(review): assumes both clauses are non-null — confirm callers.
  Text.writeString(output, this.lowerBoundClause);
  Text.writeString(output, this.upperBoundClause);
}
 
/**
 * Serializes the path string followed by the delegate split's own data.
 */
public void write(DataOutput out) throws IOException {
    Text.writeString(out, path.toString());
    delegate.write(out);
}
 
源代码15 项目: hadoop-gpu   文件: DistTool.java
/**
 * Writes a possibly-null string: a boolean presence flag, followed by
 * the string itself only when it is non-null.
 *
 * @param out stream to write to
 * @param s   string to write; may be null
 */
protected static void writeString(DataOutput out, String s
    ) throws IOException {
  if (s == null) {
    out.writeBoolean(false);
  } else {
    out.writeBoolean(true);
    Text.writeString(out, s);
  }
}
 
源代码16 项目: RDFS   文件: DistTool.java
/**
 * Null-safe string serialization: emits a presence flag, then the
 * string body only for non-null input.
 *
 * @param out stream to write to
 * @param s   string to write; may be null
 */
protected static void writeString(DataOutput out, String s
    ) throws IOException {
  final boolean present = (s != null);
  out.writeBoolean(present);
  if (present) {
    Text.writeString(out, s);
  }
}
 
源代码17 项目: hadoop   文件: DistCpV1.java
/**
 * Serializes the input file status followed by the output path string.
 */
public void write(DataOutput out) throws IOException {
  input.write(out);
  Text.writeString(out, output);
}
 
源代码18 项目: incubator-tez   文件: ShuffleHeader.java
/**
 * Serializes the shuffle header: map attempt id string, compressed and
 * uncompressed payload lengths as vlongs, and the target reduce id as
 * a vint. Order must match the corresponding readFields implementation.
 */
public void write(DataOutput out) throws IOException {
  Text.writeString(out, mapId);
  WritableUtils.writeVLong(out, compressedLength);
  WritableUtils.writeVLong(out, uncompressedLength);
  WritableUtils.writeVInt(out, forReduce);
}
 
源代码19 项目: linden   文件: Shard.java
/** Serializes this shard as its directory string. */
@Override
public void write(DataOutput out) throws IOException {
  Text.writeString(out, dir);
}
 
源代码20 项目: RDFS   文件: BinaryProtocol.java
/**
 * Sends a SET_INPUT_TYPES message to the child process: the
 * message-type code as a vint, then the key and value type names.
 */
public void setInputTypes(String keyType, 
                          String valueType) throws IOException {
  WritableUtils.writeVInt(stream, MessageType.SET_INPUT_TYPES.code);
  Text.writeString(stream, keyType);
  Text.writeString(stream, valueType);
}