The following are example usages of org.apache.hadoop.io.Text#writeString(). Follow the link to view the full source on GitHub, or leave a comment in the panel on the right.
/**
 * Serializes this object: the entry count, then for each name the name
 * itself, the number of its non-null values, and each non-null value.
 */
public final void write(DataOutput out) throws IOException {
  out.writeInt(size());
  for (String name : names()) {
    Text.writeString(out, name);
    String[] vals = _getValues(name);
    // First pass: count non-null values so the reader knows how many follow.
    int nonNull = 0;
    for (String v : vals) {
      if (v != null) {
        nonNull++;
      }
    }
    out.writeInt(nonNull);
    // Second pass: emit only the non-null values, in array order.
    for (String v : vals) {
      if (v != null) {
        Text.writeString(out, v);
      }
    }
  }
}
/**
 * Write and flush the file header: version bytes, key/value class names,
 * compression flags, optional codec class name, user metadata, and the
 * sync marker, in that exact order (readers depend on it).
 */
private void writeFileHeader()
throws IOException {
out.write(VERSION); // raw version bytes come first
Text.writeString(out, keyClass.getName());
Text.writeString(out, valClass.getName());
out.writeBoolean(this.isCompressed());
out.writeBoolean(this.isBlockCompressed());
// The codec class name is only present when compression is enabled.
if (this.isCompressed()) {
Text.writeString(out, (codec.getClass()).getName());
}
this.metadata.write(out); // user-supplied metadata serializes itself
out.write(sync); // write the sync bytes
out.flush(); // flush header
}
/**
 * Serializes the wrapped-split container: input format name, the shared
 * split class name (taken from the first split), the split count and each
 * split, the total length, and finally the location list.
 */
@Override
public void write(DataOutput out) throws IOException {
  // Refuse to serialize an uninitialized wrapper.
  if (wrappedSplits == null) {
    throw new TezUncheckedException("Wrapped splits cannot be empty");
  }
  Text.writeString(out, wrappedInputFormatName);
  Text.writeString(out, wrappedSplits.get(0).getClass().getName());
  out.writeInt(wrappedSplits.size());
  for (InputSplit split : wrappedSplits) {
    writeWrappedSplit(split, out);
  }
  out.writeLong(length);
  // A null location array is encoded identically to an empty one: count 0.
  int locationCount = (locations == null) ? 0 : locations.length;
  out.writeInt(locationCount);
  for (int i = 0; i < locationCount; i++) {
    Text.writeString(out, locations[i]);
  }
}
/**
 * Serializes this task status. Field order is the wire format and must
 * match the corresponding readFields implementation: task id, progress,
 * slot count, run state, diagnostic/state strings, phase, start/finish
 * times, the include-all-counters flag, output size, counters, and the
 * next record range.
 */
public void write(DataOutput out) throws IOException {
taskid.write(out);
out.writeFloat(progress);
out.writeInt(numSlots);
WritableUtils.writeEnum(out, runState);
Text.writeString(out, diagnosticInfo);
Text.writeString(out, stateString);
WritableUtils.writeEnum(out, phase);
out.writeLong(startTime);
out.writeLong(finishTime);
out.writeBoolean(includeAllCounters);
out.writeLong(outputSize);
counters.write(out);
nextRecordRange.write(out);
}
/**
 * Serializes this job profile: the job id (which serializes itself),
 * followed by the job file, URL, user, name, and queue name strings.
 */
public void write(DataOutput out) throws IOException {
jobid.write(out);
Text.writeString(out, jobFile);
Text.writeString(out, url);
Text.writeString(out, user);
Text.writeString(out, name);
Text.writeString(out, queueName);
}
/**
 * Serializes this job profile: job id, then the job file, URL, user,
 * name, and queue name strings. (An identical serializer appears earlier
 * on this page; both copies must stay in sync with readFields.)
 */
public void write(DataOutput out) throws IOException {
jobid.write(out);
Text.writeString(out, jobFile);
Text.writeString(out, url);
Text.writeString(out, user);
Text.writeString(out, name);
Text.writeString(out, queueName);
}
/**
 * Sends an AUTHENTICATION_REQ message down the stream: the message code
 * as a v-int, followed by the digest and challenge strings.
 *
 * @param digest    authentication digest to send
 * @param challenge challenge string to send
 * @throws IOException if writing to the stream fails
 */
public void authenticate(String digest, String challenge)
throws IOException {
// NOTE(review): the digest is written to the debug log in clear text —
// confirm it is not a secret before enabling debug logging in production.
LOG.debug("Sending AUTHENTICATION_REQ, digest=" + digest + ", challenge="
+ challenge);
WritableUtils.writeVInt(stream, MessageType.AUTHENTICATION_REQ.code);
Text.writeString(stream, digest);
Text.writeString(stream, challenge);
}
/**
 * Serializes this counter group: the display name, the number of child
 * counters as a v-int, then each child in the map's iteration order.
 */
public synchronized void write(DataOutput out) throws IOException {
  Text.writeString(out, displayName);
  final int childCount = subcounters.size();
  WritableUtils.writeVInt(out, childCount);
  for (Counter child : subcounters.values()) {
    child.write(out);
  }
}
/**
 * Writes each old-API (mapred) split to the output stream and builds the
 * corresponding split meta info array. For every split, the split's class
 * name is written followed by the split's own serialization; the meta info
 * records the split's hosts, its byte offset within the stream, and its
 * logical length. Location lists longer than the configured maximum are
 * truncated (with a warning) before being stored in the meta info.
 *
 * @param splits the splits to serialize (may be empty)
 * @param out    stream to write split data to
 * @param conf   supplies the max-block-locations limit
 * @return one SplitMetaInfo per input split (empty array for no splits)
 * @throws IOException if writing a split fails
 */
private static SplitMetaInfo[] writeOldSplits(
org.apache.hadoop.mapred.InputSplit[] splits,
FSDataOutputStream out, Configuration conf) throws IOException {
SplitMetaInfo[] info = new SplitMetaInfo[splits.length];
if (splits.length != 0) {
int i = 0;
// Offset of the current split's record within the output stream.
long offset = out.getPos();
int maxBlockLocations = conf.getInt(MRConfig.MAX_BLOCK_LOCATIONS_KEY,
MRConfig.MAX_BLOCK_LOCATIONS_DEFAULT);
for(org.apache.hadoop.mapred.InputSplit split: splits) {
long prevLen = out.getPos();
Text.writeString(out, split.getClass().getName());
split.write(out);
long currLen = out.getPos();
String[] locations = split.getLocations();
// Cap the host list so a pathological split can't bloat the meta info.
if (locations.length > maxBlockLocations) {
LOG.warn("Max block location exceeded for split: "
+ split + " splitsize: " + locations.length +
" maxsize: " + maxBlockLocations);
locations = Arrays.copyOf(locations,maxBlockLocations);
}
info[i++] = new JobSplit.SplitMetaInfo(
locations, offset,
split.getLength());
// Advance by the number of bytes this split's record occupied.
offset += currLen - prevLen;
}
}
return info;
}
/**
 * Serializes this composite split: the number of child splits, then
 * (path, start, length) for each, followed by the aggregate length.
 */
public void write(DataOutput out) throws IOException {
  // splits
  out.writeInt(splits.size());
  for (FileSplit child : splits) {
    String pathName = child.getPath().toString();
    Text.writeString(out, pathName);
    out.writeLong(child.getStart());
    out.writeLong(child.getLength());
  }
  // length
  out.writeLong(length);
  // locations: not needed for serialization. See FileSplit.write().
}
/**
 * Serializes this link record: URL, anchor (null becomes the empty
 * string), score, timestamp, and link type, in that order.
 */
public void write(DataOutput out) throws IOException {
  Text.writeString(out, url);
  // A null anchor is written as "" so the wire format never carries null.
  String safeAnchor = (anchor == null) ? "" : anchor;
  Text.writeString(out, safeAnchor);
  out.writeFloat(score);
  out.writeLong(timestamp);
  out.writeByte(linkType);
}
/**
 * Serializes this split descriptor: the location count and each host
 * string, then the start offset and input data length as v-longs.
 */
public void write(DataOutput out) throws IOException {
  WritableUtils.writeVInt(out, locations.length);
  for (String host : locations) {
    Text.writeString(out, host);
  }
  WritableUtils.writeVLong(out, startOffset);
  WritableUtils.writeVLong(out, inputDataLength);
}
/**
 * {@inheritDoc}
 *
 * <p>Writes the lower then upper bound clause strings.
 */
public void write(DataOutput output) throws IOException {
Text.writeString(output, this.lowerBoundClause);
Text.writeString(output, this.upperBoundClause);
}
/**
 * Serializes this object: the path as a string, followed by the wrapped
 * delegate's own serialization.
 */
public void write(DataOutput out) throws IOException {
Text.writeString(out, path.toString());
delegate.write(out);
}
/**
 * Writes a possibly-null string: a presence flag first, then the string
 * itself only when non-null, so readers can distinguish null from "".
 */
protected static void writeString(DataOutput out, String s
) throws IOException {
  if (s == null) {
    out.writeBoolean(false);
  } else {
    out.writeBoolean(true);
    Text.writeString(out, s);
  }
}
/**
 * Null-safe string writer: emits a boolean presence flag, then the string
 * payload only when it is present.
 */
protected static void writeString(DataOutput out, String s
) throws IOException {
  final boolean present = (s != null);
  out.writeBoolean(present);
  if (present) {
    Text.writeString(out, s);
  }
}
/**
 * Serializes this pair: the input (which serializes itself) followed by
 * the output string.
 */
public void write(DataOutput out) throws IOException {
input.write(out);
Text.writeString(out, output);
}
/**
 * Serializes this shuffle header: map id string, compressed and
 * uncompressed lengths as v-longs, and the target reduce number as a
 * v-int.
 */
public void write(DataOutput out) throws IOException {
Text.writeString(out, mapId);
WritableUtils.writeVLong(out, compressedLength);
WritableUtils.writeVLong(out, uncompressedLength);
WritableUtils.writeVInt(out, forReduce);
}
/** Serializes this object; the directory string is its only state. */
@Override
public void write(DataOutput out) throws IOException {
Text.writeString(out, dir);
}
/**
 * Sends a SET_INPUT_TYPES message down the stream: the message code as a
 * v-int, followed by the key and value type name strings.
 *
 * @param keyType   name of the key type
 * @param valueType name of the value type
 * @throws IOException if writing to the stream fails
 */
public void setInputTypes(String keyType,
String valueType) throws IOException {
WritableUtils.writeVInt(stream, MessageType.SET_INPUT_TYPES.code);
Text.writeString(stream, keyType);
Text.writeString(stream, valueType);
}