org.apache.hadoop.io.compress.SplittableCompressionCodec#org.apache.hadoop.mapreduce.lib.input.SplitLineReader source code examples

Listed below are example usages of org.apache.hadoop.mapreduce.lib.input.SplitLineReader taken from projects that also use org.apache.hadoop.io.compress.SplittableCompressionCodec; the complete source files can be viewed on GitHub in the projects named with each example.

Example 1   Project: hadoop   File: LineRecordReader.java
public LineRecordReader(InputStream in, long offset, long endOffset,
    int maxLineLength, byte[] recordDelimiter) {
  this.maxLineLength = maxLineLength;
  // Wrap the raw stream in a SplitLineReader that honors the custom record delimiter.
  this.in = new SplitLineReader(in, recordDelimiter);
  // Track the byte range of this split and the current read position.
  this.start = offset;
  this.pos = offset;
  this.end = endOffset;
  filePosition = null;
}
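
A minimal usage sketch for the constructor above, not taken from the Hadoop source: it assumes these constructors belong to org.apache.hadoop.mapred.LineRecordReader (the old-API reader, which is what the fully qualified MAX_LINE_LENGTH reference in example 2 suggests), feeds an in-memory stream with a custom record delimiter through the reader, and prints each record with its starting byte offset. The class name LineRecordReaderDemo and the sample data are illustrative only.

import java.io.ByteArrayInputStream;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.LineRecordReader;

public class LineRecordReaderDemo {
  public static void main(String[] args) throws Exception {
    byte[] data = "a|b|c".getBytes(StandardCharsets.UTF_8);
    byte[] delimiter = "|".getBytes(StandardCharsets.UTF_8);

    // Treat the whole byte range [0, data.length) as a single split.
    LineRecordReader reader = new LineRecordReader(
        new ByteArrayInputStream(data), 0L, data.length,
        Integer.MAX_VALUE, delimiter);

    LongWritable key = reader.createKey();
    Text value = reader.createValue();
    while (reader.next(key, value)) {
      // Prints the record's starting offset, then its text.
      System.out.println(key.get() + "\t" + value);
    }
    reader.close();
  }
}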
 
Example 2   Project: hadoop   File: LineRecordReader.java
public LineRecordReader(InputStream in, long offset, long endOffset,
                        Configuration job, byte[] recordDelimiter)
  throws IOException {
  // The maximum record length comes from the job configuration rather than a parameter.
  this.maxLineLength = job.getInt(org.apache.hadoop.mapreduce.lib.input.
    LineRecordReader.MAX_LINE_LENGTH, Integer.MAX_VALUE);
  // This overload also passes the Configuration through to the SplitLineReader.
  this.in = new SplitLineReader(in, job, recordDelimiter);
  this.start = offset;
  this.pos = offset;
  this.end = endOffset;
  filePosition = null;
}
 
Example 3   Project: big-c   File: LineRecordReader.java
public LineRecordReader(InputStream in, long offset, long endOffset,
    int maxLineLength, byte[] recordDelimiter) {
  this.maxLineLength = maxLineLength;
  this.in = new SplitLineReader(in, recordDelimiter);
  this.start = offset;
  this.pos = offset;
  this.end = endOffset;    
  filePosition = null;
}
 
Example 4   Project: big-c   File: LineRecordReader.java
public LineRecordReader(InputStream in, long offset, long endOffset, 
                        Configuration job, byte[] recordDelimiter)
  throws IOException{
  this.maxLineLength = job.getInt(org.apache.hadoop.mapreduce.lib.input.
    LineRecordReader.MAX_LINE_LENGTH, Integer.MAX_VALUE);
  this.in = new SplitLineReader(in, job, recordDelimiter);
  this.start = offset;
  this.pos = offset;
  this.end = endOffset;    
  filePosition = null;
}
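
The Configuration-based constructors in examples 2 and 4 are typically handed a SplitCompressionInputStream produced by a SplittableCompressionCodec such as BZip2Codec. The sketch below shows one way that wiring could look; the class SplittableCodecDemo and the helper openSplit are hypothetical names, not Hadoop API, and the sketch assumes a codec that supports BYBLOCK reads. The codec-adjusted start and end offsets are handed to the constructor, which is also how LineRecordReader positions itself when built from a FileSplit.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.compress.CodecPool;
import org.apache.hadoop.io.compress.Decompressor;
import org.apache.hadoop.io.compress.SplitCompressionInputStream;
import org.apache.hadoop.io.compress.SplittableCompressionCodec;
import org.apache.hadoop.mapred.LineRecordReader;

public class SplittableCodecDemo {

  // Hypothetical helper: open one split of a file compressed with a splittable
  // codec (e.g. BZip2Codec) and wrap it in a LineRecordReader. The caller is
  // responsible for closing the reader and returning the Decompressor to the
  // CodecPool when done.
  static LineRecordReader openSplit(Configuration conf, Path file,
      SplittableCompressionCodec codec, long splitStart, long splitEnd,
      byte[] recordDelimiter) throws IOException {
    FileSystem fs = file.getFileSystem(conf);
    FSDataInputStream fileIn = fs.open(file);
    Decompressor decompressor = CodecPool.getDecompressor(codec);

    // Let the codec reposition the raw stream onto a compression-block
    // boundary for this split.
    SplitCompressionInputStream cIn = codec.createInputStream(
        fileIn, decompressor, splitStart, splitEnd,
        SplittableCompressionCodec.READ_MODE.BYBLOCK);

    // Pass the codec-adjusted boundaries to the Configuration-based
    // constructor shown above.
    return new LineRecordReader(cIn, cIn.getAdjustedStart(),
        cIn.getAdjustedEnd(), conf, recordDelimiter);
  }
}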