org.apache.hadoop.io.SequenceFile#CompressionType Code Examples

The examples below show how org.apache.hadoop.io.SequenceFile#CompressionType is used in a number of open-source projects. Each snippet is an excerpt from the project and file named in its heading; see the project's GitHub repository for the full source.
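Before the project examples, a minimal self-contained sketch of the enum itself may help. SequenceFile.CompressionType has three constants: NONE (no compression), RECORD (each value is compressed individually), and BLOCK (keys and values are buffered and compressed in batches). The file path and key/value classes below are illustrative placeholders, not drawn from any of the projects listed here.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;

public class CompressionTypeDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // BLOCK generally compresses best; RECORD compresses each value on
    // its own; NONE writes the data uncompressed.
    SequenceFile.Writer writer = SequenceFile.createWriter(conf,
        SequenceFile.Writer.file(new Path("/tmp/demo.seq")),  // placeholder path
        SequenceFile.Writer.keyClass(Text.class),
        SequenceFile.Writer.valueClass(IntWritable.class),
        SequenceFile.Writer.compression(SequenceFile.CompressionType.BLOCK));
    try {
      writer.append(new Text("key"), new IntWritable(1));
    } finally {
      writer.close();
    }
  }
}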

Example 1  Project: hadoop  File: TestMapRed.java
@Test 
public void testCompression() throws Exception {
  EnumSet<SequenceFile.CompressionType> seq =
    EnumSet.allOf(SequenceFile.CompressionType.class);
  for (CompressionType redCompression : seq) {
    for(int combine=0; combine < 2; ++combine) {
      checkCompression(false, redCompression, combine == 1);
      checkCompression(true, redCompression, combine == 1);
    }
  }
}
 
Example 2  Project: big-c  File: TestMapRed.java
@Test 
public void testCompression() throws Exception {
  EnumSet<SequenceFile.CompressionType> seq =
    EnumSet.allOf(SequenceFile.CompressionType.class);
  for (CompressionType redCompression : seq) {
    for(int combine=0; combine < 2; ++combine) {
      checkCompression(false, redCompression, combine == 1);
      checkCompression(true, redCompression, combine == 1);
    }
  }
}
 
Example 3  Project: datacollector  File: TestRecordWriterManager.java
private void testSeqFile(CompressionCodec compressionCodec, SequenceFile.CompressionType compressionType)
    throws Exception {
  RecordWriterManager mgr = managerBuilder()
    .dirPathTemplate(getTestDir().toString() + "/${YYYY()}")
    .compressionCodec(compressionCodec)
    .compressionType(compressionType)
    .fileType(HdfsFileType.SEQUENCE_FILE)
    .build();

  FileSystem fs = FileSystem.get(uri, hdfsConf);
  Path file = new Path(getTestDir(), UUID.randomUUID().toString());
  long expires = System.currentTimeMillis() + 50000;
  RecordWriter writer = mgr.createWriter(fs, file, 50000);
  Assert.assertTrue(expires <= writer.getExpiresOn());
  Assert.assertFalse(writer.isTextFile());
  Assert.assertTrue(writer.isSeqFile());
  Record record = RecordCreator.create();
  record.set(Field.create("a"));
  writer.write(record);
  writer.close();

  SequenceFile.Reader reader = new SequenceFile.Reader(fs, file, new HdfsConfiguration());
  Text key = new Text();
  Text value = new Text();
  Assert.assertTrue(reader.next(key, value));
  Assert.assertNotNull(UUID.fromString(key.toString()));
  Assert.assertEquals("a", value.toString().trim());
  Assert.assertFalse(reader.next(key, value));
  reader.close();
}
 
Example 4  Project: hadoop-gpu  File: TestMapRed.java
public void testCompression() throws Exception {
  EnumSet<SequenceFile.CompressionType> seq =
    EnumSet.allOf(SequenceFile.CompressionType.class);
  for (CompressionType redCompression : seq) {
    for(int combine=0; combine < 2; ++combine) {
      checkCompression(false, redCompression, combine == 1);
      checkCompression(true, redCompression, combine == 1);
    }
  }
}
 
Example 5  Project: hbase  File: Export.java
private static SequenceFile.CompressionType getCompressionType(
    final ExportProtos.ExportRequest request) {
  if (request.hasCompressType()) {
    return SequenceFile.CompressionType.valueOf(request.getCompressType());
  } else {
    return DEFAULT_TYPE;
  }
}
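Note that Enum.valueOf performs an exact, case-sensitive match against the constant names, so the string carried in the protobuf request must be exactly NONE, RECORD, or BLOCK; anything else throws IllegalArgumentException. A hypothetical illustration, not part of the Export class:

String requested = "BLOCK";  // must match a constant name exactly
SequenceFile.CompressionType type =
    SequenceFile.CompressionType.valueOf(requested);  // IllegalArgumentException for any other string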
 
Example 6  Project: RDFS  File: TestMapRed.java
public void testCompression() throws Exception {
  EnumSet<SequenceFile.CompressionType> seq =
    EnumSet.allOf(SequenceFile.CompressionType.class);
  for (CompressionType redCompression : seq) {
    for(int combine=0; combine < 2; ++combine) {
      checkCompression(false, redCompression, combine == 1);
      checkCompression(true, redCompression, combine == 1);
    }
  }
}
 
Example 7  Project: Flink-CEPplus  File: SequenceFileWriter.java
SequenceFile.CompressionType getCompressionType() {
	return compressionType;
}
 
Example 8  Project: flink  File: SequenceFileWriter.java
SequenceFile.CompressionType getCompressionType() {
	return compressionType;
}
 
Example 9  Project: jstorm  File: SequenceFileBolt.java
public SequenceFileBolt withCompressionType(SequenceFile.CompressionType compressionType) {
    this.compressionType = compressionType;
    return this;
}
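Because the setter returns this, it composes with the bolt's other fluent configuration methods. A minimal call-site sketch; the remaining required settings (file name format, sync and rotation policies, and so on) are omitted here:

SequenceFileBolt bolt = new SequenceFileBolt()
        .withCompressionType(SequenceFile.CompressionType.RECORD);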
 
Example 10  Project: nifi  File: CreateHadoopSequenceFile.java
@Override
public void onTrigger(ProcessContext context, ProcessSession session) throws ProcessException {

    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    String mimeType = flowFile.getAttribute(CoreAttributes.MIME_TYPE.key());
    String packagingFormat = NOT_PACKAGED;
    if (null != mimeType) {
        switch (mimeType.toLowerCase()) {
            case "application/tar":
                packagingFormat = TAR_FORMAT;
                break;
            case "application/zip":
                packagingFormat = ZIP_FORMAT;
                break;
            case "application/flowfile-v3":
                packagingFormat = FLOWFILE_STREAM_FORMAT_V3;
                break;
            default:
                getLogger().warn(
                        "Cannot unpack {} because its mime.type attribute is set to '{}', which is not a format that can be unpacked",
                        new Object[]{flowFile, mimeType});
        }
    }
    final SequenceFileWriter sequenceFileWriter;
    switch (packagingFormat) {
        case TAR_FORMAT:
            sequenceFileWriter = new TarUnpackerSequenceFileWriter();
            break;
        case ZIP_FORMAT:
            sequenceFileWriter = new ZipUnpackerSequenceFileWriter();
            break;
        case FLOWFILE_STREAM_FORMAT_V3:
            sequenceFileWriter = new FlowFileStreamUnpackerSequenceFileWriter();
            break;
        default:
            sequenceFileWriter = new SequenceFileWriterImpl();
    }

    final Configuration configuration = getConfiguration();
    if (configuration == null) {
        getLogger().error("HDFS not configured properly");
        session.transfer(flowFile, RELATIONSHIP_FAILURE);
        context.yield();
        return;
    }

    final CompressionCodec codec = getCompressionCodec(context, configuration);

    final String value = context.getProperty(COMPRESSION_TYPE).getValue();
    final SequenceFile.CompressionType compressionType = value == null
        ? SequenceFile.CompressionType.valueOf(DEFAULT_COMPRESSION_TYPE) : SequenceFile.CompressionType.valueOf(value);

    final String fileName = flowFile.getAttribute(CoreAttributes.FILENAME.key()) + ".sf";
    flowFile = session.putAttribute(flowFile, CoreAttributes.FILENAME.key(), fileName);

    try {
        StopWatch stopWatch = new StopWatch(true);
        flowFile = sequenceFileWriter.writeSequenceFile(flowFile, session, configuration, compressionType, codec);
        session.getProvenanceReporter().modifyContent(flowFile, stopWatch.getElapsed(TimeUnit.MILLISECONDS));
        session.transfer(flowFile, RELATIONSHIP_SUCCESS);
        getLogger().info("Transferred flowfile {} to {}", new Object[]{flowFile, RELATIONSHIP_SUCCESS});
    } catch (ProcessException e) {
        getLogger().error("Failed to create Sequence File. Transferring {} to 'failure'", new Object[]{flowFile}, e);
        session.transfer(flowFile, RELATIONSHIP_FAILURE);
    }

}
 
Example 11  Project: datacollector  File: RecordWriterManager.java
public RecordWriterManager(
    FileSystem fs,
    Configuration hdfsConf,
    String uniquePrefix,
    String fileNameSuffix,
    boolean dirPathTemplateInHeader,
    String dirPathTemplate,
    TimeZone timeZone,
    long cutOffSecs,
    long cutOffSizeBytes,
    long cutOffRecords,
    HdfsFileType fileType,
    CompressionCodec compressionCodec,
    SequenceFile.CompressionType compressionType,
    String keyEL,
    boolean rollIfHeader,
    String rollHeaderName,
    String fileNameEL,
    WholeFileExistsAction wholeFileAlreadyExistsAction,
    String permissionEL,
    DataGeneratorFactory generatorFactory,
    Target.Context context,
    String config
) {
  this.fs = fs;
  this.hdfsConf = hdfsConf;
  this.uniquePrefix = uniquePrefix;
  this.fileNameSuffix = fileNameSuffix;
  this.dirPathTemplateInHeader = dirPathTemplateInHeader;
  this.dirPathTemplate = dirPathTemplate;
  this.timeZone = timeZone;
  this.cutOffMillis = preventOverflow(cutOffSecs * 1000);
  this.cutOffSize = cutOffSizeBytes;
  this.cutOffRecords = cutOffRecords;
  this.fileType = fileType;
  this.compressionCodec = compressionCodec;
  this.compressionType = compressionType;
  this.keyEL = keyEL;
  this.generatorFactory = generatorFactory;
  this.context = context;
  this.rollIfHeader = rollIfHeader;
  this.rollHeaderName = rollHeaderName;
  closedPaths = new ConcurrentLinkedQueue<>();
  pathResolver = new PathResolver(context, config, dirPathTemplate, timeZone);
  fsHelper = getFsHelper(context, fileNameEL, wholeFileAlreadyExistsAction, permissionEL);
}
 
Example 12  Project: storm-hdfs  File: HdfsState.java
public SequenceFileOptions withCompressionType(SequenceFile.CompressionType compressionType) {
    this.compressionType = compressionType;
    return this;
}
 
Example 13  Project: storm-hdfs  File: SequenceFileBolt.java
public SequenceFileBolt withCompressionType(SequenceFile.CompressionType compressionType) {
    this.compressionType = compressionType;
    return this;
}
 
Example 14  Project: jstorm  File: HdfsState.java
public SequenceFileOptions withCompressionType(SequenceFile.CompressionType compressionType) {
    this.compressionType = compressionType;
    return this;
}
 
Example 15  Project: Flink-CEPplus  File: SequenceFileWriterFactory.java
/**
 * Creates a new SequenceFileWriterFactory that assembles SequenceFileWriters for the
 * given key/value types, compression codec, and compression type.
 *
 * @param hadoopConf           The Hadoop configuration for the SequenceFile writer.
 * @param keyClass             The class of the keys to write.
 * @param valueClass           The class of the values to write.
 * @param compressionCodecName The name of the compression codec.
 * @param compressionType      The SequenceFile compression type (NONE, RECORD, or BLOCK).
 */
public SequenceFileWriterFactory(Configuration hadoopConf, Class<K> keyClass, Class<V> valueClass, String compressionCodecName, SequenceFile.CompressionType compressionType) {
	this.serializableHadoopConfig = new SerializableHadoopConfiguration(checkNotNull(hadoopConf));
	this.keyClass = checkNotNull(keyClass);
	this.valueClass = checkNotNull(valueClass);
	this.compressionCodecName = checkNotNull(compressionCodecName);
	this.compressionType = checkNotNull(compressionType);
}
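For context, a factory like this is typically handed to a bulk-format StreamingFileSink. A minimal sketch, assuming the StreamingFileSink API of this Flink generation and a DataStream<Tuple2<LongWritable, Text>> named stream; the path and codec name are placeholders:

SequenceFileWriterFactory<LongWritable, Text> factory = new SequenceFileWriterFactory<>(
        hadoopConf, LongWritable.class, Text.class, "BZip2", SequenceFile.CompressionType.BLOCK);
StreamingFileSink<Tuple2<LongWritable, Text>> sink = StreamingFileSink
        .forBulkFormat(new org.apache.flink.core.fs.Path("/base/path"), factory)
        .build();
stream.addSink(sink);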
 
Example 16  Project: flink  File: SequenceFileWriterFactory.java
/**
 * Creates a new SequenceFileWriterFactory that assembles SequenceFileWriters for the
 * given key/value types, compression codec, and compression type.
 *
 * @param hadoopConf           The Hadoop configuration for the SequenceFile writer.
 * @param keyClass             The class of the keys to write.
 * @param valueClass           The class of the values to write.
 * @param compressionCodecName The name of the compression codec.
 * @param compressionType      The SequenceFile compression type (NONE, RECORD, or BLOCK).
 */
public SequenceFileWriterFactory(Configuration hadoopConf, Class<K> keyClass, Class<V> valueClass, String compressionCodecName, SequenceFile.CompressionType compressionType) {
	this.serializableHadoopConfig = new SerializableHadoopConfiguration(checkNotNull(hadoopConf));
	this.keyClass = checkNotNull(keyClass);
	this.valueClass = checkNotNull(valueClass);
	this.compressionCodecName = checkNotNull(compressionCodecName);
	this.compressionType = checkNotNull(compressionType);
}
 
Example 17  Project: Flink-CEPplus  File: SequenceFileWriter.java
/**
 * Creates a new {@code SequenceFileWriter} that writes sequence files with the given
 * compression codec and compression type.
 *
 * @param compressionCodecName Name of a Hadoop compression codec.
 * @param compressionType The compression type to use.
 */
public SequenceFileWriter(String compressionCodecName,
		SequenceFile.CompressionType compressionType) {
	this.compressionCodecName = compressionCodecName;
	this.compressionType = compressionType;
}
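This writer belongs to the legacy BucketingSink connector. A minimal call-site sketch, assuming flink-connector-filesystem's BucketingSink and a Tuple2 key/value stream; the base path and codec name are placeholders:

BucketingSink<Tuple2<IntWritable, Text>> sink = new BucketingSink<>("/base/path");
sink.setWriter(new SequenceFileWriter<IntWritable, Text>("Default", SequenceFile.CompressionType.BLOCK));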
 
Example 18  Project: flink  File: SequenceFileWriter.java
/**
 * Creates a new {@code SequenceFileWriter} that writes sequence files with the given
 * compression codec and compression type.
 *
 * @param compressionCodecName Name of a Hadoop compression codec.
 * @param compressionType The compression type to use.
 */
public SequenceFileWriter(String compressionCodecName,
		SequenceFile.CompressionType compressionType) {
	this.compressionCodecName = compressionCodecName;
	this.compressionType = compressionType;
}
 