Code examples for the class org.apache.hadoop.io.compress.snappy.SnappyDecompressor

The examples below show how the org.apache.hadoop.io.compress.snappy.SnappyDecompressor API is used in practice. Each snippet is taken verbatim from an open-source project; the full source can be found in the corresponding project on GitHub.
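Before the project snippets, here is a minimal round-trip sketch of the low-level API (a sketch, not project code; it assumes the native libhadoop/snappy bindings are loaded, otherwise the calls below fail at runtime):

import java.io.IOException;
import org.apache.hadoop.io.compress.snappy.SnappyCompressor;
import org.apache.hadoop.io.compress.snappy.SnappyDecompressor;

public class SnappyRoundTrip {
  public static void main(String[] args) throws IOException {
    byte[] raw = "hello snappy".getBytes("UTF-8");

    // Compress: supply input, mark end of input, then drain one block.
    SnappyCompressor compressor = new SnappyCompressor();
    compressor.setInput(raw, 0, raw.length);
    compressor.finish();
    byte[] compressed = new byte[raw.length * 2 + 64]; // generous scratch buffer
    int compressedLen = compressor.compress(compressed, 0, compressed.length);

    // Decompress the block back into a buffer of the original size.
    SnappyDecompressor decompressor = new SnappyDecompressor();
    decompressor.setInput(compressed, 0, compressedLen);
    byte[] restored = new byte[raw.length];
    int restoredLen = decompressor.decompress(restored, 0, restored.length);

    System.out.println(new String(restored, 0, restoredLen, "UTF-8")); // hello snappy
  }
}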

Example 1  Projects: hadoop, big-c  File: SnappyCodec.java
/**
 * Are the native snappy libraries loaded & initialized?
 */
public static void checkNativeCodeLoaded() {
    if (!NativeCodeLoader.isNativeCodeLoaded() ||
        !NativeCodeLoader.buildSupportsSnappy()) {
      throw new RuntimeException("native snappy library not available: " +
          "this version of libhadoop was built without " +
          "snappy support.");
    }
    if (!SnappyCompressor.isNativeCodeLoaded()) {
      throw new RuntimeException("native snappy library not available: " +
          "SnappyCompressor has not been loaded.");
    }
    if (!SnappyDecompressor.isNativeCodeLoaded()) {
      throw new RuntimeException("native snappy library not available: " +
          "SnappyDecompressor has not been loaded.");
    }
}
 
Example 2  Projects: hadoop, big-c  File: TestCompressorDecompressor.java
@Test
public void testCompressorDecompressor() {
  // no more for this data
  int SIZE = 44 * 1024;
  
  byte[] rawData = generate(SIZE);
  try {
    CompressDecompressTester.of(rawData)
        .withCompressDecompressPair(new SnappyCompressor(), new SnappyDecompressor())
        .withCompressDecompressPair(new Lz4Compressor(), new Lz4Decompressor())
        .withCompressDecompressPair(new BuiltInZlibDeflater(), new BuiltInZlibInflater())
        .withTestCases(ImmutableSet.of(CompressionTestStrategy.COMPRESS_DECOMPRESS_SINGLE_BLOCK,
                    CompressionTestStrategy.COMPRESS_DECOMPRESS_BLOCK,
                    CompressionTestStrategy.COMPRESS_DECOMPRESS_ERRORS,
                    CompressionTestStrategy.COMPRESS_DECOMPRESS_WITH_EMPTY_STREAM))
        .test();

  } catch (Exception ex) {
    fail("testCompressorDecompressor error !!!" + ex);
  }
}
 
Example 3  Projects: hadoop, big-c  File: TestCompressorDecompressor.java
@Test
public void testCompressorDecompressorWithExeedBufferLimit() {
  int BYTE_SIZE = 100 * 1024;
  byte[] rawData = generate(BYTE_SIZE);
  try {
    CompressDecompressTester.of(rawData)
        .withCompressDecompressPair(
            new SnappyCompressor(BYTE_SIZE + BYTE_SIZE / 2),
            new SnappyDecompressor(BYTE_SIZE + BYTE_SIZE / 2))
        .withCompressDecompressPair(new Lz4Compressor(BYTE_SIZE),
            new Lz4Decompressor(BYTE_SIZE))
        .withTestCases(ImmutableSet.of(CompressionTestStrategy.COMPRESS_DECOMPRESS_SINGLE_BLOCK,
                    CompressionTestStrategy.COMPRESS_DECOMPRESS_BLOCK,
                    CompressionTestStrategy.COMPRESS_DECOMPRESS_ERRORS,
                    CompressionTestStrategy.COMPRESS_DECOMPRESS_WITH_EMPTY_STREAM))
        .test();

  } catch (Exception ex) {
    fail("testCompressorDecompressorWithExeedBufferLimit error !!!" + ex);
  }
}
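The int passed to the SnappyCompressor/SnappyDecompressor constructors above is the size of the direct buffers used for the native calls; the test deliberately sizes them larger than the payload (150 KB of buffer for 100 KB of data) so a whole block fits in one pass. The same choice in isolation (the 256 KiB figure is illustrative, not from the test):

// Hypothetical sizing, not from the test above: 256 KiB direct buffers.
SnappyDecompressor decompressor = new SnappyDecompressor(256 * 1024);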
 
Example 4  Projects: hadoop, big-c  File: SnappyCodec.java
/**
 * Create a new {@link Decompressor} for use by this {@link CompressionCodec}.
 *
 * @return a new decompressor for use by this codec
 */
@Override
public Decompressor createDecompressor() {
  checkNativeCodeLoaded();
  int bufferSize = conf.getInt(
      CommonConfigurationKeys.IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_KEY,
      CommonConfigurationKeys.IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_DEFAULT);
  return new SnappyDecompressor(bufferSize);
}
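A sketch of how a createDecompressor() instance is typically obtained and used through the codec APIs (the input file name is hypothetical; assumes native snappy is available):

import java.io.FileInputStream;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.CodecPool;
import org.apache.hadoop.io.compress.CompressionInputStream;
import org.apache.hadoop.io.compress.Decompressor;
import org.apache.hadoop.io.compress.SnappyCodec;

public class ReadSnappyFile {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    SnappyCodec codec = new SnappyCodec();
    codec.setConf(conf); // the codec reads io.compression.codec.snappy.buffersize from conf

    // CodecPool reuses decompressors; under the hood it instantiates
    // codec.getDecompressorType(), i.e. SnappyDecompressor.
    Decompressor decompressor = CodecPool.getDecompressor(codec);
    try (CompressionInputStream in =
             codec.createInputStream(new FileInputStream("data.snappy"), decompressor)) {
      byte[] buf = new byte[4096];
      int n;
      while ((n = in.read(buf)) > 0) {
        System.out.write(buf, 0, n);
      }
    } finally {
      CodecPool.returnDecompressor(decompressor);
    }
  }
}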
 
Example 5  Project: RDFS  File: SnappyCodec.java
/**
 * Get the type of {@link Decompressor} needed by this
 * {@link CompressionCodec}.
 *
 * @return the type of decompressor needed by this codec.
 */
@Override
public Class<? extends Decompressor> getDecompressorType() {
  if (!isNativeSnappyLoaded(conf)) {
    throw new RuntimeException("native snappy library not available");
  }

  return SnappyDecompressor.class;
}
 
Example 6  Project: RDFS  File: SnappyCodec.java
/**
 * Create a new {@link Decompressor} for use by this
 * {@link CompressionCodec}.
 *
 * @return a new decompressor for use by this codec
 */
@Override
public Decompressor createDecompressor() {
  if (!isNativeSnappyLoaded(conf)) {
    throw new RuntimeException("native snappy library not available");
  }
  int bufferSize = conf.getInt(
      IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_KEY,
      IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_DEFAULT);
  return new SnappyDecompressor(bufferSize);
}
 
Example 7  Projects: hadoop, big-c  File: SnappyCodec.java
public static boolean isNativeCodeLoaded() {
  return SnappyCompressor.isNativeCodeLoaded() && 
      SnappyDecompressor.isNativeCodeLoaded();
}
 
Example 8  Projects: hadoop, big-c  File: SnappyCodec.java
/**
 * Get the type of {@link Decompressor} needed by this {@link CompressionCodec}.
 *
 * @return the type of decompressor needed by this codec.
 */
@Override
public Class<? extends Decompressor> getDecompressorType() {
  checkNativeCodeLoaded();
  return SnappyDecompressor.class;
}
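getDecompressorType() exists so that pooling code can instantiate decompressors reflectively; roughly (a simplified sketch, not the actual CodecPool source):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.Decompressor;
import org.apache.hadoop.util.ReflectionUtils;

// Simplified: the real CodecPool first tries to reuse a previously
// returned instance before creating a new one.
static Decompressor newDecompressor(CompressionCodec codec, Configuration conf) {
  return ReflectionUtils.newInstance(codec.getDecompressorType(), conf);
}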
 
Example 9  Project: dremio-oss  File: ScanWithHiveReader.java
private static Class<? extends HiveAbstractReader> getNativeReaderClass(Optional<String> formatName,
                                                                        OptionManager options, Configuration configuration, boolean mixedSchema, boolean isTransactional) {
  if (!formatName.isPresent()) {
    return HiveDefaultReader.class;
  }

  Class<? extends HiveAbstractReader> readerClass = readerMap.get(formatName.get());
  if (readerClass == HiveOrcReader.class) {
    // Validate reader
    if (OrcConf.USE_ZEROCOPY.getBoolean(configuration)) {
      if (!NativeCodeLoader.isNativeCodeLoaded()) {
        throw UserException.dataReadError()
            .message("Hadoop native library is required for Hive ORC data, but is not loaded").build(logger);
      }
      // TODO: find a way to access compression codec information?
      if (!SnappyDecompressor.isNativeCodeLoaded()) {
        throw UserException.dataReadError()
          .message("Snappy native library is required for Hive ORC data, but is not loaded").build(logger);
      }

      if (!isNativeZlibLoaded) {
        throw UserException
        .dataReadError()
        .message("Zlib native library is required for Hive ORC data, but is not loaded")
        .build(logger);
      }
    }

    if (new HiveSettings(options).vectorizeOrcReaders() && !mixedSchema && !isTransactional) {
      // We don't use vectorized ORC reader if there is a schema change between table and partitions or the table is
      // a transactional Hive table
      return HiveORCVectorizedReader.class;
    }
  }

  if (readerClass == null) {
    return HiveDefaultReader.class;
  }

  return readerClass;
}
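The same fail-fast pattern is reusable in any application that depends on native Snappy; a sketch (hypothetical helper, not dremio-oss code):

import org.apache.hadoop.io.compress.snappy.SnappyDecompressor;
import org.apache.hadoop.util.NativeCodeLoader;

// Call once at startup to fail fast instead of erroring mid-read.
static void requireNativeSnappy() {
  if (!NativeCodeLoader.isNativeCodeLoaded() || !NativeCodeLoader.buildSupportsSnappy()) {
    throw new IllegalStateException("libhadoop with snappy support is not loaded");
  }
  if (!SnappyDecompressor.isNativeCodeLoaded()) {
    throw new IllegalStateException("SnappyDecompressor native bindings failed to load");
  }
}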
 
Example 10  Project: datacollector  File: SnappyCodec.java
public SnappyCodec() {
  super(SnappyCompressor.class, SnappyDecompressor.class, ".snappy");
}
 
Example 11  Project: datacollector  File: SnappyCodec.java
@Override
public Decompressor createDecompressor() {
  return new SnappyDecompressor(getBufferSize());
}
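Because SnappyDecompressor implements the generic Decompressor interface, code that receives one from createDecompressor() usually drives it with the standard consume loop; a sketch (assumes the entire compressed block is already in memory):

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import org.apache.hadoop.io.compress.Decompressor;

static byte[] decompressFully(Decompressor decompressor, byte[] compressed) throws IOException {
  decompressor.reset();
  decompressor.setInput(compressed, 0, compressed.length);
  ByteArrayOutputStream out = new ByteArrayOutputStream();
  byte[] buf = new byte[64 * 1024];
  while (!decompressor.finished()) {
    int n = decompressor.decompress(buf, 0, buf.length);
    if (n == 0 && decompressor.needsInput()) {
      break; // all supplied input has been consumed
    }
    out.write(buf, 0, n);
  }
  return out.toByteArray();
}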
 