下面列出了怎么用org.apache.hadoop.hbase.io.hfile.Compression.Algorithm的API类实例代码及写法,或者点击链接到github查看源代码。
/**
 * Creates an HFile-backed sorted oplog writer.
 *
 * <p>Block size is read from the {@code HFILE_BLOCK_SIZE_CONF} system
 * property (default 64 KiB) and the compression codec from the
 * {@code COMPRESSION} system property. A row-level bloom filter is
 * attached, sized for the expected key count.
 *
 * @param keys expected number of keys, used to size the bloom filter
 * @throws IOException if the underlying HFile writer cannot be created
 */
public HFileSortedOplogWriter(int keys) throws IOException {
  try {
    // 64 KiB default block size unless overridden via system property.
    int hfileBlockSize = Integer.getInteger(
        HoplogConfig.HFILE_BLOCK_SIZE_CONF, (1 << 16));
    // Throws IllegalArgumentException if the property names an unknown codec.
    Algorithm compress = Algorithm.valueOf(System.getProperty(HoplogConfig.COMPRESSION,
        HoplogConfig.COMPRESSION_DEFAULT));
    writer = HFile.getWriterFactory(conf, cacheConf)
        .withPath(fsProvider.getFS(), path)
        .withBlockSize(hfileBlockSize)
        .withCompression(compress)
        .create();
    // ROW bloom: one entry per row key written to this hoplog.
    bfw = BloomFilterFactory.createGeneralBloomAtWrite(conf, cacheConf, BloomType.ROW, keys,
        writer);
    logger.fine("Created hoplog writer with compression " + compress);
  } catch (IOException e) {
    // Include the cause in the log line instead of a bare message; the
    // exception itself is still propagated to the caller.
    logger.fine("IO Error while creating writer: " + e);
    throw e;
  }
}
/**
 * Builds the HFile writer and its row bloom filter for this oplog.
 *
 * @param keys expected key count used to size the bloom filter
 * @throws IOException if writer creation fails
 */
public HFileSortedOplogWriter(int keys) throws IOException {
  try {
    // Resolve tuning knobs from system properties: block size
    // (default 1 << 16 = 64 KiB) and compression codec name.
    final int blockSize =
        Integer.getInteger(HoplogConfig.HFILE_BLOCK_SIZE_CONF, 1 << 16);
    final Algorithm codec = Algorithm.valueOf(
        System.getProperty(HoplogConfig.COMPRESSION, HoplogConfig.COMPRESSION_DEFAULT));

    writer =
        HFile.getWriterFactory(conf, cacheConf)
            .withPath(fsProvider.getFS(), path)
            .withBlockSize(blockSize)
            .withCompression(codec)
            .create();

    bfw =
        BloomFilterFactory.createGeneralBloomAtWrite(
            conf, cacheConf, BloomType.ROW, keys, writer);

    logger.fine("Created hoplog writer with compression " + codec);
  } catch (IOException e) {
    logger.fine("IO Error while creating writer");
    throw e;
  }
}
// Maps a source Compression value onto an HBase HFile Algorithm.
// Only NONE is currently supported: the default label sits before
// case NONE, so EVERY input value falls through to Algorithm.NONE.
// Passing null still throws NullPointerException from the switch.
public static Algorithm convertCompression(Compression type) {
switch (type) {
default:
case NONE: return Algorithm.NONE;
}
}
/**
 * Converts a source {@code Compression} value to an HFile {@code Algorithm}.
 *
 * <p>Every input currently maps to {@link Algorithm#NONE}; the switch is
 * kept (rather than a plain return) so a {@code null} argument still
 * throws {@code NullPointerException} exactly as before.
 *
 * @param type source compression setting; must not be null
 * @return always {@code Algorithm.NONE}
 */
public static Algorithm convertCompression(Compression type) {
  switch (type) {
    case NONE:
    default:
      return Algorithm.NONE;
  }
}