Usage examples for the class org.apache.hadoop.hbase.io.compress.Compression.Algorithm

The examples below show how the org.apache.hadoop.hbase.io.compress.Compression.Algorithm API is used in real projects. Each snippet notes the project and source file it comes from, so you can look up the full source in the corresponding project on GitHub.

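Before the project examples, here is a minimal, self-contained sketch of the most common use of the API: resolving a Compression.Algorithm from a codec name and applying it to a column family. It is not taken from any of the projects below; the codec-name handling mirrors example 20, and the table name "demo" and family name "cf" are placeholders chosen for illustration.

import java.io.IOException;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
import org.apache.hadoop.hbase.util.Bytes;

public class CompressionDemo {

    /** Builds a table descriptor whose single family "cf" uses the given compression codec. */
    public static TableDescriptor buildDescriptor(String codecName) {
        // Enum constants are upper case ("NONE", "GZ", "SNAPPY", ...), as in example 20 below.
        Algorithm compression = Algorithm.valueOf(codecName.toUpperCase());
        ColumnFamilyDescriptor family = ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("cf"))
                .setCompressionType(compression)
                .build();
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
                .setColumnFamily(family)
                .build();
    }

    /** Creates the table through an existing connection. */
    public static void createTable(Connection conn, String codecName) throws IOException {
        try (Admin admin = conn.getAdmin()) {
            admin.createTable(buildDescriptor(codecName));
        }
    }
}

Example 1 below does the same thing with the older HTableDescriptor/HColumnDescriptor API, while examples 3 to 5 use the builder API shown here.
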
Example 1  Project: antsdb  File: Helper.java
public static void createTable(Connection conn, String namespace, String tableName, Algorithm compressionType) {
    // Check whether table already exists
    if (Helper.existsTable(conn, namespace, tableName)) {
        Helper.dropTable(conn, namespace, tableName);
    }
    if (!Helper.existsTable(conn, namespace, tableName)) {
        
        // Create table
        try (Admin admin = conn.getAdmin()) {
            HTableDescriptor table = new HTableDescriptor(TableName.valueOf(namespace, tableName));
            table.addFamily(new HColumnDescriptor(DATA_COLUMN_FAMILY).setCompressionType(compressionType));
            _log.debug("creating table {}", table.toString());
            admin.createTable(table);
        }
        catch (Exception ex) {
            throw new OrcaHBaseException(ex, "Failed to create table - " + tableName);
        }
    }
}
 
Example 2  Project: antsdb  File: Helper.java
public static void truncateTable(Connection connection, String namespace, String tableName) {
    try {
    
        TableName table = TableName.valueOf(namespace, tableName);
        
        // get compression type
        Table htable = connection.getTable(table);          
        HTableDescriptor tableDesc = htable.getTableDescriptor();
        HColumnDescriptor[] families = tableDesc.getColumnFamilies();
        Algorithm compressionType =  families[0].getCompression();
        
        // drop table
        dropTable(connection, namespace, tableName);
        
        // create table
        createTable(connection, namespace, tableName, compressionType);

    } 
    catch (Exception ex) {
        throw new OrcaHBaseException("Failed to truncate table - " + tableName, ex);
    }
}
 
Example 3  Project: hbase  File: HBaseTestingUtility.java
/**
 * Creates a pre-split table for load testing. If the table already exists,
 * logs a warning and continues.
 * @return the number of regions the table was split into
 */
public static int createPreSplitLoadTestTable(Configuration conf,
    TableName tableName, byte[] columnFamily, Algorithm compression,
    DataBlockEncoding dataBlockEncoding, int numRegionsPerServer, int regionReplication,
    Durability durability)
        throws IOException {
  TableDescriptorBuilder.ModifyableTableDescriptor tableDescriptor =
    new TableDescriptorBuilder.ModifyableTableDescriptor(tableName);
  tableDescriptor.setDurability(durability);
  tableDescriptor.setRegionReplication(regionReplication);
  ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor familyDescriptor =
    new ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor(columnFamily);
  familyDescriptor.setDataBlockEncoding(dataBlockEncoding);
  familyDescriptor.setCompressionType(compression);
  return createPreSplitLoadTestTable(conf, tableDescriptor, familyDescriptor,
    numRegionsPerServer);
}
 
Example 4  Project: hbase  File: HBaseTestingUtility.java
/**
 * Creates a pre-split table for load testing. If the table already exists,
 * logs a warning and continues.
 * @return the number of regions the table was split into
 */
public static int createPreSplitLoadTestTable(Configuration conf,
    TableName tableName, byte[][] columnFamilies, Algorithm compression,
    DataBlockEncoding dataBlockEncoding, int numRegionsPerServer, int regionReplication,
    Durability durability)
        throws IOException {
  TableDescriptorBuilder.ModifyableTableDescriptor tableDescriptor =
    new TableDescriptorBuilder.ModifyableTableDescriptor(tableName);
  tableDescriptor.setDurability(durability);
  tableDescriptor.setRegionReplication(regionReplication);
  ColumnFamilyDescriptor[] hcds = new ColumnFamilyDescriptor[columnFamilies.length];
  for (int i = 0; i < columnFamilies.length; i++) {
    ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor familyDescriptor =
      new ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor(columnFamilies[i]);
    familyDescriptor.setDataBlockEncoding(dataBlockEncoding);
    familyDescriptor.setCompressionType(compression);
    hcds[i] = familyDescriptor;
  }
  return createPreSplitLoadTestTable(conf, tableDescriptor, hcds, numRegionsPerServer);
}
 
Example 5  Project: hbase  File: HBaseTestingUtility.java
/**
 * Create a set of column descriptors with the combination of compression,
 * encoding, bloom codecs available.
 * @param prefix family names prefix
 * @return the list of column descriptors
 */
public static List<ColumnFamilyDescriptor> generateColumnDescriptors(final String prefix) {
  List<ColumnFamilyDescriptor> columnFamilyDescriptors = new ArrayList<>();
  long familyId = 0;
  for (Compression.Algorithm compressionType: getSupportedCompressionAlgorithms()) {
    for (DataBlockEncoding encodingType: DataBlockEncoding.values()) {
      for (BloomType bloomType: BloomType.values()) {
        String name = String.format("%s-cf%d", prefix, familyId); // family name: prefix plus running id
        ColumnFamilyDescriptorBuilder columnFamilyDescriptorBuilder =
          ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(name));
        columnFamilyDescriptorBuilder.setCompressionType(compressionType);
        columnFamilyDescriptorBuilder.setDataBlockEncoding(encodingType);
        columnFamilyDescriptorBuilder.setBloomFilterType(bloomType);
        columnFamilyDescriptors.add(columnFamilyDescriptorBuilder.build());
        familyId++;
      }
    }
  }
  return columnFamilyDescriptors;
}
 
Example 6  Project: hbase  File: TestSCVFWithMiniCluster.java
private static void create(Admin admin, TableName tableName, byte[]... families)
    throws IOException {
  TableDescriptorBuilder.ModifyableTableDescriptor tableDescriptor =
    new TableDescriptorBuilder.ModifyableTableDescriptor(tableName);
  for (byte[] family : families) {
    ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor familyDescriptor =
      new ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor(family);
    familyDescriptor.setMaxVersions(1);
    familyDescriptor.setCompressionType(Algorithm.GZ);
    tableDescriptor.setColumnFamily(familyDescriptor);
  }
  try {
    admin.createTable(tableDescriptor);
  } catch (TableExistsException tee) {
    /* Ignore */
  }
}
 
Example 7  Project: hbase  File: TestHFileBlock.java
static HFileBlock.Writer createTestV2Block(Compression.Algorithm algo,
    boolean includesMemstoreTS, boolean includesTag) throws IOException {
  final BlockType blockType = BlockType.DATA;
  HFileContext meta = new HFileContextBuilder()
                      .withCompression(algo)
                      .withIncludesMvcc(includesMemstoreTS)
                      .withIncludesTags(includesTag)
                      .withBytesPerCheckSum(HFile.DEFAULT_BYTES_PER_CHECKSUM)
                      .build();
  HFileBlock.Writer hbw = new HFileBlock.Writer(null, meta);
  DataOutputStream dos = hbw.startWriting(blockType);
  writeTestBlockContents(dos);
  dos.flush();
  hbw.ensureBlockReady();
  assertEquals(1000 * 4, hbw.getUncompressedSizeWithoutHeader());
  hbw.release();
  return hbw;
}
 
Example 8  Project: hbase  File: TestHFileBlock.java
static void assertBuffersEqual(ByteBuff expectedBuffer,
    ByteBuff actualBuffer, Compression.Algorithm compression,
    DataBlockEncoding encoding, boolean pread) {
  if (!actualBuffer.equals(expectedBuffer)) {
    int prefix = 0;
    int minLimit = Math.min(expectedBuffer.limit(), actualBuffer.limit());
    while (prefix < minLimit &&
        expectedBuffer.get(prefix) == actualBuffer.get(prefix)) {
      prefix++;
    }

    fail(String.format(
        "Content mismatch for %s, commonPrefix %d, expected %s, got %s",
        buildMessageDetails(compression, encoding, pread), prefix,
        nextBytesToStr(expectedBuffer, prefix),
        nextBytesToStr(actualBuffer, prefix)));
  }
}
 
Example 9  Project: hbase  File: TestHFileDataBlockEncoder.java
private HFileBlock getSampleHFileBlock(List<KeyValue> kvs, boolean useTag) {
  ByteBuffer keyValues = RedundantKVGenerator.convertKvToByteBuffer(kvs, includesMemstoreTS);
  int size = keyValues.limit();
  ByteBuffer buf = ByteBuffer.allocate(size + HConstants.HFILEBLOCK_HEADER_SIZE);
  buf.position(HConstants.HFILEBLOCK_HEADER_SIZE);
  keyValues.rewind();
  buf.put(keyValues);
  HFileContext meta = new HFileContextBuilder()
                      .withIncludesMvcc(includesMemstoreTS)
                      .withIncludesTags(useTag)
                      .withHBaseCheckSum(true)
                      .withCompression(Algorithm.NONE)
                      .withBlockSize(0)
                      .withChecksumType(ChecksumType.NULL)
                      .build();
  HFileBlock b = new HFileBlock(BlockType.DATA, size, size, -1, ByteBuff.wrap(buf),
      HFileBlock.FILL_HEADER, 0, 0, -1, meta, ByteBuffAllocator.HEAP);
  return b;
}
 
Example 10  Project: hbase  File: TestHFileScannerImplReferenceCount.java
private void writeHFile(Configuration conf, FileSystem fs, Path hfilePath, Algorithm compression,
    DataBlockEncoding encoding, int cellCount) throws IOException {
  HFileContext context =
      new HFileContextBuilder().withBlockSize(1).withDataBlockEncoding(DataBlockEncoding.NONE)
          .withCompression(compression).withDataBlockEncoding(encoding).build();
  try (HFile.Writer writer =
      new HFile.WriterFactory(conf, new CacheConfig(conf)).withPath(fs, hfilePath)
          .withFileContext(context).create()) {
    Random rand = new Random(9713312); // Just a fixed seed.
    for (int i = 0; i < cellCount; ++i) {
      byte[] keyBytes = Bytes.add(Bytes.toBytes(i), SUFFIX);

      // A random-length random value.
      byte[] valueBytes = RandomKeyValueUtil.randomValue(rand);
      KeyValue keyValue =
          new KeyValue(keyBytes, FAMILY, QUALIFIER, HConstants.LATEST_TIMESTAMP, valueBytes);
      if (firstCell == null) {
        firstCell = keyValue;
      } else if (secondCell == null) {
        secondCell = keyValue;
      }
      writer.append(keyValue);
    }
  }
}
 
Example 11  Project: hbase  File: TestHFileScannerImplReferenceCount.java
@Test
public void testDisabledBlockCache() throws Exception {
  writeHFile(conf, fs, hfilePath, Algorithm.NONE, DataBlockEncoding.NONE, CELL_COUNT);
  // Set LruBlockCache
  conf.setFloat(HFILE_BLOCK_CACHE_SIZE_KEY, 0.0f);
  BlockCache defaultBC = BlockCacheFactory.createBlockCache(conf);
  Assert.assertNull(defaultBC);
  CacheConfig cacheConfig = new CacheConfig(conf, null, defaultBC, allocator);
  Assert.assertFalse(cacheConfig.isCombinedBlockCache()); // Must be LruBlockCache.
  HFile.Reader reader = HFile.createReader(fs, hfilePath, cacheConfig, true, conf);
  Assert.assertTrue(reader instanceof HFileReaderImpl);
  // We've built an HFile tree with index = 16.
  Assert.assertEquals(16, reader.getTrailer().getNumDataIndexLevels());

  HFileBlock block1 = reader.getDataBlockIndexReader()
      .loadDataBlockWithScanInfo(firstCell, null, true, true, false,
          DataBlockEncoding.NONE, reader).getHFileBlock();

  Assert.assertTrue(block1.isSharedMem());
  Assert.assertTrue(block1 instanceof SharedMemHFileBlock);
  Assert.assertEquals(1, block1.refCnt());
  Assert.assertTrue(block1.release());
}
 
Example 12  Project: hbase  File: TestDataBlockEncoders.java
/**
 * Test whether the decompression of first key is implemented correctly.
 * @throws IOException
 */
@Test
public void testFirstKeyInBlockOnSample() throws IOException {
  List<KeyValue> sampleKv = generator.generateTestKeyValues(NUMBER_OF_KV, includesTags);

  for (DataBlockEncoding encoding : DataBlockEncoding.values()) {
    if (encoding.getEncoder() == null) {
      continue;
    }
    DataBlockEncoder encoder = encoding.getEncoder();
    ByteBuffer encodedBuffer = encodeKeyValues(encoding, sampleKv,
        getEncodingContext(Compression.Algorithm.NONE, encoding), this.useOffheapData);
    Cell key = encoder.getFirstKeyCellInBlock(new SingleByteBuff(encodedBuffer));
    KeyValue firstKv = sampleKv.get(0);
    if (0 != PrivateCellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, key, firstKv)) {
      int commonPrefix = PrivateCellUtil.findCommonPrefixInFlatKey(key, firstKv, false, true);
      fail(String.format("Bug in '%s' commonPrefix %d", encoder.toString(), commonPrefix));
    }
  }
}
 
Example 13  Project: hbase  File: TestDataBlockEncoders.java
@Test
public void testRowIndexWithTagsButNoTagsInCell() throws IOException {
  List<KeyValue> kvList = new ArrayList<>();
  byte[] row = new byte[0];
  byte[] family = new byte[0];
  byte[] qualifier = new byte[0];
  byte[] value = new byte[0];
  KeyValue expectedKV = new KeyValue(row, family, qualifier, 1L, Type.Put, value);
  kvList.add(expectedKV);
  DataBlockEncoding encoding = DataBlockEncoding.ROW_INDEX_V1;
  DataBlockEncoder encoder = encoding.getEncoder();
  ByteBuffer encodedBuffer =
      encodeKeyValues(encoding, kvList, getEncodingContext(Algorithm.NONE, encoding), false);
  HFileContext meta =
      new HFileContextBuilder().withHBaseCheckSum(false).withIncludesMvcc(includesMemstoreTS)
          .withIncludesTags(includesTags).withCompression(Compression.Algorithm.NONE).build();
  DataBlockEncoder.EncodedSeeker seeker =
    encoder.createSeeker(encoder.newDataBlockDecodingContext(meta));
  seeker.setCurrentBuffer(new SingleByteBuff(encodedBuffer));
  Cell cell = seeker.getCell();
  Assert.assertEquals(expectedKV.getLength(), ((KeyValue) cell).getLength());
}
 
Example 14  Project: hbase  File: TestDataBlockEncoders.java
private void testAlgorithm(byte[] encodedData, ByteBuffer unencodedDataBuf,
    DataBlockEncoder encoder) throws IOException {
  // decode
  ByteArrayInputStream bais = new ByteArrayInputStream(encodedData, ENCODED_DATA_OFFSET,
      encodedData.length - ENCODED_DATA_OFFSET);
  DataInputStream dis = new DataInputStream(bais);
  ByteBuffer actualDataset;
  HFileContext meta = new HFileContextBuilder().withHBaseCheckSum(false)
      .withIncludesMvcc(includesMemstoreTS).withIncludesTags(includesTags)
      .withCompression(Compression.Algorithm.NONE).build();
  actualDataset = encoder.decodeKeyValues(dis, encoder.newDataBlockDecodingContext(meta));
  actualDataset.rewind();

  // This is because, in the case of prefix tree encoding, the decoded stream
  // will not have the mvcc in it.
  assertEquals("Encoding -> decoding gives different results for " + encoder,
      Bytes.toStringBinary(unencodedDataBuf), Bytes.toStringBinary(actualDataset));
}
 
Example 15  Project: hbase  File: TestHFileOutputFormat2.java
private void setupMockColumnFamiliesForCompression(Table table,
    Map<String, Compression.Algorithm> familyToCompression) throws IOException {

  TableDescriptorBuilder mockTableDescriptor =
    TableDescriptorBuilder.newBuilder(TABLE_NAMES[0]);
  for (Entry<String, Compression.Algorithm> entry : familyToCompression.entrySet()) {
    ColumnFamilyDescriptor columnFamilyDescriptor = ColumnFamilyDescriptorBuilder
      .newBuilder(Bytes.toBytes(entry.getKey()))
      .setMaxVersions(1)
      .setCompressionType(entry.getValue())
      .setBlockCacheEnabled(false)
      .setTimeToLive(0)
      .build();

    mockTableDescriptor.setColumnFamily(columnFamilyDescriptor);
  }
  Mockito.doReturn(mockTableDescriptor.build()).when(table).getDescriptor();
}
 
Example 16  Project: hbase  File: TestHFileOutputFormat2.java
/**
 * @return a map from column family names to compression algorithms for
 *         testing column family compression. Column family names have special characters
 */
private Map<String, Compression.Algorithm>
    getMockColumnFamiliesForCompression (int numCfs) {
  Map<String, Compression.Algorithm> familyToCompression = new HashMap<>();
  // use column family names having special characters
  if (numCfs-- > 0) {
    familyToCompression.put("Family1!@#!@#&", Compression.Algorithm.LZO);
  }
  if (numCfs-- > 0) {
    familyToCompression.put("Family2=asdads&!AASD", Compression.Algorithm.SNAPPY);
  }
  if (numCfs-- > 0) {
    familyToCompression.put("Family2=asdads&!AASD", Compression.Algorithm.GZ);
  }
  if (numCfs-- > 0) {
    familyToCompression.put("Family3", Compression.Algorithm.NONE);
  }
  return familyToCompression;
}
 
Example 17  Project: examples  File: Create2.java
public static void main(String[] args) throws MasterNotRunningException, ZooKeeperConnectionException, IOException {
  Configuration conf = HBaseConfiguration.create();
  HBaseAdmin admin = new HBaseAdmin(conf);
  // tag::CREATE2[]
  HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("pages"));
  byte[][] splits = {Bytes.toBytes("b"), Bytes.toBytes("f"),
    Bytes.toBytes("k"), Bytes.toBytes("n"), Bytes.toBytes("t")};
  desc.setValue(Bytes.toBytes("comment"), Bytes.toBytes("Create 10012014"));
  HColumnDescriptor family = new HColumnDescriptor("c");
  family.setCompressionType(Algorithm.GZ);
  family.setMaxVersions(52);
  family.setBloomFilterType(BloomType.ROW);
  desc.addFamily(family);
  admin.createTable(desc, splits);
  // end::CREATE2[]
  admin.close();
}
 
Example 18  Project: examples  File: CreateTable.java
public static void main(String[] args) throws MasterNotRunningException,
    ZooKeeperConnectionException, IOException {
  try (Connection connection = ConnectionFactory.createConnection();
      Admin admin = connection.getAdmin();) {
    LOG.info("Starting table creation");
    // tag::CREATE[]
    TableName documents = TableName.valueOf("documents");
    HTableDescriptor desc = new HTableDescriptor(documents);
    HColumnDescriptor family = new HColumnDescriptor("c");
    family.setCompressionType(Algorithm.GZ);
    family.setBloomFilterType(BloomType.NONE);
    desc.addFamily(family);
    UniformSplit uniformSplit = new UniformSplit();
    admin.createTable(desc, uniformSplit.split(8));
    // end::CREATE[]
    LOG.info("Table successfuly created");
  }
}
 
Example 19  Project: kylin-on-parquet-v2  File: HFileOutputFormat3.java
/**
 * Runs inside the task to deserialize column family to compression algorithm
 * map from the configuration.
 *
 * @param conf to read the serialized values from
 * @return a map from column family to the configured compression algorithm
 */
@VisibleForTesting
static Map<byte[], Algorithm> createFamilyCompressionMap(Configuration conf) {
    Map<byte[], String> stringMap = createFamilyConfValueMap(conf, COMPRESSION_FAMILIES_CONF_KEY);
    Map<byte[], Algorithm> compressionMap = new TreeMap<byte[], Algorithm>(Bytes.BYTES_COMPARATOR);
    for (Map.Entry<byte[], String> e : stringMap.entrySet()) {
        Algorithm algorithm = AbstractHFileWriter.compressionByName(e.getValue());
        compressionMap.put(e.getKey(), algorithm);
    }
    return compressionMap;
}
 
Example 20  Project: antsdb  File: HBaseStorageService.java
@Override
public void open(File home, ConfigService antsdbConfig, boolean isMutable) throws Exception {
    this.isMutable = isMutable;
    
    // options used by hbase service
    this.bufferSize = antsdbConfig.getHBaseBufferSize();
    this.maxColumnPerPut = antsdbConfig.getHBaseMaxColumnsPerPut();        
    String compressCodec = antsdbConfig.getHBaseCompressionCodec();
    this.compressionType = Algorithm.valueOf(compressCodec.toUpperCase());
    this.sysns = antsdbConfig.getSystemNamespace();
    _log.info("system namespace: {}", this.sysns);
    this.tnCheckpoint = TableName.valueOf(this.sysns, TABLE_SYNC_PARAM);
    
    // Configuration object, first try to find hbase-site.xml, then the embedded hbase/zookeeper settings
    
    try {
        this.hbaseConfig = getHBaseConfig(antsdbConfig);
        getConnection();
        _log.info("HBase is connected ");

        // Initialize HBase database for antsdb
        init();
    }
    catch (Throwable x) {
        if (this.hbaseConnection != null) {
            this.hbaseConnection.close();
        }
        throw x;
    }
}
 
Example 21  Project: kylin  File: HFileOutputFormat3.java
/**
 * Runs inside the task to deserialize column family to compression algorithm
 * map from the configuration.
 *
 * @param conf to read the serialized values from
 * @return a map from column family to the configured compression algorithm
 */
@VisibleForTesting
static Map<byte[], Algorithm> createFamilyCompressionMap(Configuration conf) {
    Map<byte[], String> stringMap = createFamilyConfValueMap(conf, COMPRESSION_FAMILIES_CONF_KEY);
    Map<byte[], Algorithm> compressionMap = new TreeMap<byte[], Algorithm>(Bytes.BYTES_COMPARATOR);
    for (Map.Entry<byte[], String> e : stringMap.entrySet()) {
        Algorithm algorithm = AbstractHFileWriter.compressionByName(e.getValue());
        compressionMap.put(e.getKey(), algorithm);
    }
    return compressionMap;
}
 
Example 22  Project: hbase  File: HBaseTestingUtility.java
/**
 * Create all combinations of Bloom filters and compression algorithms for
 * testing.
 */
private static List<Object[]> bloomAndCompressionCombinations() {
  List<Object[]> configurations = new ArrayList<>();
  for (Compression.Algorithm comprAlgo :
       HBaseCommonTestingUtility.COMPRESSION_ALGORITHMS) {
    for (BloomType bloomType : BloomType.values()) {
      configurations.add(new Object[] { comprAlgo, bloomType });
    }
  }
  return Collections.unmodifiableList(configurations);
}
 
Example 23  Project: hbase  File: HBaseTestingUtility.java
/**
 * Creates a pre-split table for load testing. If the table already exists,
 * logs a warning and continues.
 * @return the number of regions the table was split into
 */
public static int createPreSplitLoadTestTable(Configuration conf,
    TableName tableName, byte[] columnFamily, Algorithm compression,
    DataBlockEncoding dataBlockEncoding) throws IOException {
  return createPreSplitLoadTestTable(conf, tableName,
    columnFamily, compression, dataBlockEncoding, DEFAULT_REGIONS_PER_SERVER, 1,
    Durability.USE_DEFAULT);
}
 
Example 24  Project: hbase  File: HBaseTestingUtility.java
/**
 * Get supported compression algorithms.
 * @return supported compression algorithms.
 */
public static Compression.Algorithm[] getSupportedCompressionAlgorithms() {
  String[] allAlgos = HFile.getSupportedCompressionAlgorithms();
  List<Compression.Algorithm> supportedAlgos = new ArrayList<>();
  for (String algoName : allAlgos) {
    try {
      Compression.Algorithm algo = Compression.getCompressionAlgorithmByName(algoName);
      algo.getCompressor();
      supportedAlgos.add(algo);
    } catch (Throwable t) {
      // this algo is not available
    }
  }
  return supportedAlgos.toArray(new Algorithm[supportedAlgos.size()]);
}
 
Example 25  Project: hbase  File: TestHFileBlock.java
public byte[] createTestV1Block(Compression.Algorithm algo)
    throws IOException {
  Compressor compressor = algo.getCompressor();
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  OutputStream os = algo.createCompressionStream(baos, compressor, 0);
  DataOutputStream dos = new DataOutputStream(os);
  BlockType.META.write(dos); // Let's make this a meta block.
  writeTestBlockContents(dos);
  dos.flush();
  algo.returnCompressor(compressor);
  return baos.toByteArray();
}
 
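Example 25 above exercises only the compression side of the stream API. As a complement, the following minimal sketch (not taken from any of the listed projects) shows the matching decompression path using createDecompressionStream, getDecompressor, and returnDecompressor; it assumes the usual java.io imports plus org.apache.hadoop.io.compress.Decompressor, and the caller must know the uncompressed length.

static byte[] decompress(Compression.Algorithm algo, byte[] compressed, int uncompressedLen)
    throws IOException {
  // Borrow a pooled decompressor for this algorithm and always hand it back.
  Decompressor decompressor = algo.getDecompressor();
  try (InputStream is = algo.createDecompressionStream(
          new ByteArrayInputStream(compressed), decompressor, 0);
       DataInputStream dis = new DataInputStream(is)) {
    byte[] out = new byte[uncompressedLen];
    dis.readFully(out);
    return out;
  } finally {
    algo.returnDecompressor(decompressor);
  }
}
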
Example 26  Project: hbase  File: TestHFileBlock.java
public String createTestBlockStr(Compression.Algorithm algo,
    int correctLength, boolean useTag) throws IOException {
  HFileBlock.Writer hbw = createTestV2Block(algo, includesMemstoreTS, useTag);
  byte[] testV2Block = hbw.getHeaderAndDataForTest();
  int osOffset = HConstants.HFILEBLOCK_HEADER_SIZE + 9;
  if (testV2Block.length == correctLength) {
    // Force-set the "OS" field of the gzip header to 3 (Unix) to avoid
    // variations across operating systems.
    // See http://www.gzip.org/zlib/rfc-gzip.html for gzip format.
    // We only make this change when the compressed block length matches.
    // Otherwise, there are obviously other inconsistencies.
    testV2Block[osOffset] = 3;
  }
  return Bytes.toStringBinary(testV2Block);
}
 
Example 27  Project: hbase  File: TestHFileBlock.java
protected void testBlockHeapSizeInternals() {
  if (ClassSize.is32BitJVM()) {
    assertEquals(64, HFileBlock.MULTI_BYTE_BUFFER_HEAP_SIZE);
  } else {
    assertEquals(80, HFileBlock.MULTI_BYTE_BUFFER_HEAP_SIZE);
  }

  for (int size : new int[] { 100, 256, 12345 }) {
    byte[] byteArr = new byte[HConstants.HFILEBLOCK_HEADER_SIZE + size];
    ByteBuffer buf = ByteBuffer.wrap(byteArr, 0, size);
    HFileContext meta = new HFileContextBuilder()
                        .withIncludesMvcc(includesMemstoreTS)
                        .withIncludesTags(includesTag)
                        .withHBaseCheckSum(false)
                        .withCompression(Algorithm.NONE)
                        .withBytesPerCheckSum(HFile.DEFAULT_BYTES_PER_CHECKSUM)
                        .withChecksumType(ChecksumType.NULL).build();
    HFileBlock block = new HFileBlock(BlockType.DATA, size, size, -1, ByteBuff.wrap(buf),
        HFileBlock.FILL_HEADER, -1, 0, -1, meta, HEAP);
    long byteBufferExpectedSize =
        ClassSize.align(ClassSize.estimateBase(new MultiByteBuff(buf).getClass(), true)
            + HConstants.HFILEBLOCK_HEADER_SIZE + size);
    long hfileMetaSize = ClassSize.align(ClassSize.estimateBase(HFileContext.class, true));
    long hfileBlockExpectedSize = ClassSize.align(ClassSize.estimateBase(HFileBlock.class, true));
    long expected = hfileBlockExpectedSize + byteBufferExpectedSize + hfileMetaSize;
    assertEquals("Block data size: " + size + ", byte buffer expected " +
        "size: " + byteBufferExpectedSize + ", HFileBlock class expected " +
        "size: " + hfileBlockExpectedSize + " HFileContext class expected size: "
            + hfileMetaSize + "; ", expected,
        block.heapSize());
  }
}
 
Example 28  Project: hbase  File: TestHFileWriterV3.java
private void testMidKeyInHFileInternals(boolean useTags) throws IOException {
  Path hfilePath = new Path(TEST_UTIL.getDataTestDir(), "testMidKeyInHFile");
  Compression.Algorithm compressAlgo = Compression.Algorithm.NONE;
  int entryCount = 50000;
  writeDataAndReadFromHFile(hfilePath, compressAlgo, entryCount, true, useTags);
}
 
Example 29  Project: hbase  File: TestHFileDataBlockEncoder.java
/**
 * Test encoding with offheap keyvalue. This test just verifies if the encoders
 * work with DBB and does not use the getXXXArray() API
 * @throws IOException
 */
@Test
public void testEncodingWithOffheapKeyValue() throws IOException {
  // usually we have just block without headers, but don't complicate that
  try {
    List<Cell> kvs = generator.generateTestExtendedOffheapKeyValues(60, true);
    HFileContext meta = new HFileContextBuilder().withIncludesMvcc(includesMemstoreTS)
        .withIncludesTags(true).withHBaseCheckSum(true).withCompression(Algorithm.NONE)
        .withBlockSize(0).withChecksumType(ChecksumType.NULL).build();
    writeBlock(kvs, meta, true);
  } catch (IllegalArgumentException e) {
    fail("No exception should have been thrown");
  }
}
 
Example 30  Project: hbase  File: TestDataBlockEncoders.java
private HFileBlockEncodingContext getEncodingContext(Compression.Algorithm algo,
    DataBlockEncoding encoding) {
  DataBlockEncoder encoder = encoding.getEncoder();
  HFileContext meta = new HFileContextBuilder()
                      .withHBaseCheckSum(false)
                      .withIncludesMvcc(includesMemstoreTS)
                      .withIncludesTags(includesTags)
                      .withCompression(algo).build();
  if (encoder != null) {
    return encoder.newDataBlockEncodingContext(encoding, HFILEBLOCK_DUMMY_HEADER, meta);
  } else {
    return new HFileBlockDefaultEncodingContext(encoding, HFILEBLOCK_DUMMY_HEADER, meta);
  }
}
 