org.apache.hadoop.hbase.HConstants#HFILEBLOCK_HEADER_SIZE Code Examples

The following examples show how org.apache.hadoop.hbase.HConstants#HFILEBLOCK_HEADER_SIZE is used in the HBase project source code.
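For orientation before the examples: HFILEBLOCK_HEADER_SIZE is the size, in bytes, of an HFile v2 block header when HBase checksums are enabled. The sketch below reconstructs the layout the constant accounts for; the field names and byte counts are the standard HFile v2 values and are an assumption of this note, not something quoted from HConstants itself.

// Assumed HFile v2 header layout (illustrative, not copied from HConstants):
//   8 bytes  block magic (the BlockType, e.g. "DATABLK*")
//   4 bytes  onDiskSizeWithoutHeader
//   4 bytes  uncompressedSizeWithoutHeader
//   8 bytes  prevBlockOffset
//   1 byte   checksumType
//   4 bytes  bytesPerChecksum
//   4 bytes  onDiskDataSizeWithHeader
// = 33 bytes in total; the first 24 bytes form HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM.
int headerSize = HConstants.HFILEBLOCK_HEADER_SIZE;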

Example 1 (project: hbase, file: HFileBlock.java). The HFileBlock.Writer constructor, which rejects any bytesPerChecksum smaller than the block header size.
public Writer(HFileDataBlockEncoder dataBlockEncoder, HFileContext fileContext,
    ByteBuffAllocator allocator) {
  if (fileContext.getBytesPerChecksum() < HConstants.HFILEBLOCK_HEADER_SIZE) {
    throw new RuntimeException("Unsupported value of bytesPerChecksum. " +
        " Minimum is " + HConstants.HFILEBLOCK_HEADER_SIZE + " but the configured value is " +
        fileContext.getBytesPerChecksum());
  }
  this.allocator = allocator;
  this.dataBlockEncoder =
      dataBlockEncoder != null ? dataBlockEncoder : NoOpDataBlockEncoder.INSTANCE;
  this.dataBlockEncodingCtx = this.dataBlockEncoder
      .newDataBlockEncodingContext(HConstants.HFILEBLOCK_DUMMY_HEADER, fileContext);
  // TODO: This should be lazily instantiated since we usually do NOT need this default encoder
  this.defaultBlockEncodingCtx = new HFileBlockDefaultEncodingContext(null,
      HConstants.HFILEBLOCK_DUMMY_HEADER, fileContext);
  // TODO: Set BAOS initial size. Use fileContext.getBlocksize() and add for header/checksum
  baosInMemory = new ByteArrayOutputStream();
  prevOffsetByType = new long[BlockType.values().length];
  for (int i = 0; i < prevOffsetByType.length; ++i) {
    prevOffsetByType[i] = UNSET;
  }
  // TODO: Why fileContext saved away when we have dataBlockEncoder and/or
  // defaultDataBlockEncoder?
  this.fileContext = fileContext;
}
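A minimal usage sketch of the guard above (hypothetical code, not from the article): an HFileContext whose bytesPerChecksum is smaller than the header size is rejected at construction time, presumably because the header must fit within a single checksum chunk.

// Hypothetical illustration; the values are for demonstration only.
HFileContext ctx = new HFileContextBuilder()
    .withBytesPerCheckSum(16) // below HConstants.HFILEBLOCK_HEADER_SIZE
    .build();
try {
  new HFileBlock.Writer(NoOpDataBlockEncoder.INSTANCE, ctx, ByteBuffAllocator.HEAP);
} catch (RuntimeException expected) {
  // "Unsupported value of bytesPerChecksum. Minimum is ..." per the constructor above.
}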
 
Example 2 (project: hbase, file: TestHFileBlock.java). A test that two blocks differing only in next-block metadata serialize differently with the metadata included, and identically without it.
@Test
public void testSerializeWithoutNextBlockMetadata() {
  int size = 100;
  int length = HConstants.HFILEBLOCK_HEADER_SIZE + size;
  byte[] byteArr = new byte[length];
  ByteBuffer buf = ByteBuffer.wrap(byteArr, 0, size);
  HFileContext meta = new HFileContextBuilder().build();
  HFileBlock blockWithNextBlockMetadata = new HFileBlock(BlockType.DATA, size, size, -1,
      ByteBuff.wrap(buf), HFileBlock.FILL_HEADER, -1, 52, -1, meta, alloc);
  HFileBlock blockWithoutNextBlockMetadata = new HFileBlock(BlockType.DATA, size, size, -1,
      ByteBuff.wrap(buf), HFileBlock.FILL_HEADER, -1, -1, -1, meta, alloc);
  ByteBuffer buff1 = ByteBuffer.allocate(length);
  ByteBuffer buff2 = ByteBuffer.allocate(length);
  blockWithNextBlockMetadata.serialize(buff1, true);
  blockWithoutNextBlockMetadata.serialize(buff2, true);
  assertNotEquals(buff1, buff2);
  buff1.clear();
  buff2.clear();
  blockWithNextBlockMetadata.serialize(buff1, false);
  blockWithoutNextBlockMetadata.serialize(buff2, false);
  assertEquals(buff1, buff2);
}
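The two blocks differ only in one constructor argument, 52 versus -1, which carries the next-block metadata the test name refers to. Serializing with the flag set to true includes that metadata, so the first pair of buffers differs; with false it is omitted, so the second pair is byte-identical.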
 
Example 3 (project: hbase, file: TestHFileBlock.java). A helper that renders a test block as a string, pinning the gzip "OS" header byte so output is stable across operating systems.
public String createTestBlockStr(Compression.Algorithm algo,
    int correctLength, boolean useTag) throws IOException {
  HFileBlock.Writer hbw = createTestV2Block(algo, includesMemstoreTS, useTag);
  byte[] testV2Block = hbw.getHeaderAndDataForTest();
  int osOffset = HConstants.HFILEBLOCK_HEADER_SIZE + 9;
  if (testV2Block.length == correctLength) {
    // Force-set the "OS" field of the gzip header to 3 (Unix) to avoid
    // variations across operating systems.
    // See http://www.gzip.org/zlib/rfc-gzip.html for gzip format.
    // We only make this change when the compressed block length matches.
    // Otherwise, there are obviously other inconsistencies.
    testV2Block[osOffset] = 3;
  }
  return Bytes.toStringBinary(testV2Block);
}
 
Example 4 (project: hbase, file: TestHFileBlock.java). A test that HFileBlock.heapSize() matches sizes estimated with ClassSize.
protected void testBlockHeapSizeInternals() {
  if (ClassSize.is32BitJVM()) {
    assertEquals(64, HFileBlock.MULTI_BYTE_BUFFER_HEAP_SIZE);
  } else {
    assertEquals(80, HFileBlock.MULTI_BYTE_BUFFER_HEAP_SIZE);
  }

  for (int size : new int[] { 100, 256, 12345 }) {
    byte[] byteArr = new byte[HConstants.HFILEBLOCK_HEADER_SIZE + size];
    ByteBuffer buf = ByteBuffer.wrap(byteArr, 0, size);
    HFileContext meta = new HFileContextBuilder()
                        .withIncludesMvcc(includesMemstoreTS)
                        .withIncludesTags(includesTag)
                        .withHBaseCheckSum(false)
                        .withCompression(Algorithm.NONE)
                        .withBytesPerCheckSum(HFile.DEFAULT_BYTES_PER_CHECKSUM)
                        .withChecksumType(ChecksumType.NULL).build();
    HFileBlock block = new HFileBlock(BlockType.DATA, size, size, -1, ByteBuff.wrap(buf),
        HFileBlock.FILL_HEADER, -1, 0, -1, meta, HEAP);
    long byteBufferExpectedSize =
        ClassSize.align(ClassSize.estimateBase(new MultiByteBuff(buf).getClass(), true)
            + HConstants.HFILEBLOCK_HEADER_SIZE + size);
    long hfileMetaSize = ClassSize.align(ClassSize.estimateBase(HFileContext.class, true));
    long hfileBlockExpectedSize = ClassSize.align(ClassSize.estimateBase(HFileBlock.class, true));
    long expected = hfileBlockExpectedSize + byteBufferExpectedSize + hfileMetaSize;
    assertEquals("Block data size: " + size + ", byte buffer expected " +
        "size: " + byteBufferExpectedSize + ", HFileBlock class expected " +
        "size: " + hfileBlockExpectedSize + " HFileContext class expected size: "
            + hfileMetaSize + "; ", expected,
        block.heapSize());
  }
}
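The expected value is assembled from three independently aligned parts: the shallow size of the HFileBlock instance itself, the MultiByteBuff wrapper plus its backing bytes (header plus payload, hence the HFILEBLOCK_HEADER_SIZE term), and the shared HFileContext.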
 
Example 5 (project: hbase, file: TestBucketCache.java). A test of RAMCache reference counting across putIfAbsent, remove, and clear.
@Test
public void testRAMCache() {
  int size = 100;
  int length = HConstants.HFILEBLOCK_HEADER_SIZE + size;
  byte[] byteArr = new byte[length];
  ByteBuffer buf = ByteBuffer.wrap(byteArr, 0, size);
  HFileContext meta = new HFileContextBuilder().build();

  RAMCache cache = new RAMCache();
  BlockCacheKey key1 = new BlockCacheKey("file-1", 1);
  BlockCacheKey key2 = new BlockCacheKey("file-2", 2);
  HFileBlock blk1 = new HFileBlock(BlockType.DATA, size, size, -1, ByteBuff.wrap(buf),
      HFileBlock.FILL_HEADER, -1, 52, -1, meta, ByteBuffAllocator.HEAP);
  HFileBlock blk2 = new HFileBlock(BlockType.DATA, size, size, -1, ByteBuff.wrap(buf),
      HFileBlock.FILL_HEADER, -1, -1, -1, meta, ByteBuffAllocator.HEAP);
  RAMQueueEntry re1 = new RAMQueueEntry(key1, blk1, 1, false, ByteBuffAllocator.NONE);
  RAMQueueEntry re2 = new RAMQueueEntry(key1, blk2, 1, false, ByteBuffAllocator.NONE);

  assertFalse(cache.containsKey(key1));
  assertNull(cache.putIfAbsent(key1, re1));
  assertEquals(2, ((HFileBlock) re1.getData()).getBufferReadOnly().refCnt());

  assertNotNull(cache.putIfAbsent(key1, re2));
  assertEquals(2, ((HFileBlock) re1.getData()).getBufferReadOnly().refCnt());
  assertEquals(1, ((HFileBlock) re2.getData()).getBufferReadOnly().refCnt());

  assertNull(cache.putIfAbsent(key2, re2));
  assertEquals(2, ((HFileBlock) re1.getData()).getBufferReadOnly().refCnt());
  assertEquals(2, ((HFileBlock) re2.getData()).getBufferReadOnly().refCnt());

  cache.remove(key1);
  assertEquals(1, ((HFileBlock) re1.getData()).getBufferReadOnly().refCnt());
  assertEquals(2, ((HFileBlock) re2.getData()).getBufferReadOnly().refCnt());

  cache.clear();
  assertEquals(1, ((HFileBlock) re1.getData()).getBufferReadOnly().refCnt());
  assertEquals(1, ((HFileBlock) re2.getData()).getBufferReadOnly().refCnt());
}
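The assertions trace buffer ownership: a block's buffer starts at refCnt 1, each successful putIfAbsent retains an extra reference while the entry is resident, and remove or clear releases it, returning the count to 1. The failed putIfAbsent of re2 under key1 takes no reference.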
 
Example 6 (project: hbase, file: TestBucketCache.java). A test that a failed IOEngine write does not leak space from the BucketAllocator.
@Test
public void testFreeBlockWhenIOEngineWriteFailure() throws IOException {
  // initialize a block.
  int size = 100, offset = 20;
  int length = HConstants.HFILEBLOCK_HEADER_SIZE + size;
  ByteBuffer buf = ByteBuffer.allocate(length);
  HFileContext meta = new HFileContextBuilder().build();
  HFileBlock block = new HFileBlock(BlockType.DATA, size, size, -1, ByteBuff.wrap(buf),
      HFileBlock.FILL_HEADER, offset, 52, -1, meta, ByteBuffAllocator.HEAP);

  // initialize a mocked IOEngine.
  IOEngine ioEngine = Mockito.mock(IOEngine.class);
  Mockito.when(ioEngine.usesSharedMemory()).thenReturn(false);
  Mockito.doThrow(RuntimeException.class).when(ioEngine).write(Mockito.any(ByteBuffer.class),
    Mockito.anyLong());
  Mockito.doThrow(RuntimeException.class).when(ioEngine).write(Mockito.any(ByteBuff.class),
    Mockito.anyLong());

  // create a bucket allocator.
  long availableSpace = 1024 * 1024 * 1024L;
  BucketAllocator allocator = new BucketAllocator(availableSpace, null);

  BlockCacheKey key = new BlockCacheKey("dummy", 1L);
  RAMQueueEntry re = new RAMQueueEntry(key, block, 1, true, ByteBuffAllocator.NONE);

  Assert.assertEquals(0, allocator.getUsedSize());
  try {
    re.writeToCache(ioEngine, allocator, null);
    Assert.fail();
  } catch (Exception e) {
  }
  Assert.assertEquals(0, allocator.getUsedSize());
}
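The final assertion is the point of the test: when the mocked IOEngine throws from write, writeToCache must release the bucket space it had already claimed, so the allocator's used size returns to 0 instead of leaking.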
 
Example 7 (project: hbase, file: HFileBlock.java). Computes the uncompressed payload size by subtracting the header from the in-memory buffer.
/**
 * The uncompressed size of the block data. Does not include header size.
 */
int getUncompressedSizeWithoutHeader() {
  expectState(State.BLOCK_READY);
  return baosInMemory.size() - HConstants.HFILEBLOCK_HEADER_SIZE;
}
 
Example 8 (project: hbase, file: HFileBlock.java). Maps the checksum flag to the corresponding header-size constant.
/**
 * Returns the header size for blocks with or without HBase checksums.
 */
public static int headerSize(boolean usesHBaseChecksum) {
  return usesHBaseChecksum ? HConstants.HFILEBLOCK_HEADER_SIZE
      : HConstants.HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM;
}
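A hedged sanity check of the two constants (the concrete byte counts are the standard HFile v2 values and are assumed here, not quoted from this article): the checksummed header extends the checksum-free one by a 1-byte checksum type, a 4-byte bytesPerChecksum, and a 4-byte onDiskDataSizeWithHeader.

int withChecksum = HFileBlock.headerSize(true);     // HFILEBLOCK_HEADER_SIZE, assumed 33
int withoutChecksum = HFileBlock.headerSize(false); // HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM, assumed 24
assert withChecksum == withoutChecksum + 1 + 4 + 4;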
 
Example 9 (project: hbase, file: CacheTestUtils.java). A test utility that fabricates HFileBlocks with hand-written, checksum-free headers and unique cache keys.
public static HFileBlockPair[] generateHFileBlocks(int blockSize, int numBlocks) {
  HFileBlockPair[] returnedBlocks = new HFileBlockPair[numBlocks];
  Random rand = new Random();
  HashSet<String> usedStrings = new HashSet<>();
  for (int i = 0; i < numBlocks; i++) {
    ByteBuffer cachedBuffer = ByteBuffer.allocate(blockSize);
    rand.nextBytes(cachedBuffer.array());
    cachedBuffer.rewind();
    int onDiskSizeWithoutHeader = blockSize;
    int uncompressedSizeWithoutHeader = blockSize;
    long prevBlockOffset = rand.nextLong();
    BlockType.DATA.write(cachedBuffer);
    cachedBuffer.putInt(onDiskSizeWithoutHeader);
    cachedBuffer.putInt(uncompressedSizeWithoutHeader);
    cachedBuffer.putLong(prevBlockOffset);
    cachedBuffer.rewind();
    HFileContext meta = new HFileContextBuilder()
                        .withHBaseCheckSum(false)
                        .withIncludesMvcc(includesMemstoreTS)
                        .withIncludesTags(false)
                        .withCompression(Compression.Algorithm.NONE)
                        .withBytesPerCheckSum(0)
                        .withChecksumType(ChecksumType.NULL)
                        .build();
    HFileBlock generated =
        new HFileBlock(BlockType.DATA, onDiskSizeWithoutHeader, uncompressedSizeWithoutHeader,
            prevBlockOffset, ByteBuff.wrap(cachedBuffer), HFileBlock.DONT_FILL_HEADER, blockSize,
            onDiskSizeWithoutHeader + HConstants.HFILEBLOCK_HEADER_SIZE, -1, meta,
            ByteBuffAllocator.HEAP);

    String strKey;
    /* No conflicting keys */
    strKey = Long.toString(rand.nextLong());
    while (!usedStrings.add(strKey)) {
      strKey = Long.toString(rand.nextLong());
    }

    returnedBlocks[i] = new HFileBlockPair();
    returnedBlocks[i].blockName = new BlockCacheKey(strKey, 0);
    returnedBlocks[i].block = generated;
  }
  return returnedBlocks;
}
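The hand-written header fields, the 8-byte block magic written by BlockType.DATA.write, two 4-byte ints, and an 8-byte long, add up to exactly the checksum-free header (24 bytes, assuming the standard HFile v2 values), which is consistent with withHBaseCheckSum(false) in the HFileContext.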
 
Example 10 (project: hbase, file: TestRAMCache.java). A test that RAMCache's get and remove are atomic with respect to each other.
@Test
public void testAtomicRAMCache() throws Exception {
  int size = 100;
  int length = HConstants.HFILEBLOCK_HEADER_SIZE + size;
  byte[] byteArr = new byte[length];

  RAMCache cache = new RAMCache();
  BlockCacheKey key = new BlockCacheKey("file-1", 1);
  MockHFileBlock blk = new MockHFileBlock(BlockType.DATA, size, size, -1,
      ByteBuffer.wrap(byteArr, 0, size), HFileBlock.FILL_HEADER, -1, 52, -1,
      new HFileContextBuilder().build(), ByteBuffAllocator.HEAP);
  RAMQueueEntry re = new RAMQueueEntry(key, blk, 1, false, ByteBuffAllocator.NONE);

  Assert.assertNull(cache.putIfAbsent(key, re));
  Assert.assertEquals(cache.putIfAbsent(key, re), re);

  CountDownLatch latch = new CountDownLatch(1);
  blk.setLatch(latch);

  AtomicBoolean error = new AtomicBoolean(false);
  Thread t1 = new Thread(() -> {
    try {
      cache.get(key);
    } catch (Exception e) {
      error.set(true);
    }
  });
  t1.start();
  Thread.sleep(200);

  AtomicBoolean removed = new AtomicBoolean(false);
  Thread t2 = new Thread(() -> {
    cache.remove(key);
    removed.set(true);
  });
  t2.start();
  Thread.sleep(200);
  Assert.assertFalse(removed.get());

  latch.countDown();
  Thread.sleep(200);
  Assert.assertTrue(removed.get());
  Assert.assertFalse(error.get());
}
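The MockHFileBlock latch stalls the first thread inside cache.get(key); the remove on the second thread cannot complete while that read is in flight, which is why removed stays false until the latch is released. Once it is, the remove finishes and the reader sees no error.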
 
Example 11 (project: hbase, file: TestBucketCache.java). A BucketCache test of the next-block-metadata replacement policy, with reference-count checks.
@Test
public void testCacheBlockNextBlockMetadataMissing() throws Exception {
  int size = 100;
  int length = HConstants.HFILEBLOCK_HEADER_SIZE + size;
  ByteBuffer buf1 = ByteBuffer.allocate(size), buf2 = ByteBuffer.allocate(size);
  HFileContext meta = new HFileContextBuilder().build();
  ByteBuffAllocator allocator = ByteBuffAllocator.HEAP;
  HFileBlock blockWithNextBlockMetadata = new HFileBlock(BlockType.DATA, size, size, -1,
      ByteBuff.wrap(buf1), HFileBlock.FILL_HEADER, -1, 52, -1, meta, allocator);
  HFileBlock blockWithoutNextBlockMetadata = new HFileBlock(BlockType.DATA, size, size, -1,
      ByteBuff.wrap(buf2), HFileBlock.FILL_HEADER, -1, -1, -1, meta, allocator);

  BlockCacheKey key = new BlockCacheKey("testCacheBlockNextBlockMetadataMissing", 0);
  ByteBuffer actualBuffer = ByteBuffer.allocate(length);
  ByteBuffer block1Buffer = ByteBuffer.allocate(length);
  ByteBuffer block2Buffer = ByteBuffer.allocate(length);
  blockWithNextBlockMetadata.serialize(block1Buffer, true);
  blockWithoutNextBlockMetadata.serialize(block2Buffer, true);

  // Add blockWithNextBlockMetadata, expect blockWithNextBlockMetadata back.
  CacheTestUtils.getBlockAndAssertEquals(cache, key, blockWithNextBlockMetadata, actualBuffer,
    block1Buffer);

  waitUntilFlushedToBucket(cache, key);
  assertNotNull(cache.backingMap.get(key));
  assertEquals(1, cache.backingMap.get(key).refCnt());
  assertEquals(1, blockWithNextBlockMetadata.getBufferReadOnly().refCnt());
  assertEquals(1, blockWithoutNextBlockMetadata.getBufferReadOnly().refCnt());

  // Add blockWithoutNextBlockMetadata, expect blockWithNextBlockMetadata back.
  CacheTestUtils.getBlockAndAssertEquals(cache, key, blockWithoutNextBlockMetadata, actualBuffer,
    block1Buffer);
  assertEquals(1, blockWithNextBlockMetadata.getBufferReadOnly().refCnt());
  assertEquals(1, blockWithoutNextBlockMetadata.getBufferReadOnly().refCnt());
  assertEquals(1, cache.backingMap.get(key).refCnt());

  // Clear and add blockWithoutNextBlockMetadata
  assertTrue(cache.evictBlock(key));
  assertEquals(1, blockWithNextBlockMetadata.getBufferReadOnly().refCnt());
  assertEquals(1, blockWithoutNextBlockMetadata.getBufferReadOnly().refCnt());

  assertNull(cache.getBlock(key, false, false, false));
  CacheTestUtils.getBlockAndAssertEquals(cache, key, blockWithoutNextBlockMetadata, actualBuffer,
    block2Buffer);

  waitUntilFlushedToBucket(cache, key);
  assertEquals(1, blockWithNextBlockMetadata.getBufferReadOnly().refCnt());
  assertEquals(1, blockWithoutNextBlockMetadata.getBufferReadOnly().refCnt());

  // Add blockWithNextBlockMetadata, expect blockWithNextBlockMetadata to replace.
  CacheTestUtils.getBlockAndAssertEquals(cache, key, blockWithNextBlockMetadata, actualBuffer,
    block1Buffer);

  waitUntilFlushedToBucket(cache, key);
  assertEquals(1, blockWithNextBlockMetadata.getBufferReadOnly().refCnt());
  assertEquals(1, blockWithoutNextBlockMetadata.getBufferReadOnly().refCnt());
}
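Taken together, the three phases show the replacement policy: caching a block without next-block metadata under a key that already holds a metadata-carrying block is a no-op, while caching the metadata-carrying block over the bare one replaces it. Example 12 runs the same scenario against LruBlockCache.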
 
Example 12 (project: hbase, file: TestLruBlockCache.java). The same replacement-policy scenario as Example 11, run against LruBlockCache.
@Test
public void testCacheBlockNextBlockMetadataMissing() {
  long maxSize = 100000;
  long blockSize = calculateBlockSize(maxSize, 10);
  int size = 100;
  int length = HConstants.HFILEBLOCK_HEADER_SIZE + size;
  byte[] byteArr = new byte[length];
  ByteBuffer buf = ByteBuffer.wrap(byteArr, 0, size);
  HFileContext meta = new HFileContextBuilder().build();
  HFileBlock blockWithNextBlockMetadata = new HFileBlock(BlockType.DATA, size, size, -1,
      ByteBuff.wrap(buf), HFileBlock.FILL_HEADER, -1, 52, -1, meta, HEAP);
  HFileBlock blockWithoutNextBlockMetadata = new HFileBlock(BlockType.DATA, size, size, -1,
      ByteBuff.wrap(buf), HFileBlock.FILL_HEADER, -1, -1, -1, meta, HEAP);

  LruBlockCache cache = new LruBlockCache(maxSize, blockSize, false,
      (int)Math.ceil(1.2*maxSize/blockSize),
      LruBlockCache.DEFAULT_LOAD_FACTOR,
      LruBlockCache.DEFAULT_CONCURRENCY_LEVEL,
      0.66f, // min
      0.99f, // acceptable
      0.33f, // single
      0.33f, // multi
      0.34f, // memory
      1.2f,  // limit
      false,
      1024);

  BlockCacheKey key = new BlockCacheKey("key1", 0);
  ByteBuffer actualBuffer = ByteBuffer.allocate(length);
  ByteBuffer block1Buffer = ByteBuffer.allocate(length);
  ByteBuffer block2Buffer = ByteBuffer.allocate(length);
  blockWithNextBlockMetadata.serialize(block1Buffer, true);
  blockWithoutNextBlockMetadata.serialize(block2Buffer, true);

  // Add blockWithNextBlockMetadata, expect blockWithNextBlockMetadata back.
  CacheTestUtils.getBlockAndAssertEquals(cache, key, blockWithNextBlockMetadata, actualBuffer,
      block1Buffer);

  // Add blockWithoutNextBlockMetadata, expect blockWithNextBlockMetadata back.
  CacheTestUtils.getBlockAndAssertEquals(cache, key, blockWithoutNextBlockMetadata, actualBuffer,
      block1Buffer);

  // Clear and add blockWithoutNextBlockMetadata.
  cache.clearCache();
  assertNull(cache.getBlock(key, false, false, false));
  CacheTestUtils.getBlockAndAssertEquals(cache, key, blockWithoutNextBlockMetadata, actualBuffer,
      block2Buffer);

  // Add blockWithNextBlockMetadata, expect blockWithNextBlockMetadata to replace.
  CacheTestUtils.getBlockAndAssertEquals(cache, key, blockWithNextBlockMetadata, actualBuffer,
      block1Buffer);
}
 
Example 13 (project: hbase, file: HFileBlock.java). Computes the on-disk payload size: data plus checksums, header excluded.
/**
 * Returns the on-disk size of the data portion of the block. This is the
 * compressed size if compression is enabled. Can only be called in the
 * "block ready" state. Header is not compressed, and its size is not
 * included in the return value.
 *
 * @return the on-disk size of the block, not including the header.
 */
int getOnDiskSizeWithoutHeader() {
  expectState(State.BLOCK_READY);
  return onDiskBlockBytesWithHeader.size() +
      onDiskChecksum.length - HConstants.HFILEBLOCK_HEADER_SIZE;
}
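A worked example with illustrative numbers (assumed, not from the article): if the compressed payload is 1000 bytes, onDiskBlockBytesWithHeader holds 33 header bytes plus the payload, and onDiskChecksum holds 8 checksum bytes, the method returns (1033 + 8) - 33 = 1008, i.e. data plus checksums with the header excluded.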