Source code examples for class org.apache.hadoop.hbase.io.FSDataInputStreamWrapper

The examples below show how the org.apache.hadoop.hbase.io.FSDataInputStreamWrapper API is used in practice, with instance code and typical usage patterns; you can also follow the links to view the original source on GitHub.
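As a quick orientation before the examples: the wrapper is normally constructed either from a FileSystem and a Path, or around an already-open FSDataInputStream, and is then handed to a ReaderContextBuilder. The sketch below is a minimal, hypothetical combination of calls that appear in the examples on this page; the class and method names are placeholders, and the import locations of ReaderContext and ReaderContextBuilder are assumed from the usual HBase package layout.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
import org.apache.hadoop.hbase.io.hfile.ReaderContext;
import org.apache.hadoop.hbase.io.hfile.ReaderContextBuilder;

public class FSDataInputStreamWrapperSketch {
  public static ReaderContext openContext(Configuration conf, Path path) throws IOException {
    FileSystem fs = path.getFileSystem(conf);
    // Wrap the file for reading; Example 25 below shows the two-stream (checksum/no-checksum) constructor.
    FSDataInputStreamWrapper stream = new FSDataInputStreamWrapper(fs, path);
    // Hand the wrapper to a ReaderContextBuilder, as in Examples 6, 12 and 24.
    return new ReaderContextBuilder()
        .withFilePath(path)
        .withInputStreamWrapper(stream)
        .withFileSize(fs.getFileStatus(path).getLen())
        .withFileSystem(stream.getHfs())
        .build();
  }
}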

Example 1  Project: phoenix   File: IndexHalfStoreFileReader.java
/**
 * @param fs filesystem to read from
 * @param p path to the store file
 * @param cacheConf cache configuration
 * @param in already-open input stream for the file, wrapped
 * @param size full size of the file
 * @param r reference describing which half (top or bottom) of the parent file to read
 * @param conf configuration
 * @param indexMaintainers index maintainers for the indexes on the data table
 * @param viewConstants view constants
 * @param regionInfo info of the region this reader is opened for
 * @param regionStartKeyInHFile region start key as written in the HFile
 * @param splitKey split key, or null to use the one carried by the reference
 * @throws IOException
 */
public IndexHalfStoreFileReader(final FileSystem fs, final Path p, final CacheConfig cacheConf,
        final FSDataInputStreamWrapper in, long size, final Reference r,
        final Configuration conf,
        final Map<ImmutableBytesWritable, IndexMaintainer> indexMaintainers,
        final byte[][] viewConstants, final HRegionInfo regionInfo,
        byte[] regionStartKeyInHFile, byte[] splitKey) throws IOException {
    super(fs, p, in, size, cacheConf, conf);
    this.splitkey = splitKey == null ? r.getSplitKey() : splitKey;
    // Is it top or bottom half?
    this.top = Reference.isTopFileRegion(r.getFileRegion());
    this.splitRow = CellUtil.cloneRow(KeyValue.createKeyValueFromKey(splitkey));
    this.indexMaintainers = indexMaintainers;
    this.viewConstants = viewConstants;
    this.regionInfo = regionInfo;
    this.regionStartKeyInHFile = regionStartKeyInHFile;
    this.offset = regionStartKeyInHFile.length;
}
 
Example 2  Project: ranger   File: RangerAuthorizationCoprocessor.java
@Override
public StoreFileReader preStoreFileReaderOpen(ObserverContext<RegionCoprocessorEnvironment> ctx, FileSystem fs, Path p, FSDataInputStreamWrapper in, long size,
											  CacheConfig cacheConf, Reference r, StoreFileReader reader) throws IOException {
	final StoreFileReader ret;

	if(LOG.isDebugEnabled()) {
		LOG.debug("==> RangerAuthorizationCoprocessor.preStoreFileReaderOpen()");
	}

	try {
		activatePluginClassLoader();
		ret = implRegionObserver.preStoreFileReaderOpen(ctx, fs, p, in, size, cacheConf, r, reader);
	} finally {
		deactivatePluginClassLoader();
	}

	if(LOG.isDebugEnabled()) {
		LOG.debug("<== RangerAuthorizationCoprocessor.preStoreFileReaderOpen()");
	}

	return ret;
}
 
Example 3  Project: ranger   File: RangerAuthorizationCoprocessor.java
@Override
public StoreFileReader postStoreFileReaderOpen(ObserverContext<RegionCoprocessorEnvironment> ctx, FileSystem fs,	Path p, FSDataInputStreamWrapper in, long size,
											   CacheConfig cacheConf, Reference r, StoreFileReader reader) throws IOException {
	final StoreFileReader ret;

	if(LOG.isDebugEnabled()) {
		LOG.debug("==> RangerAuthorizationCoprocessor.postStoreFileReaderOpen()");
	}

	try {
		activatePluginClassLoader();
		ret = implRegionObserver.postStoreFileReaderOpen(ctx, fs, p, in, size, cacheConf, r, reader);
	} finally {
		deactivatePluginClassLoader();
	}

	if(LOG.isDebugEnabled()) {
		LOG.debug("<== RangerAuthorizationCoprocessor.postStoreFileReaderOpen()");
	}

	return ret;
}
 
Example 4  Project: hbase   File: RegionCoprocessorHost.java
/**
 * @param fs filesystem to read from
 * @param p path to the file
 * @param in {@link FSDataInputStreamWrapper}
 * @param size Full size of the file
 * @param cacheConf cache configuration
 * @param r original reference file. This will be non-null only when reading a split file.
 * @return a Reader instance to use instead of the base reader if overriding
 * default behavior, null otherwise
 * @throws IOException
 */
public StoreFileReader preStoreFileReaderOpen(final FileSystem fs, final Path p,
    final FSDataInputStreamWrapper in, final long size, final CacheConfig cacheConf,
    final Reference r) throws IOException {
  if (coprocEnvironments.isEmpty()) {
    return null;
  }
  return execOperationWithResult(
      new ObserverOperationWithResult<RegionObserver, StoreFileReader>(regionObserverGetter, null) {
        @Override
        public StoreFileReader call(RegionObserver observer) throws IOException {
          return observer.preStoreFileReaderOpen(this, fs, p, in, size, cacheConf, r,
              getResult());
        }
      });
}
 
Example 5  Project: hbase   File: RegionCoprocessorHost.java
/**
 * @param fs filesystem to read from
 * @param p path to the file
 * @param in {@link FSDataInputStreamWrapper}
 * @param size Full size of the file
 * @param cacheConf cache configuration
 * @param r original reference file. This will be non-null only when reading a split file.
 * @param reader the base reader instance
 * @return The reader to use
 * @throws IOException
 */
public StoreFileReader postStoreFileReaderOpen(final FileSystem fs, final Path p,
    final FSDataInputStreamWrapper in, final long size, final CacheConfig cacheConf,
    final Reference r, final StoreFileReader reader) throws IOException {
  if (this.coprocEnvironments.isEmpty()) {
    return reader;
  }
  return execOperationWithResult(
      new ObserverOperationWithResult<RegionObserver, StoreFileReader>(regionObserverGetter, reader) {
        @Override
        public StoreFileReader call(RegionObserver observer) throws IOException {
          return observer.postStoreFileReaderOpen(this, fs, p, in, size, cacheConf, r,
              getResult());
        }
      });
}
 
Example 6  Project: hbase   File: HFile.java
/**
 * @param fs filesystem
 * @param path Path to file to read
 * @param cacheConf This must not be null. @see
 *          {@link org.apache.hadoop.hbase.io.hfile.CacheConfig#CacheConfig(Configuration)}
 * @param primaryReplicaReader true if this is a reader for primary replica
 * @param conf Configuration
 * @return an active Reader instance
 * @throws IOException Will throw a CorruptHFileException (DoNotRetryIOException subtype) if hfile
 *           is corrupt/invalid.
 */
public static Reader createReader(FileSystem fs, Path path, CacheConfig cacheConf,
    boolean primaryReplicaReader, Configuration conf) throws IOException {
  Preconditions.checkNotNull(cacheConf, "Cannot create Reader with null CacheConf");
  FSDataInputStreamWrapper stream = new FSDataInputStreamWrapper(fs, path);
  ReaderContext context = new ReaderContextBuilder()
      .withFilePath(path)
      .withInputStreamWrapper(stream)
      .withFileSize(fs.getFileStatus(path).getLen())
      .withFileSystem(stream.getHfs())
      .withPrimaryReplicaReader(primaryReplicaReader)
      .withReaderType(ReaderType.PREAD)
      .build();
  HFileInfo fileInfo = new HFileInfo(context, conf);
  Reader reader = createReader(context, fileInfo, cacheConf, conf);
  fileInfo.initMetaAndIndex(reader);
  return reader;
}
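
A hedged usage sketch of this factory follows; the path and configuration are placeholders, and the point is that createReader builds the FSDataInputStreamWrapper and ReaderContext internally, so a caller only supplies the filesystem, path and cache configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;

public class HFileReaderUsageSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();      // placeholder configuration
    Path hfilePath = new Path(args[0]);            // path to an existing HFile (placeholder)
    FileSystem fs = hfilePath.getFileSystem(conf);
    // The factory wraps the stream and builds the ReaderContext itself, as shown above.
    HFile.Reader reader = HFile.createReader(fs, hfilePath, CacheConfig.DISABLED, true, conf);
    try {
      System.out.println(reader.getHFileInfo());   // inspect file info, as Example 18 does
    } finally {
      reader.close();
    }
  }
}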
 
Example 7  Project: phoenix   File: IndexHalfStoreFileReader.java
/**
 * @param fs filesystem to read from
 * @param p path to the store file
 * @param cacheConf cache configuration
 * @param in already-open input stream for the file, wrapped
 * @param size full size of the file
 * @param r reference describing which half (top or bottom) of the parent file to read
 * @param conf configuration
 * @param indexMaintainers index maintainers for the indexes on the data table
 * @param viewConstants view constants
 * @param regionInfo info of the region this reader is opened for
 * @param regionStartKeyInHFile region start key as written in the HFile
 * @param splitKey split key, or null to use the one carried by the reference
 * @param primaryReplicaStoreFile whether this is a primary replica store file
 * @param refCount shared reference count for the underlying store file
 * @param currentRegion region currently opening this reader
 * @throws IOException
 */
public IndexHalfStoreFileReader(final FileSystem fs, final Path p, final CacheConfig cacheConf,
        final FSDataInputStreamWrapper in, long size, final Reference r,
        final Configuration conf,
        final Map<ImmutableBytesWritable, IndexMaintainer> indexMaintainers,
        final byte[][] viewConstants, final RegionInfo regionInfo,
        byte[] regionStartKeyInHFile, byte[] splitKey, boolean primaryReplicaStoreFile,
        AtomicInteger refCount, RegionInfo currentRegion) throws IOException {
    super(fs, p, in, size, cacheConf, primaryReplicaStoreFile, refCount, false,
            conf);
    this.splitkey = splitKey == null ? r.getSplitKey() : splitKey;
    // Is it top or bottom half?
    this.top = Reference.isTopFileRegion(r.getFileRegion());
    this.splitRow = CellUtil.cloneRow(new KeyValue.KeyOnlyKeyValue(splitkey));
    this.indexMaintainers = indexMaintainers;
    this.viewConstants = viewConstants;
    this.childRegionInfo = regionInfo;
    this.regionStartKeyInHFile = regionStartKeyInHFile;
    this.offset = regionStartKeyInHFile.length;
    this.refCount = refCount;
    this.currentRegion = currentRegion;
}
 
Example 8  Project: hbase   File: ReaderContext.java
public ReaderContext(Path filePath, FSDataInputStreamWrapper fsdis, long fileSize,
    HFileSystem hfs, boolean primaryReplicaReader, ReaderType type) {
  this.filePath = filePath;
  this.fsdis = fsdis;
  this.fileSize = fileSize;
  this.hfs = hfs;
  this.primaryReplicaReader = primaryReplicaReader;
  this.type = type;
}
 
Example 9  Project: hbase   File: ReaderContextBuilder.java
@VisibleForTesting
public ReaderContextBuilder withFileSystemAndPath(FileSystem fs, Path filePath)
    throws IOException {
  this.withFileSystem(fs)
      .withFilePath(filePath)
      .withFileSize(fs.getFileStatus(filePath).getLen())
      .withInputStreamWrapper(new FSDataInputStreamWrapper(fs, filePath));
  return this;
}
 
Example 10  Project: hbase   File: SimpleRegionObserver.java
@Override
public StoreFileReader preStoreFileReaderOpen(ObserverContext<RegionCoprocessorEnvironment> ctx,
    FileSystem fs, Path p, FSDataInputStreamWrapper in, long size, CacheConfig cacheConf,
    Reference r, StoreFileReader reader) throws IOException {
  ctPreStoreFileReaderOpen.incrementAndGet();
  return null;
}
 
Example 11  Project: hbase   File: SimpleRegionObserver.java
@Override
public StoreFileReader postStoreFileReaderOpen(ObserverContext<RegionCoprocessorEnvironment> ctx,
    FileSystem fs, Path p, FSDataInputStreamWrapper in, long size, CacheConfig cacheConf,
    Reference r, StoreFileReader reader) throws IOException {
  ctPostStoreFileReaderOpen.incrementAndGet();
  return reader;
}
 
Example 12  Project: hbase   File: TestChecksum.java
@Test
public void testNewBlocksHaveDefaultChecksum() throws IOException {
  Path path = new Path(TEST_UTIL.getDataTestDir(), "default_checksum");
  FSDataOutputStream os = fs.create(path);
  HFileContext meta = new HFileContextBuilder().build();
  HFileBlock.Writer hbw = new HFileBlock.Writer(null, meta);
  DataOutputStream dos = hbw.startWriting(BlockType.DATA);
  for (int i = 0; i < 1000; ++i) {
    dos.writeInt(i);
  }
  hbw.writeHeaderAndData(os);
  int totalSize = hbw.getOnDiskSizeWithHeader();
  os.close();

  // Use hbase checksums.
  assertEquals(true, hfs.useHBaseChecksum());

  FSDataInputStreamWrapper is = new FSDataInputStreamWrapper(fs, path);
  meta = new HFileContextBuilder().withHBaseCheckSum(true).build();
  ReaderContext context = new ReaderContextBuilder()
      .withInputStreamWrapper(is)
      .withFileSize(totalSize)
      .withFileSystem((HFileSystem) fs)
      .withFilePath(path)
      .build();
  HFileBlock.FSReader hbr = new HFileBlock.FSReaderImpl(context,
      meta, ByteBuffAllocator.HEAP);
  HFileBlock b = hbr.readBlockData(0, -1, false, false, true);
  assertTrue(!b.isSharedMem());
  assertEquals(b.getChecksumType(), ChecksumType.getDefaultChecksumType().getCode());
}
 
Example 13  Project: hbase   File: TestLazyDataBlockDecompression.java
/**
 * Read all blocks from {@code path} to populate {@code blockCache}.
 */
private static void cacheBlocks(Configuration conf, CacheConfig cacheConfig, FileSystem fs,
    Path path, HFileContext cxt) throws IOException {
  FSDataInputStreamWrapper fsdis = new FSDataInputStreamWrapper(fs, path);
  long fileSize = fs.getFileStatus(path).getLen();
  FixedFileTrailer trailer =
    FixedFileTrailer.readFromStream(fsdis.getStream(false), fileSize);
  ReaderContext context = new ReaderContextBuilder()
      .withFilePath(path)
      .withFileSize(fileSize)
      .withFileSystem(fsdis.getHfs())
      .withInputStreamWrapper(fsdis)
      .build();
  HFileInfo fileInfo = new HFileInfo(context, conf);
  HFile.Reader reader = new HFilePreadReader(context, fileInfo, cacheConfig, conf);
  fileInfo.initMetaAndIndex(reader);
  long offset = trailer.getFirstDataBlockOffset(),
    max = trailer.getLastDataBlockOffset();
  List<HFileBlock> blocks = new ArrayList<>(4);
  HFileBlock block;
  while (offset <= max) {
    block = reader.readBlock(offset, -1, /* cacheBlock */ true, /* pread */ false,
    /* isCompaction */ false, /* updateCacheMetrics */ true, null, null);
    offset += block.getOnDiskSizeWithHeader();
    blocks.add(block);
  }
  LOG.info("read " + Iterables.toString(blocks));
  reader.close();
}
 
Example 14  Project: hbase   File: MetricsRegionServerWrapperImpl.java
@Override
public long getTotalBytesRead() {
  return FSDataInputStreamWrapper.getTotalBytesRead();
}
 
Example 15  Project: hbase   File: MetricsRegionServerWrapperImpl.java
@Override
public long getLocalBytesRead() {
  return FSDataInputStreamWrapper.getLocalBytesRead();
}
 
Example 16  Project: hbase   File: MetricsRegionServerWrapperImpl.java
@Override
public long getShortCircuitBytesRead() {
  return FSDataInputStreamWrapper.getShortCircuitBytesRead();
}
 
Example 17  Project: hbase   File: MetricsRegionServerWrapperImpl.java
@Override
public long getZeroCopyBytesRead() {
  return FSDataInputStreamWrapper.getZeroCopyBytesRead();
}
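
Because the four accessors above are static, the metrics wrapper reads counters aggregated across all FSDataInputStreamWrapper instances in the process rather than the state of one particular stream. A minimal sketch that snapshots the same counters (the class name is a placeholder):

import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;

public class ReadBytesSnapshotSketch {
  public static void main(String[] args) {
    // Print the process-wide read counters exposed by the static accessors used in Examples 14-17.
    System.out.println("totalBytesRead=" + FSDataInputStreamWrapper.getTotalBytesRead()
        + " localBytesRead=" + FSDataInputStreamWrapper.getLocalBytesRead()
        + " shortCircuitBytesRead=" + FSDataInputStreamWrapper.getShortCircuitBytesRead()
        + " zeroCopyBytesRead=" + FSDataInputStreamWrapper.getZeroCopyBytesRead());
  }
}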
 
Example 18  Project: hbase   File: HFilePrettyPrinter.java
public int processFile(Path file, boolean checkRootDir) throws IOException {
  if (verbose) {
    out.println("Scanning -> " + file);
  }

  if (checkRootDir) {
    Path rootPath = CommonFSUtils.getRootDir(getConf());
    String rootString = rootPath + Path.SEPARATOR;
    if (!file.toString().startsWith(rootString)) {
      // First we see if fully-qualified URI matches the root dir. It might
      // also be an absolute path in the same filesystem, so we prepend the FS
      // of the root dir and see if that fully-qualified URI matches.
      FileSystem rootFS = rootPath.getFileSystem(getConf());
      String qualifiedFile = rootFS.getUri().toString() + file.toString();
      if (!qualifiedFile.startsWith(rootString)) {
        err.println(
          "ERROR, file (" + file + ") is not in HBase's root directory (" + rootString + ")");
        return -2;
      }
    }
  }

  FileSystem fs = file.getFileSystem(getConf());
  if (!fs.exists(file)) {
    err.println("ERROR, file doesnt exist: " + file);
    return -2;
  }

  HFile.Reader reader = HFile.createReader(fs, file, CacheConfig.DISABLED, true, getConf());

  Map<byte[], byte[]> fileInfo = reader.getHFileInfo();

  KeyValueStatsCollector fileStats = null;

  if (verbose || printKey || checkRow || checkFamily || printStats || checkMobIntegrity) {
    // scan over file and read key/value's and check if requested
    HFileScanner scanner = reader.getScanner(false, false, false);
    fileStats = new KeyValueStatsCollector();
    boolean shouldScanKeysValues = false;
    if (this.isSeekToRow) {
      // seek to the first kv on this row
      shouldScanKeysValues =
        (scanner.seekTo(PrivateCellUtil.createFirstOnRow(this.row)) != -1);
    } else {
      shouldScanKeysValues = scanner.seekTo();
    }
    if (shouldScanKeysValues)
      scanKeysValues(file, fileStats, scanner, row);
  }

  // print meta data
  if (shouldPrintMeta) {
    printMeta(reader, fileInfo);
  }

  if (printBlockIndex) {
    out.println("Block Index:");
    out.println(reader.getDataBlockIndexReader());
  }

  if (printBlockHeaders) {
    out.println("Block Headers:");
    /*
     * TODO: this same/similar block iteration logic is used in HFileBlock#blockRange and
     * TestLazyDataBlockDecompression. Refactor?
     */
    FSDataInputStreamWrapper fsdis = new FSDataInputStreamWrapper(fs, file);
    long fileSize = fs.getFileStatus(file).getLen();
    FixedFileTrailer trailer =
      FixedFileTrailer.readFromStream(fsdis.getStream(false), fileSize);
    long offset = trailer.getFirstDataBlockOffset(),
      max = trailer.getLastDataBlockOffset();
    HFileBlock block;
    while (offset <= max) {
      block = reader.readBlock(offset, -1, /* cacheBlock */ false, /* pread */ false,
        /* isCompaction */ false, /* updateCacheMetrics */ false, null, null);
      offset += block.getOnDiskSizeWithHeader();
      out.println(block);
    }
  }

  if (printStats) {
    fileStats.finish();
    out.println("Stats:\n" + fileStats);
  }

  reader.close();
  return 0;
}
 
Example 19  Project: hbase   File: ReaderContext.java
public FSDataInputStreamWrapper getInputStreamWrapper() {
  return this.fsdis;
}
 
Example 20  Project: hbase   File: ReaderContextBuilder.java
public ReaderContextBuilder withInputStreamWrapper(FSDataInputStreamWrapper fsdis) {
  this.fsdis = fsdis;
  return this;
}
 
Example 21  Project: hbase   File: TestHFileBlock.java
protected void testReaderV2Internals() throws IOException {
  if(includesTag) {
    TEST_UTIL.getConfiguration().setInt("hfile.format.version", 3);
  }
  for (Compression.Algorithm algo : COMPRESSION_ALGORITHMS) {
    for (boolean pread : new boolean[] { false, true }) {
        LOG.info("testReaderV2: Compression algorithm: " + algo +
                 ", pread=" + pread);
      Path path = new Path(TEST_UTIL.getDataTestDir(), "blocks_v2_"
          + algo);
      FSDataOutputStream os = fs.create(path);
      HFileContext meta = new HFileContextBuilder()
                         .withCompression(algo)
                         .withIncludesMvcc(includesMemstoreTS)
                         .withIncludesTags(includesTag)
                         .withBytesPerCheckSum(HFile.DEFAULT_BYTES_PER_CHECKSUM)
                         .build();
      HFileBlock.Writer hbw = new HFileBlock.Writer(null,
         meta);
      long totalSize = 0;
      for (int blockId = 0; blockId < 2; ++blockId) {
        DataOutputStream dos = hbw.startWriting(BlockType.DATA);
        for (int i = 0; i < 1234; ++i) {
          dos.writeInt(i);
        }
        hbw.writeHeaderAndData(os);
        totalSize += hbw.getOnDiskSizeWithHeader();
      }
      os.close();

      FSDataInputStream is = fs.open(path);
      meta = new HFileContextBuilder()
          .withHBaseCheckSum(true)
          .withIncludesMvcc(includesMemstoreTS)
          .withIncludesTags(includesTag)
          .withCompression(algo).build();
      ReaderContext context = new ReaderContextBuilder()
          .withInputStreamWrapper(new FSDataInputStreamWrapper(is))
          .withFileSize(totalSize)
          .withFilePath(path)
          .withFileSystem(fs)
          .build();
      HFileBlock.FSReader hbr = new HFileBlock.FSReaderImpl(context, meta, alloc);
      HFileBlock b = hbr.readBlockData(0, -1, pread, false, true);
      is.close();
      assertEquals(0, HFile.getAndResetChecksumFailuresCount());

      b.sanityCheck();
      assertEquals(4936, b.getUncompressedSizeWithoutHeader());
      assertEquals(algo == GZ ? 2173 : 4936,
                   b.getOnDiskSizeWithoutHeader() - b.totalChecksumBytes());
      HFileBlock expected = b;

      if (algo == GZ) {
        is = fs.open(path);
        ReaderContext readerContext = new ReaderContextBuilder()
            .withInputStreamWrapper(new FSDataInputStreamWrapper(is))
            .withFileSize(totalSize)
            .withFilePath(path)
            .withFileSystem(fs)
            .build();
        hbr = new HFileBlock.FSReaderImpl(readerContext, meta, alloc);
        b = hbr.readBlockData(0,
          2173 + HConstants.HFILEBLOCK_HEADER_SIZE + b.totalChecksumBytes(), pread, false, true);
        assertEquals(expected, b);
        int wrongCompressedSize = 2172;
        try {
          hbr.readBlockData(0, wrongCompressedSize + HConstants.HFILEBLOCK_HEADER_SIZE, pread,
            false, true);
          fail("Exception expected");
        } catch (IOException ex) {
          String expectedPrefix = "Passed in onDiskSizeWithHeader=";
          assertTrue("Invalid exception message: '" + ex.getMessage()
              + "'.\nMessage is expected to start with: '" + expectedPrefix
              + "'", ex.getMessage().startsWith(expectedPrefix));
        }
        assertRelease(b);
        is.close();
      }
      assertRelease(expected);
    }
  }
}
 
Example 22  Project: hbase   File: TestHFileBlock.java
protected void testConcurrentReadingInternals() throws IOException,
    InterruptedException, ExecutionException {
  for (Compression.Algorithm compressAlgo : COMPRESSION_ALGORITHMS) {
    Path path = new Path(TEST_UTIL.getDataTestDir(), "concurrent_reading");
    Random rand = defaultRandom();
    List<Long> offsets = new ArrayList<>();
    List<BlockType> types = new ArrayList<>();
    writeBlocks(rand, compressAlgo, path, offsets, null, types, null);
    FSDataInputStream is = fs.open(path);
    long fileSize = fs.getFileStatus(path).getLen();
    HFileContext meta = new HFileContextBuilder()
                        .withHBaseCheckSum(true)
                        .withIncludesMvcc(includesMemstoreTS)
                        .withIncludesTags(includesTag)
                        .withCompression(compressAlgo)
                        .build();
    ReaderContext context = new ReaderContextBuilder()
        .withInputStreamWrapper(new FSDataInputStreamWrapper(is))
        .withFileSize(fileSize)
        .withFilePath(path)
        .withFileSystem(fs)
        .build();
    HFileBlock.FSReader hbr = new HFileBlock.FSReaderImpl(context, meta, alloc);

    Executor exec = Executors.newFixedThreadPool(NUM_READER_THREADS);
    ExecutorCompletionService<Boolean> ecs = new ExecutorCompletionService<>(exec);

    for (int i = 0; i < NUM_READER_THREADS; ++i) {
      ecs.submit(new BlockReaderThread("reader_" + (char) ('A' + i), hbr,
          offsets, types, fileSize));
    }

    for (int i = 0; i < NUM_READER_THREADS; ++i) {
      Future<Boolean> result = ecs.take();
      assertTrue(result.get());
      if (detailedLogging) {
        LOG.info(String.valueOf(i + 1)
          + " reader threads finished successfully (algo=" + compressAlgo
          + ")");
      }
    }
    is.close();
  }
}
 
Example 23  Project: hbase   File: TestHFileEncryption.java
@Test
public void testDataBlockEncryption() throws IOException {
  final int blocks = 10;
  final int[] blockSizes = new int[blocks];
  for (int i = 0; i < blocks; i++) {
    blockSizes[i] = (1024 + RNG.nextInt(1024 * 63)) / Bytes.SIZEOF_INT;
  }
  for (Compression.Algorithm compression : TestHFileBlock.COMPRESSION_ALGORITHMS) {
    Path path = new Path(TEST_UTIL.getDataTestDir(), "block_v3_" + compression + "_AES");
    LOG.info("testDataBlockEncryption: encryption=AES compression=" + compression);
    long totalSize = 0;
    HFileContext fileContext = new HFileContextBuilder()
      .withCompression(compression)
      .withEncryptionContext(cryptoContext)
      .build();
    FSDataOutputStream os = fs.create(path);
    try {
      for (int i = 0; i < blocks; i++) {
        totalSize += writeBlock(os, fileContext, blockSizes[i]);
      }
    } finally {
      os.close();
    }
    FSDataInputStream is = fs.open(path);
    ReaderContext context = new ReaderContextBuilder()
        .withInputStreamWrapper(new FSDataInputStreamWrapper(is))
        .withFilePath(path)
        .withFileSystem(fs)
        .withFileSize(totalSize).build();
    try {
      HFileBlock.FSReaderImpl hbr = new HFileBlock.FSReaderImpl(context, fileContext,
          ByteBuffAllocator.HEAP);
      long pos = 0;
      for (int i = 0; i < blocks; i++) {
        pos += readAndVerifyBlock(pos, fileContext, hbr, blockSizes[i]);
      }
    } finally {
      is.close();
    }
  }
}
 
Example 24  Project: hbase   File: TestChecksum.java
@Test
public void testVerifyCheckSum() throws IOException {
  int intCount = 10000;
  for (ChecksumType ckt : ChecksumType.values()) {
    Path path = new Path(TEST_UTIL.getDataTestDir(), "checksum" + ckt.getName());
    FSDataOutputStream os = fs.create(path);
    HFileContext meta = new HFileContextBuilder()
          .withChecksumType(ckt)
          .build();
    HFileBlock.Writer hbw = new HFileBlock.Writer(null, meta);
    DataOutputStream dos = hbw.startWriting(BlockType.DATA);
    for (int i = 0; i < intCount; ++i) {
      dos.writeInt(i);
    }
    hbw.writeHeaderAndData(os);
    int totalSize = hbw.getOnDiskSizeWithHeader();
    os.close();

    // Use hbase checksums.
    assertEquals(true, hfs.useHBaseChecksum());

    FSDataInputStreamWrapper is = new FSDataInputStreamWrapper(fs, path);
    meta = new HFileContextBuilder().withHBaseCheckSum(true).build();
    ReaderContext context = new ReaderContextBuilder()
        .withInputStreamWrapper(is)
        .withFileSize(totalSize)
        .withFileSystem((HFileSystem) fs)
        .withFilePath(path)
        .build();
    HFileBlock.FSReader hbr = new HFileBlock.FSReaderImpl(context,
        meta, ByteBuffAllocator.HEAP);
    HFileBlock b = hbr.readBlockData(0, -1, false, false, true);
    assertTrue(!b.isSharedMem());

    // verify SingleByteBuff checksum.
    verifySBBCheckSum(b.getBufferReadOnly());

    // verify MultiByteBuff checksum.
    verifyMBBCheckSum(b.getBufferReadOnly());

    ByteBuff data = b.getBufferWithoutHeader();
    for (int i = 0; i < intCount; i++) {
      assertEquals(i, data.getInt());
    }
    try {
      data.getInt();
      fail();
    } catch (BufferUnderflowException e) {
      // expected failure
    }
    assertEquals(0, HFile.getAndResetChecksumFailuresCount());
  }
}
 
Example 25  Project: hbase   File: TestChecksum.java
protected void testChecksumInternals(boolean useTags) throws IOException {
  Compression.Algorithm algo = NONE;
  for (boolean pread : new boolean[] { false, true }) {
    for (int bytesPerChecksum : BYTES_PER_CHECKSUM) {
      Path path = new Path(TEST_UTIL.getDataTestDir(), "checksumChunk_" +
                           algo + bytesPerChecksum);
      FSDataOutputStream os = fs.create(path);
      HFileContext meta = new HFileContextBuilder()
                          .withCompression(algo)
                          .withIncludesMvcc(true)
                          .withIncludesTags(useTags)
                          .withHBaseCheckSum(true)
                          .withBytesPerCheckSum(bytesPerChecksum)
                          .build();
      HFileBlock.Writer hbw = new HFileBlock.Writer(null,
         meta);

      // write one block. The block has data
      // that is at least 6 times more than the checksum chunk size
      long dataSize = 0;
      DataOutputStream dos = hbw.startWriting(BlockType.DATA);
      for (; dataSize < 6 * bytesPerChecksum;) {
        for (int i = 0; i < 1234; ++i) {
          dos.writeInt(i);
          dataSize += 4;
        }
      }
      hbw.writeHeaderAndData(os);
      long totalSize = hbw.getOnDiskSizeWithHeader();
      os.close();

      long expectedChunks = ChecksumUtil.numChunks(
                             dataSize + HConstants.HFILEBLOCK_HEADER_SIZE,
                             bytesPerChecksum);
      LOG.info("testChecksumChunks: pread={}, bytesPerChecksum={}, fileSize={}, "
              + "dataSize={}, expectedChunks={}, compression={}", pread, bytesPerChecksum,
          totalSize, dataSize, expectedChunks, algo.toString());

      // Verify hbase checksums.
      assertEquals(true, hfs.useHBaseChecksum());

      // Read data back from file.
      FSDataInputStream is = fs.open(path);
      FSDataInputStream nochecksum = hfs.getNoChecksumFs().open(path);
      meta = new HFileContextBuilder()
             .withCompression(algo)
             .withIncludesMvcc(true)
             .withIncludesTags(useTags)
             .withHBaseCheckSum(true)
             .withBytesPerCheckSum(bytesPerChecksum)
             .build();
      ReaderContext context = new ReaderContextBuilder()
          .withInputStreamWrapper(new FSDataInputStreamWrapper(is, nochecksum))
          .withFileSize(totalSize)
          .withFileSystem(hfs)
          .withFilePath(path)
          .build();
      HFileBlock.FSReader hbr =
          new HFileBlock.FSReaderImpl(context, meta, ByteBuffAllocator.HEAP);
      HFileBlock b = hbr.readBlockData(0, -1, pread, false, true);
      assertTrue(b.getBufferReadOnly() instanceof SingleByteBuff);
      is.close();
      b.sanityCheck();
      assertEquals(dataSize, b.getUncompressedSizeWithoutHeader());

      // verify that we have the expected number of checksum chunks
      assertEquals(totalSize, HConstants.HFILEBLOCK_HEADER_SIZE + dataSize +
                   expectedChunks * HFileBlock.CHECKSUM_SIZE);

      // assert that we did not encounter hbase checksum verification failures
      assertEquals(0, HFile.getAndResetChecksumFailuresCount());
    }
  }
}
 
Example 26  Project: hbase   File: RegionObserver.java
/**
 * Called before creation of Reader for a store file.
 * Calling {@link org.apache.hadoop.hbase.coprocessor.ObserverContext#bypass()} has no
 * effect in this hook.
 *
 * @param ctx the environment provided by the region server
 * @param fs filesystem to read from
 * @param p path to the file
 * @param in {@link FSDataInputStreamWrapper}
 * @param size Full size of the file
 * @param cacheConf cache configuration
 * @param r original reference file. This will be non-null only when reading a split file.
 * @param reader the base reader, if not {@code null}, from previous RegionObserver in the chain
 * @return a Reader instance to use instead of the base reader if overriding
 * default behavior, null otherwise
 * @deprecated For Phoenix only, StoreFileReader is not a stable interface.
 */
@Deprecated
// Passing InterfaceAudience.Private args FSDataInputStreamWrapper, CacheConfig and Reference.
// This is fine as the hook is deprecated any way.
default StoreFileReader preStoreFileReaderOpen(ObserverContext<RegionCoprocessorEnvironment> ctx,
    FileSystem fs, Path p, FSDataInputStreamWrapper in, long size, CacheConfig cacheConf,
    Reference r, StoreFileReader reader) throws IOException {
  return reader;
}
 
Example 27  Project: hbase   File: RegionObserver.java
/**
 * Called after the creation of Reader for a store file.
 *
 * @param ctx the environment provided by the region server
 * @param fs filesystem to read from
 * @param p path to the file
 * @param in {@link FSDataInputStreamWrapper}
 * @param size Full size of the file
 * @param cacheConf cache configuration
 * @param r original reference file. This will be non-null only when reading a split file.
 * @param reader the base reader instance
 * @return The reader to use
 * @deprecated For Phoenix only, StoreFileReader is not a stable interface.
 */
@Deprecated
// Passing InterfaceAudience.Private args FSDataInputStreamWrapper, CacheConfig and Reference.
// This is fine as the hook is deprecated any way.
default StoreFileReader postStoreFileReaderOpen(ObserverContext<RegionCoprocessorEnvironment> ctx,
    FileSystem fs, Path p, FSDataInputStreamWrapper in, long size, CacheConfig cacheConf,
    Reference r, StoreFileReader reader) throws IOException {
  return reader;
}
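
These two hooks (and the overrides in Examples 2, 3, 10 and 11) only run once the observer is registered with the region server. A hedged wiring sketch, assuming a hypothetical observer class name; in practice the property is usually set in hbase-site.xml rather than in code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class RegisterObserverSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Hypothetical observer class; listing it under the region coprocessor key loads it for every region.
    conf.set("hbase.coprocessor.region.classes", "com.example.MyStoreFileReaderObserver");
  }
}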
 