Code examples for the class org.apache.hadoop.hbase.io.compress.Compression

The examples below show how the org.apache.hadoop.hbase.io.compress.Compression API is used in real projects. Each snippet names the project and file it comes from, so you can follow the link on GitHub to read the full source.
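Before the per-project examples, here is a minimal self-contained sketch of the core API: looking an algorithm up by name and borrowing a pooled Compressor, the same calls the CompressionTest examples below rely on. The class name and codec choice are illustrative, not taken from any of the projects listed.

import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.io.compress.Compressor;

public class CompressionQuickStart {
  public static void main(String[] args) throws Exception {
    // Look up an algorithm by its lower-case codec name ...
    Compression.Algorithm algo = Compression.getCompressionAlgorithmByName("gz");
    // ... then borrow and return a pooled Compressor, as CompressionTest does.
    Compressor compressor = algo.getCompressor();
    try {
      System.out.println("Codec OK: " + algo.getName());
    } finally {
      algo.returnCompressor(compressor);
    }
  }
}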

Example 1  Project: flinkDemo  File: HBaseUtils.java
/**
 * Creates a table with the given column families. No pre-split
 * regions are created.
 *
 * @param tableName table name
 * @param cfs       column families
 * @throws Exception on any HBase error
 */
public static void createTable(String tableName, String... cfs) throws Exception {
    Admin admin = null;
    try {
        admin = HBaseUtils.getConnection().getAdmin();
        HTableDescriptor hTableDescriptor = new HTableDescriptor(TableName.valueOf(tableName));
        for (String family : cfs) {
            HColumnDescriptor hColumnDescriptor = new HColumnDescriptor(family);
            hColumnDescriptor.setCompressionType(Compression.Algorithm.SNAPPY);
            hColumnDescriptor.setMaxVersions(3);
            hTableDescriptor.addFamily(hColumnDescriptor);
        }
        admin.createTable(hTableDescriptor);
        LOGGER.info("create table " + tableName + " success.");
    } finally {
        HBaseUtils.closeAdmin(admin);
    }
}
 
Example 2  Project: atlas  File: HBaseBasedAuditRepository.java
private void createTableIfNotExists() throws AtlasException {
    Admin admin = null;
    try {
        admin = connection.getAdmin();
        LOG.info("Checking if table {} exists", tableName.getNameAsString());
        if (!admin.tableExists(tableName)) {
            LOG.info("Creating table {}", tableName.getNameAsString());
            HTableDescriptor tableDescriptor = new HTableDescriptor(tableName);
            HColumnDescriptor columnFamily = new HColumnDescriptor(COLUMN_FAMILY);
            columnFamily.setMaxVersions(1);
            columnFamily.setDataBlockEncoding(DataBlockEncoding.FAST_DIFF);
            columnFamily.setCompressionType(Compression.Algorithm.GZ);
            columnFamily.setBloomFilterType(BloomType.ROW);
            tableDescriptor.addFamily(columnFamily);
            admin.createTable(tableDescriptor);
        } else {
            LOG.info("Table {} exists", tableName.getNameAsString());
        }
    } catch (IOException e) {
        throw new AtlasException(e);
    } finally {
        close(admin);
    }
}
 
Example 3  Project: hbase  File: ThriftUtilities.java
/**
 * This utility method creates a new HBase column family descriptor based on a
 * Thrift ColumnDescriptor "struct".
 *
 * @param in Thrift ColumnDescriptor object
 * @return ModifyableColumnFamilyDescriptor
 * @throws IllegalArgument if the column name is empty
 */
static public ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor colDescFromThrift(
    ColumnDescriptor in) throws IllegalArgument {
  Compression.Algorithm comp =
    Compression.getCompressionAlgorithmByName(in.compression.toLowerCase(Locale.ROOT));
  BloomType bt =
    BloomType.valueOf(in.bloomFilterType);

  if (in.name == null || !in.name.hasRemaining()) {
    throw new IllegalArgument("column name is empty");
  }
  byte [] parsedName = CellUtil.parseColumn(Bytes.getBytes(in.name))[0];
  ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor familyDescriptor =
    new ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor(parsedName)
      .setMaxVersions(in.maxVersions)
      .setCompressionType(comp)
      .setInMemory(in.inMemory)
      .setBlockCacheEnabled(in.blockCacheEnabled)
      .setTimeToLive(in.timeToLive > 0 ? in.timeToLive : Integer.MAX_VALUE)
      .setBloomFilterType(bt);
  return familyDescriptor;
}
 
Example 4  Project: hbase  File: CompressionTest.java
public static boolean testCompression(String codec) {
  codec = codec.toLowerCase(Locale.ROOT);

  Compression.Algorithm a;

  try {
    a = Compression.getCompressionAlgorithmByName(codec);
  } catch (IllegalArgumentException e) {
    LOG.warn("Codec type: " + codec + " is not known");
    return false;
  }

  try {
    testCompression(a);
    return true;
  } catch (IOException ignored) {
    LOG.warn("Can't instantiate codec: " + codec, ignored);
    return false;
  }
}
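A natural way to use this helper is to probe every codec before picking one for a table. A sketch (the ProbeCodecs class is made up for illustration; SNAPPY and LZO will typically report unavailable unless the native libraries are installed, while GZ and NONE always pass):

import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.util.CompressionTest;

public class ProbeCodecs {
  public static void main(String[] args) {
    // Report which compression codecs are actually usable in this JVM.
    for (Compression.Algorithm algo : Compression.Algorithm.values()) {
      boolean ok = CompressionTest.testCompression(algo.getName());
      System.out.println(algo.getName() + " -> " + (ok ? "available" : "NOT available"));
    }
  }
}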
 
Example 5  Project: hbase  File: CompressionTest.java
public static void testCompression(Compression.Algorithm algo)
    throws IOException {
  if (compressionTestResults[algo.ordinal()] != null) {
    if (compressionTestResults[algo.ordinal()]) {
      return; // already passed the test, don't do it again.
    } else {
      // failed.
      throw new DoNotRetryIOException("Compression algorithm '" + algo.getName() + "'"
        + " previously failed test.");
    }
  }

  try {
    Compressor c = algo.getCompressor();
    algo.returnCompressor(c);
    compressionTestResults[algo.ordinal()] = true; // passes
  } catch (Throwable t) {
    compressionTestResults[algo.ordinal()] = false; // failure
    throw new DoNotRetryIOException(t);
  }
}
 
Example 6  Project: hbase  File: TestHFileOutputFormat2.java
/**
 * @return a map from column family names to compression algorithms for
 *         testing column family compression. The family names deliberately
 *         contain special characters.
 */
private Map<String, Compression.Algorithm>
    getMockColumnFamiliesForCompression (int numCfs) {
  Map<String, Compression.Algorithm> familyToCompression = new HashMap<>();
  // use column family names having special characters
  if (numCfs-- > 0) {
    familyToCompression.put("[email protected]#[email protected]#&", Compression.Algorithm.LZO);
  }
  if (numCfs-- > 0) {
    familyToCompression.put("Family2=asdads&!AASD", Compression.Algorithm.SNAPPY);
  }
  if (numCfs-- > 0) {
    familyToCompression.put("Family2=asdads&!AASD", Compression.Algorithm.GZ);
  }
  if (numCfs-- > 0) {
    familyToCompression.put("Family3", Compression.Algorithm.NONE);
  }
  return familyToCompression;
}
 
Example 7  Project: hbase  File: HStore.java
private HFileContext createFileContext(Compression.Algorithm compression,
    boolean includeMVCCReadpoint, boolean includesTag, Encryption.Context cryptoContext) {
  if (compression == null) {
    compression = HFile.DEFAULT_COMPRESSION_ALGORITHM;
  }
  HFileContext hFileContext = new HFileContextBuilder()
                              .withIncludesMvcc(includeMVCCReadpoint)
                              .withIncludesTags(includesTag)
                              .withCompression(compression)
                              .withCompressTags(family.isCompressTags())
                              .withChecksumType(checksumType)
                              .withBytesPerCheckSum(bytesPerChecksum)
                              .withBlockSize(blocksize)
                              .withHBaseCheckSum(true)
                              .withDataBlockEncoding(family.getDataBlockEncoding())
                              .withEncryptionContext(cryptoContext)
                              .withCreateTime(EnvironmentEdgeManager.currentTime())
                              .withColumnFamily(family.getName())
                              .withTableName(region.getTableDescriptor()
                                  .getTableName().getName())
                              .withCellComparator(this.comparator)
                              .build();
  return hFileContext;
}
 
Example 8  Project: hbase  File: FixedFileTrailer.java
/**
 * Deserialize the file trailer as writable data
 */
void deserializeFromWritable(DataInput input) throws IOException {
  fileInfoOffset = input.readLong();
  loadOnOpenDataOffset = input.readLong();
  dataIndexCount = input.readInt();
  uncompressedDataIndexSize = input.readLong();
  metaIndexCount = input.readInt();

  totalUncompressedBytes = input.readLong();
  entryCount = input.readLong();
  compressionCodec = Compression.Algorithm.values()[input.readInt()];
  numDataIndexLevels = input.readInt();
  firstDataBlockOffset = input.readLong();
  lastDataBlockOffset = input.readLong();
  // TODO this is a classname encoded into an HFile's trailer. We are going to need to have
  // some compat code here.
  setComparatorClass(getComparatorClass(Bytes.readStringFixedSize(input,
    MAX_COMPARATOR_NAME_LENGTH)));
}
 
Example 9  Project: spliceengine  File: SpliceDefaultCompactor.java
/**
 * Borrowed from DefaultCompactor.
 *
 * @param compression the compression algorithm to use, or null for the HFile default
 * @param includeMVCCReadpoint whether MVCC read points should be written
 * @param includesTag whether cell tags should be written
 * @param cryptoContext the encryption context
 * @return the file context for new store files
 */
private HFileContext createFileContext(Compression.Algorithm compression,
                                       boolean includeMVCCReadpoint, boolean includesTag, Encryption.Context cryptoContext) {
    if (compression == null) {
        compression = HFile.DEFAULT_COMPRESSION_ALGORITHM;
    }
    HFileContext hFileContext = new HFileContextBuilder()
            .withIncludesMvcc(includeMVCCReadpoint)
            .withIncludesTags(includesTag)
            .withCompression(compression)
            .withCompressTags(store.getColumnFamilyDescriptor().isCompressTags())
            .withChecksumType(HStore.getChecksumType(conf))
            .withBytesPerCheckSum(HStore.getBytesPerChecksum(conf))
            .withBlockSize(store.getColumnFamilyDescriptor().getBlocksize())
            .withHBaseCheckSum(true)
            .withDataBlockEncoding(store.getColumnFamilyDescriptor().getDataBlockEncoding())
            .withEncryptionContext(cryptoContext)
            .withCreateTime(EnvironmentEdgeManager.currentTime())
            .build();
    return hFileContext;
}
 
Example 10  Project: hbase  File: HBaseTestingUtility.java
/**
 * Create a set of column descriptors with the combination of compression,
 * encoding, bloom codecs available.
 * @param prefix family names prefix
 * @return the list of column descriptors
 */
public static List<ColumnFamilyDescriptor> generateColumnDescriptors(final String prefix) {
  List<ColumnFamilyDescriptor> columnFamilyDescriptors = new ArrayList<>();
  long familyId = 0;
  for (Compression.Algorithm compressionType: getSupportedCompressionAlgorithms()) {
    for (DataBlockEncoding encodingType: DataBlockEncoding.values()) {
      for (BloomType bloomType: BloomType.values()) {
        String name = String.format("%[email protected]#&-%[email protected]#", prefix, familyId);
        ColumnFamilyDescriptorBuilder columnFamilyDescriptorBuilder =
          ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(name));
        columnFamilyDescriptorBuilder.setCompressionType(compressionType);
        columnFamilyDescriptorBuilder.setDataBlockEncoding(encodingType);
        columnFamilyDescriptorBuilder.setBloomFilterType(bloomType);
        columnFamilyDescriptors.add(columnFamilyDescriptorBuilder.build());
        familyId++;
      }
    }
  }
  return columnFamilyDescriptors;
}
 
Example 11  Project: hbase  File: TestScanWithBloomError.java
@Test
public void testThreeStoreFiles() throws IOException {
  ColumnFamilyDescriptor columnFamilyDescriptor =
    ColumnFamilyDescriptorBuilder
      .newBuilder(Bytes.toBytes(FAMILY))
      .setCompressionType(Compression.Algorithm.GZ)
      .setBloomFilterType(bloomType)
      .setMaxVersions(TestMultiColumnScanner.MAX_VERSIONS).build();
  region = TEST_UTIL.createTestRegion(TABLE_NAME, columnFamilyDescriptor);
  createStoreFile(new int[] {1, 2, 6});
  createStoreFile(new int[] {1, 2, 3, 7});
  createStoreFile(new int[] {1, 9});
  scanColSet(new int[]{1, 4, 6, 7}, new int[]{1, 6, 7});

  HBaseTestingUtility.closeRegionAndWAL(region);
}
 
Example 12  Project: hbase  File: TestBlocksScanned.java
@Test
public void testBlocksScannedWithEncoding() throws Exception {
  byte [] tableName = Bytes.toBytes("TestBlocksScannedWithEncoding");
  TableDescriptorBuilder.ModifyableTableDescriptor tableDescriptor =
    new TableDescriptorBuilder.ModifyableTableDescriptor(TableName.valueOf(tableName));

  tableDescriptor.setColumnFamily(
      new ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor(FAMILY)
      .setMaxVersions(10)
      .setBlockCacheEnabled(true)
      .setDataBlockEncoding(DataBlockEncoding.FAST_DIFF)
      .setBlocksize(BLOCK_SIZE)
      .setCompressionType(Compression.Algorithm.NONE)
      );
  _testBlocksScanned(tableDescriptor);
}
 
Example 13  Project: hbase  File: TestHFileBlock.java
static HFileBlock.Writer createTestV2Block(Compression.Algorithm algo,
    boolean includesMemstoreTS, boolean includesTag) throws IOException {
  final BlockType blockType = BlockType.DATA;
  HFileContext meta = new HFileContextBuilder()
                      .withCompression(algo)
                      .withIncludesMvcc(includesMemstoreTS)
                      .withIncludesTags(includesTag)
                      .withBytesPerCheckSum(HFile.DEFAULT_BYTES_PER_CHECKSUM)
                      .build();
  HFileBlock.Writer hbw = new HFileBlock.Writer(null, meta);
  DataOutputStream dos = hbw.startWriting(blockType);
  writeTestBlockContents(dos);
  dos.flush();
  hbw.ensureBlockReady();
  assertEquals(1000 * 4, hbw.getUncompressedSizeWithoutHeader());
  hbw.release();
  return hbw;
}
 
Example 14  Project: hbase  File: TestHFile.java
@Test
public void testNullMetaBlocks() throws Exception {
  for (Compression.Algorithm compressAlgo :
      HBaseCommonTestingUtility.COMPRESSION_ALGORITHMS) {
    Path mFile = new Path(ROOT_DIR, "nometa_" + compressAlgo + ".hfile");
    FSDataOutputStream fout = createFSOutput(mFile);
    HFileContext meta = new HFileContextBuilder().withCompression(compressAlgo)
                        .withBlockSize(minBlockSize).build();
    Writer writer = HFile.getWriterFactory(conf, cacheConf)
        .withOutputStream(fout)
        .withFileContext(meta)
        .create();
    KeyValue kv = new KeyValue(Bytes.toBytes("foo"), Bytes.toBytes("f1"), null,
        Bytes.toBytes("value"));
    writer.append(kv);
    writer.close();
    fout.close();
    Reader reader = HFile.createReader(fs, mFile, cacheConf, true, conf);
    assertNull(reader.getMetaBlock("non-existant", false));
  }
}
 
Example 15  Project: hbase  File: TestSeekToBlockWithEncoders.java
private void seekToTheKey(KeyValue expected, List<KeyValue> kvs, Cell toSeek)
    throws IOException {
  // create all seekers
  List<DataBlockEncoder.EncodedSeeker> encodedSeekers = new ArrayList<>();
  for (DataBlockEncoding encoding : DataBlockEncoding.values()) {
    if (encoding.getEncoder() == null) {
      continue;
    }
    DataBlockEncoder encoder = encoding.getEncoder();
    HFileContext meta = new HFileContextBuilder().withHBaseCheckSum(false)
        .withIncludesMvcc(false).withIncludesTags(false)
        .withCompression(Compression.Algorithm.NONE).build();
    HFileBlockEncodingContext encodingContext = encoder.newDataBlockEncodingContext(encoding,
        HFILEBLOCK_DUMMY_HEADER, meta);
    ByteBuffer encodedBuffer = TestDataBlockEncoders.encodeKeyValues(encoding, kvs,
        encodingContext, this.useOffheapData);
    DataBlockEncoder.EncodedSeeker seeker =
      encoder.createSeeker(encoder.newDataBlockDecodingContext(meta));
    seeker.setCurrentBuffer(new SingleByteBuff(encodedBuffer));
    encodedSeekers.add(seeker);
  }
  // test it!
  // try a few random seeks
  checkSeekingConsistency(encodedSeekers, toSeek, expected);
}
 
Example 16  Project: hbase  File: HFileContext.java
HFileContext(boolean useHBaseChecksum, boolean includesMvcc, boolean includesTags,
             Compression.Algorithm compressAlgo, boolean compressTags, ChecksumType checksumType,
             int bytesPerChecksum, int blockSize, DataBlockEncoding encoding,
             Encryption.Context cryptoContext, long fileCreateTime, String hfileName,
             byte[] columnFamily, byte[] tableName, CellComparator cellComparator) {
  this.usesHBaseChecksum = useHBaseChecksum;
  this.includesMvcc = includesMvcc;
  this.includesTags = includesTags;
  this.compressAlgo = compressAlgo;
  this.compressTags = compressTags;
  this.checksumType = checksumType;
  this.bytesPerChecksum = bytesPerChecksum;
  this.blocksize = blockSize;
  if (encoding != null) {
    this.encoding = encoding;
  }
  this.cryptoContext = cryptoContext;
  this.fileCreateTime = fileCreateTime;
  this.hfileName = hfileName;
  this.columnFamily = columnFamily;
  this.tableName = tableName;
  // If no cellComparator specified, make a guess based off tablename. If hbase:meta, then should
  // be the meta table comparator. Comparators are per table.
  this.cellComparator = cellComparator != null ? cellComparator : this.tableName != null ?
    CellComparatorImpl.getCellComparator(this.tableName) : CellComparator.getInstance();
}
 
Example 17  Project: hbase  File: TestDataBlockEncoders.java
/**
 * Test whether the decompression of the first key is implemented correctly.
 * @throws IOException
 */
@Test
public void testFirstKeyInBlockOnSample() throws IOException {
  List<KeyValue> sampleKv = generator.generateTestKeyValues(NUMBER_OF_KV, includesTags);

  for (DataBlockEncoding encoding : DataBlockEncoding.values()) {
    if (encoding.getEncoder() == null) {
      continue;
    }
    DataBlockEncoder encoder = encoding.getEncoder();
    ByteBuffer encodedBuffer = encodeKeyValues(encoding, sampleKv,
        getEncodingContext(Compression.Algorithm.NONE, encoding), this.useOffheapData);
    Cell key = encoder.getFirstKeyCellInBlock(new SingleByteBuff(encodedBuffer));
    KeyValue firstKv = sampleKv.get(0);
    if (0 != PrivateCellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, key, firstKv)) {
      int commonPrefix = PrivateCellUtil.findCommonPrefixInFlatKey(key, firstKv, false, true);
      fail(String.format("Bug in '%s' commonPrefix %d", encoder.toString(), commonPrefix));
    }
  }
}
 
Example 18  Project: hbase  File: TestHFileOutputFormat2.java
private void setupMockColumnFamiliesForCompression(Table table,
    Map<String, Compression.Algorithm> familyToCompression) throws IOException {

  TableDescriptorBuilder mockTableDescriptor =
    TableDescriptorBuilder.newBuilder(TABLE_NAMES[0]);
  for (Entry<String, Compression.Algorithm> entry : familyToCompression.entrySet()) {
    ColumnFamilyDescriptor columnFamilyDescriptor = ColumnFamilyDescriptorBuilder
      .newBuilder(Bytes.toBytes(entry.getKey()))
      .setMaxVersions(1)
      .setCompressionType(entry.getValue())
      .setBlockCacheEnabled(false)
      .setTimeToLive(0)
      .build();

    mockTableDescriptor.setColumnFamily(columnFamilyDescriptor);
  }
  Mockito.doReturn(mockTableDescriptor.build()).when(table).getDescriptor();
}
 
Example 19  Project: PoseidonX  File: HTableOperatorImpl.java
private HColumnDescriptor changeCd(ColumnDescriptor cd) {
    HColumnDescriptor family = new HColumnDescriptor(cd.getFamilyName());
    if (cd.isCompress()) {
        family.setCompactionCompressionType(Compression.Algorithm.GZ);
    }
    return family;
}
 
Example 20  Project: hbase  File: ThriftUtilities.java
public static Compression.Algorithm compressionAlgorithmFromThrift(TCompressionAlgorithm in) {
  switch (in.getValue()) {
    case 0: return Compression.Algorithm.LZO;
    case 1: return Compression.Algorithm.GZ;
    case 2: return Compression.Algorithm.NONE;
    case 3: return Compression.Algorithm.SNAPPY;
    case 4: return Compression.Algorithm.LZ4;
    case 5: return Compression.Algorithm.BZIP2;
    case 6: return Compression.Algorithm.ZSTD;
    default: return Compression.Algorithm.NONE;
  }
}
 
Example 21  Project: hbase  File: ThriftUtilities.java
public static TCompressionAlgorithm compressionAlgorithmFromHBase(Compression.Algorithm in) {
  switch (in) {
    case LZO: return TCompressionAlgorithm.LZO;
    case GZ: return TCompressionAlgorithm.GZ;
    case NONE: return TCompressionAlgorithm.NONE;
    case SNAPPY: return TCompressionAlgorithm.SNAPPY;
    case LZ4: return TCompressionAlgorithm.LZ4;
    case BZIP2: return TCompressionAlgorithm.BZIP2;
    case ZSTD: return TCompressionAlgorithm.ZSTD;
    default: return TCompressionAlgorithm.NONE;
  }
}
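These two converters are intended to be inverses for every algorithm both enums model, with unknown values deliberately collapsing to NONE instead of failing. A round-trip sketch, assuming the thrift2 variants of ThriftUtilities and the generated TCompressionAlgorithm shown above:

import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.thrift2.ThriftUtilities;
import org.apache.hadoop.hbase.thrift2.generated.TCompressionAlgorithm;

public class ThriftRoundTripCheck {
  public static void main(String[] args) {
    for (Compression.Algorithm algo : Compression.Algorithm.values()) {
      TCompressionAlgorithm t = ThriftUtilities.compressionAlgorithmFromHBase(algo);
      Compression.Algorithm back = ThriftUtilities.compressionAlgorithmFromThrift(t);
      // Anything the Thrift enum does not model collapses to NONE by design.
      System.out.println(algo + " -> " + t + " -> " + back);
    }
  }
}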
 
Example 22  Project: spliceengine  File: HBaseSITestEnv.java
public static HColumnDescriptor createDataFamily() {
    HColumnDescriptor snapshot = new HColumnDescriptor(SIConstants.DEFAULT_FAMILY_BYTES);
    snapshot.setMaxVersions(Integer.MAX_VALUE);
    snapshot.setCompressionType(Compression.Algorithm.NONE);
    snapshot.setInMemory(true);
    snapshot.setBlockCacheEnabled(true);
    snapshot.setBloomFilterType(BloomType.ROW);
    return snapshot;
}
 
Example 23  Project: hbase  File: HMobStore.java
/**
 * Creates the writer for the mob file in a temp directory.
 * @param date The latest date of written cells.
 * @param maxKeyCount The key count.
 * @param compression The compression algorithm.
 * @param startKey The start key.
 * @param isCompaction If the writer is used in compaction.
 * @return The writer for the mob file.
 * @throws IOException
 */
public StoreFileWriter createWriterInTmp(Date date, long maxKeyCount,
    Compression.Algorithm compression, byte[] startKey,
    boolean isCompaction) throws IOException {
  if (startKey == null) {
    startKey = HConstants.EMPTY_START_ROW;
  }
  Path path = getTempDir();
  return createWriterInTmp(MobUtils.formatDate(date), path, maxKeyCount, compression, startKey,
    isCompaction);
}
 
Example 24  Project: hbase  File: Compactor.java
Compactor(Configuration conf, HStore store) {
  this.conf = conf;
  this.store = store;
  this.compactionKVMax =
    this.conf.getInt(HConstants.COMPACTION_KV_MAX, HConstants.COMPACTION_KV_MAX_DEFAULT);
  this.compactionCompression = (this.store.getColumnFamilyDescriptor() == null) ?
      Compression.Algorithm.NONE : this.store.getColumnFamilyDescriptor().getCompactionCompressionType();
  this.keepSeqIdPeriod = Math.max(this.conf.getInt(HConstants.KEEP_SEQID_PERIOD,
    HConstants.MIN_KEEP_SEQID_PERIOD), HConstants.MIN_KEEP_SEQID_PERIOD);
  this.dropCacheMajor = conf.getBoolean(MAJOR_COMPACTION_DROP_CACHE, true);
  this.dropCacheMinor = conf.getBoolean(MINOR_COMPACTION_DROP_CACHE, true);
}
 
Example 25  Project: spliceengine  File: HBaseSITestEnv.java
private static HTableDescriptor generateTransactionTable() throws IOException{
    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("splice",HConfiguration.TRANSACTION_TABLE));
    desc.addCoprocessor(TxnLifecycleEndpoint.class.getName());

    HColumnDescriptor columnDescriptor = new HColumnDescriptor(SIConstants.DEFAULT_FAMILY_BYTES);
    columnDescriptor.setMaxVersions(5);
    columnDescriptor.setCompressionType(Compression.Algorithm.NONE);
    columnDescriptor.setInMemory(true);
    columnDescriptor.setBlockCacheEnabled(true);
    columnDescriptor.setBloomFilterType(BloomType.ROWCOL);
    desc.addFamily(columnDescriptor);
    desc.addFamily(new HColumnDescriptor(Bytes.toBytes(SIConstants.SI_PERMISSION_FAMILY)));
    return desc;
}
 
Example 26  Project: pinpoint  File: ProgramOptions.java
private static String getCompression(ApplicationArguments args) {
    List<String> compressions = args.getOptionValues(COMPRESSION);
    if (CollectionUtils.isEmpty(compressions)) {
        return Compression.Algorithm.NONE.getName();
    }
    String compression = compressions.get(0);
    if (StringUtils.isEmpty(compression)) {
        return Compression.Algorithm.NONE.getName();
    }
    return compression;
}
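Note that this method returns the algorithm's name, not the enum constant; a caller can map the name back with the same lookup used in the CompressionTest examples above. A small sketch (the class name is hypothetical):

import org.apache.hadoop.hbase.io.compress.Compression;

public class CompressionNameLookup {
  public static void main(String[] args) {
    // getCompression() hands back a codec name such as "none" or "snappy";
    // the enum is recovered via getCompressionAlgorithmByName.
    String name = Compression.Algorithm.NONE.getName();
    Compression.Algorithm algo = Compression.getCompressionAlgorithmByName(name);
    System.out.println(algo); // NONE
  }
}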
 
Example 27  Project: foxtrot  File: HbaseTableConnection.java
private HTableDescriptor constructHTableDescriptor(final Table table) {
    String tableName = TableUtil.getTableName(hbaseConfig, table);

    HTableDescriptor hTableDescriptor = new HTableDescriptor(TableName.valueOf(tableName));
    HColumnDescriptor hColumnDescriptor = new HColumnDescriptor(DEFAULT_FAMILY_NAME);
    hColumnDescriptor.setCompressionType(Compression.Algorithm.GZ);
    hColumnDescriptor.setTimeToLive(Math.toIntExact(TimeUnit.DAYS.toSeconds(table.getTtl())));
    hTableDescriptor.addFamily(hColumnDescriptor);
    return hTableDescriptor;
}
 
Example 28  Project: hbase  File: TestHStore.java
@Test
public void testHFileContextSetWithCFAndTable() throws Exception {
  init(this.name.getMethodName());
  StoreFileWriter writer = store.createWriterInTmp(10000L,
      Compression.Algorithm.NONE, false, true, false, true);
  HFileContext hFileContext = writer.getHFileWriter().getFileContext();
  assertArrayEquals(family, hFileContext.getColumnFamily());
  assertArrayEquals(table, hFileContext.getTableName());
}
 
Example 29  Project: SparkOnALog  File: HBaseCreateTable.java
public static void main(String[] args) throws IOException {
	if (args.length < 2) {
		System.out.println("CreateTable {tableName} {columnFamilyName}");
		return;
	}

	String tableName = args[0];
	String columnFamilyName = args[1];

	HBaseAdmin admin = new HBaseAdmin(HBaseConfiguration.create());

	HTableDescriptor tableDescriptor = new HTableDescriptor(); 
	tableDescriptor.setName(Bytes.toBytes(tableName));

	HColumnDescriptor columnDescriptor = new HColumnDescriptor(columnFamilyName);

	columnDescriptor.setCompressionType(Compression.Algorithm.SNAPPY);
	columnDescriptor.setBlocksize(64 * 1024);
	columnDescriptor.setBloomFilterType(BloomType.ROW);

	tableDescriptor.addFamily(columnDescriptor);

	//tableDescriptor.setValue(tableDescriptor.SPLIT_POLICY, ConstantSizeRegionSplitPolicy.class.getName());

	System.out.println("-Creating Table");
	admin.createTable(tableDescriptor);

	admin.close();
	System.out.println("-Done");
}
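This last example is written against the old client API (HBaseAdmin and the no-arg HTableDescriptor constructor), which is deprecated in HBase 2.x. A hedged sketch of the same table creation with the builder API, keeping the argument handling from the original:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class HBaseCreateTableModern {
  public static void main(String[] args) throws Exception {
    if (args.length < 2) {
      System.out.println("CreateTable {tableName} {columnFamilyName}");
      return;
    }
    try (Connection conn = ConnectionFactory.createConnection();
         Admin admin = conn.getAdmin()) {
      // Same family settings as above: SNAPPY compression, 64 KB blocks, ROW blooms.
      admin.createTable(TableDescriptorBuilder.newBuilder(TableName.valueOf(args[0]))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(args[1]))
              .setCompressionType(Compression.Algorithm.SNAPPY)
              .setBlocksize(64 * 1024)
              .setBloomFilterType(BloomType.ROW)
              .build())
          .build());
    }
    System.out.println("-Done");
  }
}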
 