下面列出了 org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy 的实例代码(配合 StoreFile.BloomType 使用)。你可以点击链接到 GitHub 查看源代码,也可以在右侧发表评论。
/**
 * Prepares the mini-cluster configuration for snapshot tests: enables snapshot
 * support, shrinks the memstore flush size so loads produce many small store
 * files, delays compaction, and pins a constant-size split policy so regions
 * do not split while a snapshot is in flight.
 *
 * @param conf the cluster configuration to mutate before startup
 */
private static void setupConf(Configuration conf) {
  // enable snapshot support
  conf.setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true);
  // disable the ui (fixed typo: the original key "hbase.regionsever.info.port"
  // is not a real HBase property, so the UI was never actually disabled)
  conf.setInt("hbase.regionserver.info.port", -1);
  // change the flush size to a small amount, regulating number of store files
  conf.setInt("hbase.hregion.memstore.flush.size", 25000);
  // so make sure we get a compaction when doing a load, but keep around
  // some files in the store
  conf.setInt("hbase.hstore.compaction.min", 10);
  conf.setInt("hbase.hstore.compactionThreshold", 10);
  // block writes if we get to 12 store files
  conf.setInt("hbase.hstore.blockingStoreFiles", 12);
  conf.setInt("hbase.regionserver.msginterval", 100);
  conf.setBoolean("hbase.master.enabletable.roundrobin", true);
  // Avoid potentially aggressive splitting which would cause snapshot to fail
  conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
      ConstantSizeRegionSplitPolicy.class.getName());
}
/**
 * Prepares the mini-cluster configuration for snapshot tests and points the
 * snapshot working directory at a local temp path under {@code TEMP_DIR}.
 *
 * @param conf the cluster configuration to mutate before startup
 */
private static void setupConf(Configuration conf) {
  // disable the ui (fixed typo: the original key "hbase.regionsever.info.port"
  // is not a real HBase property, so the UI was never actually disabled)
  conf.setInt("hbase.regionserver.info.port", -1);
  // change the flush size to a small amount, regulating number of store files
  conf.setInt("hbase.hregion.memstore.flush.size", 25000);
  // so make sure we get a compaction when doing a load, but keep around some
  // files in the store
  conf.setInt("hbase.hstore.compaction.min", 10);
  conf.setInt("hbase.hstore.compactionThreshold", 10);
  // block writes if we get to 12 store files
  conf.setInt("hbase.hstore.blockingStoreFiles", 12);
  // Enable snapshot
  conf.setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true);
  // Avoid splits so snapshot regions stay stable
  conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
      ConstantSizeRegionSplitPolicy.class.getName());
  // Place the snapshot working dir on the local filesystem under TEMP_DIR
  conf.set(SnapshotDescriptionUtils.SNAPSHOT_WORKING_DIR, "file://" + new Path(TEMP_DIR, ".tmpDir").toUri());
}
/**
 * Prepares the mini-cluster configuration for snapshot tests and places the
 * snapshot working directory under a random, per-run path beneath the test
 * root directory.
 *
 * @param conf the cluster configuration to mutate before startup
 * @throws IOException if resolving the default root dir path fails
 */
private static void setupConf(Configuration conf) throws IOException {
  // disable the ui (fixed typo: the original key "hbase.regionsever.info.port"
  // is not a real HBase property, so the UI was never actually disabled)
  conf.setInt("hbase.regionserver.info.port", -1);
  // change the flush size to a small amount, regulating number of store files
  conf.setInt("hbase.hregion.memstore.flush.size", 25000);
  // so make sure we get a compaction when doing a load, but keep around some
  // files in the store
  conf.setInt("hbase.hstore.compaction.min", 10);
  conf.setInt("hbase.hstore.compactionThreshold", 10);
  // block writes if we get to 12 store files
  conf.setInt("hbase.hstore.blockingStoreFiles", 12);
  // Enable snapshot
  conf.setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true);
  // Avoid splits so snapshot regions stay stable
  conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
      ConstantSizeRegionSplitPolicy.class.getName());
  // Random UUID segment keeps concurrent test runs from colliding
  String snapshotPath = UTIL.getDefaultRootDirPath().toString() + Path.SEPARATOR +
      UUID.randomUUID().toString() + Path.SEPARATOR + ".tmpdir" + Path.SEPARATOR;
  conf.set(SnapshotDescriptionUtils.SNAPSHOT_WORKING_DIR, "file://" + new Path(snapshotPath).toUri());
}
/**
 * Tunes the mini-cluster for snapshot stress tests: extra high-priority
 * handlers, small flushes, delayed compaction, a fixed split policy, and
 * fast-running cleaner chores so that cleaner races are easy to provoke.
 *
 * @param conf the cluster configuration to mutate before startup
 */
static void setupConf(Configuration conf) {
  // This test is handler-hungry; raise the high-priority handler count.
  conf.setInt(HConstants.REGION_SERVER_HIGH_PRIORITY_HANDLER_COUNT, 15);
  // Snapshots must be switched on explicitly.
  conf.setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true);
  // A small flush size yields plenty of store files per region.
  conf.setInt("hbase.hregion.memstore.flush.size", 25000);
  // Force a compaction during a load, but keep some files in the store.
  conf.setInt("hbase.hstore.compaction.min", 10);
  conf.setInt("hbase.hstore.compactionThreshold", 10);
  // Throttle writes once 12 store files pile up.
  conf.setInt("hbase.hstore.blockingStoreFiles", 12);
  conf.setInt("hbase.regionserver.msginterval", 100);
  conf.setBoolean("hbase.master.enabletable.roundrobin", true);
  // Keep regions from splitting aggressively, which would fail the snapshot.
  conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
      ConstantSizeRegionSplitPolicy.class.getName());
  // Run the cleaner chores frequently to induce failures.
  conf.setInt("hbase.master.cleaner.interval", CLEANER_INTERVAL);
  conf.setInt("hbase.master.hfilecleaner.plugins.snapshot.period", CLEANER_INTERVAL);
  // Effectively (but not fully) disable TimeToLiveHFileCleaner; turning it
  // off outright triggers races between creating the directory containing
  // back references and the back reference itself.
  conf.setInt("hbase.master.hfilecleaner.ttl", CLEANER_INTERVAL);
}
/**
 * Prepares the mini-cluster configuration for snapshot tests: small flushes,
 * delayed compaction, snapshots enabled, and a constant-size split policy so
 * regions stay put during a snapshot.
 *
 * @param conf the cluster configuration to mutate before startup
 */
protected static void setupConf(Configuration conf) {
  // disable the ui (fixed typo: the original key "hbase.regionsever.info.port"
  // is not a real HBase property, so the UI was never actually disabled)
  conf.setInt("hbase.regionserver.info.port", -1);
  // change the flush size to a small amount, regulating number of store files
  conf.setInt("hbase.hregion.memstore.flush.size", 25000);
  // so make sure we get a compaction when doing a load, but keep around some
  // files in the store
  conf.setInt("hbase.hstore.compaction.min", 10);
  conf.setInt("hbase.hstore.compactionThreshold", 10);
  // block writes if we get to 12 store files
  conf.setInt("hbase.hstore.blockingStoreFiles", 12);
  // Enable snapshot
  conf.setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true);
  // Avoid splits so snapshot regions stay stable
  conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
      ConstantSizeRegionSplitPolicy.class.getName());
}
/**
 * Prepares the mini-cluster configuration for snapshot-cleaner tests: small
 * flushes, eager minor compaction, no default cleaner plugins, snapshots
 * enabled with a short sentinel-cleanup timeout, and fast cleaner intervals.
 *
 * @param conf the cluster configuration to mutate before startup
 */
private static void setupConf(Configuration conf) {
  // disable the ui (fixed typo: the original key "hbase.regionsever.info.port"
  // is not a real HBase property, so the UI was never actually disabled)
  conf.setInt("hbase.regionserver.info.port", -1);
  // change the flush size to a small amount, regulating number of store files
  conf.setInt("hbase.hregion.memstore.flush.size", 25000);
  // so make sure we get a compaction when doing a load, but keep around some
  // files in the store
  conf.setInt("hbase.hstore.compaction.min", 2);
  conf.setInt("hbase.hstore.compactionThreshold", 5);
  // block writes once blockingStoreFiles store files accumulate
  conf.setInt("hbase.hstore.blockingStoreFiles", blockingStoreFiles);
  // Ensure no extra cleaners on by default (e.g. TimeToLiveHFileCleaner)
  conf.set(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS, "");
  conf.set(HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS, "");
  // Enable snapshot
  conf.setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true);
  conf.setLong(SnapshotManager.HBASE_SNAPSHOT_SENTINELS_CLEANUP_TIMEOUT_MILLIS, 3 * 1000L);
  conf.setLong(SnapshotHFileCleaner.HFILE_CACHE_REFRESH_PERIOD_CONF_KEY, cacheRefreshPeriod);
  // Avoid splits so snapshot regions stay stable
  conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
      ConstantSizeRegionSplitPolicy.class.getName());
  conf.setInt("hbase.hfile.compactions.cleaner.interval", 20 * 1000);
  conf.setInt("hbase.master.cleaner.snapshot.interval", 500);
}
/**
 * Creates and loads a test table tuned to flush often but neither compact
 * eagerly nor split: tiny memstore flush size, a very high compaction
 * threshold, a constant-size split policy, and a huge max file size.
 *
 * @param tableName name of the table to create
 * @return the created, pre-loaded table
 * @throws Exception if table creation or loading fails
 */
private Table setupTable(TableName tableName) throws Exception {
  byte[] family = Bytes.toBytes("fam");
  TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName);
  // Flush many small files...
  builder.setMemStoreFlushSize(5000);
  // ...but do not compact immediately...
  builder.setValue("hbase.hstore.compactionThreshold", "250");
  // ...and make sure the region does not split.
  builder.setRegionSplitPolicyClassName(ConstantSizeRegionSplitPolicy.class.getName());
  builder.setMaxFileSize(100 * 1024 * 1024);
  TableDescriptor descriptor = builder.build();
  Table table = TEST_UTIL.createTable(descriptor, new byte[][] { family },
      TEST_UTIL.getConfiguration());
  TEST_UTIL.loadTable(table, family);
  return table;
}
/**
 * Prepares the mini-cluster configuration for snapshot tests: small flushes,
 * delayed compaction, snapshots enabled, and a constant-size split policy.
 *
 * @param conf the cluster configuration to mutate before startup
 */
protected static void setupConf(Configuration conf) {
  // disable the ui (fixed typo: the original key "hbase.regionsever.info.port"
  // is not a real HBase property, so the UI was never actually disabled)
  conf.setInt("hbase.regionserver.info.port", -1);
  // change the flush size to a small amount, regulating number of store files
  conf.setInt("hbase.hregion.memstore.flush.size", 25000);
  // so make sure we get a compaction when doing a load, but keep around some
  // files in the store
  conf.setInt("hbase.hstore.compaction.min", 10);
  conf.setInt("hbase.hstore.compactionThreshold", 10);
  // block writes if we get to 12 store files
  conf.setInt("hbase.hstore.blockingStoreFiles", 12);
  // Enable snapshot
  conf.setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true);
  // Avoid splits so snapshot regions stay stable
  conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
      ConstantSizeRegionSplitPolicy.class.getName());
}
/**
 * Prepares the mini-cluster configuration for flush-heavy tests: small
 * memstore flush size, periodic major compactions disabled, and a
 * constant-size split policy to prevent aggressive region splits.
 *
 * @param conf the cluster configuration to mutate before startup
 */
private static void setupConf(Configuration conf) {
  // disable the ui (fixed typo: the original key "hbase.regionsever.info.port"
  // is not a real HBase property, so the UI was never actually disabled)
  conf.setInt("hbase.regionserver.info.port", -1);
  // drop the memstore size so we get flushes
  conf.setInt("hbase.hregion.memstore.flush.size", 25000);
  // disable periodic major compactions
  conf.setInt(HConstants.MAJOR_COMPACTION_PERIOD, 0);
  // prevent aggressive region split
  conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
      ConstantSizeRegionSplitPolicy.class.getName());
}
/**
 * Starts a single-node mini-cluster with a tiny flush size (to exercise
 * scanner reseeks), a fixed split policy, and HFile v3 for the mob tests.
 *
 * @throws Exception if the mini-cluster fails to start
 */
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  Configuration config = UTIL.getConfiguration();
  // A small flush size makes the minicluster exercise reseeking scanners.
  config.set(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, String.valueOf(128 * 1024));
  // Keep regions from splitting aggressively.
  config.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
      ConstantSizeRegionSplitPolicy.class.getName());
  // HFile format v3 is needed by the mob tests.
  config.setInt("hfile.format.version", 3);
  UTIL.startMiniCluster(1);
}
/**
 * Starts the mini-cluster with scanner caching enabled and region splitting
 * effectively disabled.
 *
 * @throws Exception if the mini-cluster fails to start
 */
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  Configuration config = TEST_UTIL.getConfiguration();
  config.setInt("hbase.client.scanner.caching", 1000);
  // Pin the split policy and raise the max region file size to 10 GB so no
  // region ever splits during the test.
  config.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
      ConstantSizeRegionSplitPolicy.class.getName());
  config.setLong(HConstants.HREGION_MAX_FILESIZE, 10L * 1024 * 1024 * 1024);
  TEST_UTIL.startMiniCluster();
}
/**
 * Initializes the test cluster with {@code SERVER_COUNT} servers, a small
 * flush size (to exercise reseeking scanners), and a fixed split policy,
 * then wires up the ACID-guarantees test tool.
 *
 * @throws Exception if cluster initialization fails
 */
@Override
public void setUpCluster() throws Exception {
  util = getTestingUtil(getConf());
  util.initializeCluster(SERVER_COUNT);
  conf = getConf();
  // Small flush size so the minicluster exercises reseeking scanners.
  conf.set(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, String.valueOf(128 * 1024));
  // Keep regions from splitting aggressively.
  conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
      ConstantSizeRegionSplitPolicy.class.getName());
  tool = new AcidGuaranteesTestTool();
  tool.setConf(getConf());
}
/**
 * Chaos action that switches the given table's split policy among a fixed
 * set of candidates: increasing-to-upper-bound, constant-size, and disabled.
 *
 * @param tableName table whose split policy will be changed
 */
public ChangeSplitPolicyAction(TableName tableName) {
  this.tableName = tableName;
  this.possiblePolicies = new String[3];
  this.possiblePolicies[0] = IncreasingToUpperBoundRegionSplitPolicy.class.getName();
  this.possiblePolicies[1] = ConstantSizeRegionSplitPolicy.class.getName();
  this.possiblePolicies[2] = DisabledRegionSplitPolicy.class.getName();
  this.random = new Random();
}
/**
 * Creates {@code tableName} pre-split into {@code regionCount} regions with a
 * single column family, unless the table already exists.
 *
 * <p>Fixed defects in the original: the for-each loop assigned its loop
 * variable instead of the array slot, leaving every element of
 * {@code splitKeys} null; the println printed a {@code byte[]} reference; and
 * {@code admin.createTable} was never invoked, so the method was a no-op that
 * still returned true. Now mirrors the working String-based sibling.
 *
 * @param tableName table to create
 * @param columnFamilyName the single column family for the table
 * @param regionCount desired number of pre-split regions (absolute value used)
 * @param regionMaxSize max store file size before a region split is triggered
 * @param admin admin handle used to check existence and create the table
 * @return true if the table was created, false if it already existed
 * @throws IOException if an admin operation fails
 */
private static boolean createTable(byte[] tableName, byte[] columnFamilyName,
    short regionCount, long regionMaxSize, HBaseAdmin admin)
    throws IOException {
  if (admin.tableExists(tableName)) {
    return false;
  }
  HTableDescriptor tableDescriptor = new HTableDescriptor();
  tableDescriptor.setName(tableName);
  HColumnDescriptor columnDescriptor = new HColumnDescriptor(columnFamilyName);
  columnDescriptor.setCompressionType(Compression.Algorithm.SNAPPY);
  columnDescriptor.setBlocksize(64 * 1024);
  columnDescriptor.setBloomFilterType(BloomType.ROW);
  columnDescriptor.setMaxVersions(10);
  tableDescriptor.addFamily(columnDescriptor);
  tableDescriptor.setMaxFileSize(regionMaxSize);
  // SPLIT_POLICY is a static constant; reference it via the class, not the instance
  tableDescriptor.setValue(HTableDescriptor.SPLIT_POLICY,
      ConstantSizeRegionSplitPolicy.class.getName());
  tableDescriptor.setDeferredLogFlush(true);
  regionCount = (short) Math.abs(regionCount);
  int regionRange = Short.MAX_VALUE / regionCount;
  int counter = 0;
  byte[][] splitKeys = new byte[regionCount][];
  // Indexed loop so the split keys are actually stored in the array
  for (int i = 0; i < splitKeys.length; i++) {
    counter = counter + regionRange;
    String key = StringUtils.leftPad(Integer.toString(counter), 5, '0');
    splitKeys[i] = Bytes.toBytes(key);
    System.out.println(" - Split: " + i + " '" + key + "'");
  }
  admin.createTable(tableDescriptor, splitKeys);
  return true;
}
/**
 * Creates {@code tableName} pre-split into {@code regionCount} regions with a
 * single column family.
 *
 * @param tableName table to create
 * @param columnFamilyName the single column family for the table
 * @param regionCount desired number of pre-split regions; must be non-zero
 *          (absolute value is used)
 * @param regionMaxSize max store file size before a region split is triggered
 * @param admin admin handle used to create the table
 * @throws IOException if the create operation fails
 * @throws IllegalArgumentException if {@code regionCount} is zero
 */
private static void createTable(String tableName, String columnFamilyName,
    short regionCount, long regionMaxSize, HBaseAdmin admin)
    throws IOException {
  System.out.println("Creating Table: " + tableName);
  HTableDescriptor tableDescriptor = new HTableDescriptor();
  tableDescriptor.setName(Bytes.toBytes(tableName));
  HColumnDescriptor columnDescriptor = new HColumnDescriptor(columnFamilyName);
  columnDescriptor.setCompressionType(Compression.Algorithm.SNAPPY);
  columnDescriptor.setBlocksize(64 * 1024);
  columnDescriptor.setBloomFilterType(BloomType.ROW);
  tableDescriptor.addFamily(columnDescriptor);
  tableDescriptor.setMaxFileSize(regionMaxSize);
  // SPLIT_POLICY is a static constant; reference it via the class, not the instance
  tableDescriptor.setValue(HTableDescriptor.SPLIT_POLICY, ConstantSizeRegionSplitPolicy.class.getName());
  tableDescriptor.setDeferredLogFlush(true);
  regionCount = (short) Math.abs(regionCount);
  // Guard the division below: a zero regionCount would otherwise throw a bare
  // ArithmeticException with no context
  if (regionCount == 0) {
    throw new IllegalArgumentException("regionCount must be non-zero");
  }
  int regionRange = Short.MAX_VALUE / regionCount;
  int counter = 0;
  byte[][] splitKeys = new byte[regionCount][];
  for (int i = 0; i < splitKeys.length; i++) {
    counter = counter + regionRange;
    String key = StringUtils.leftPad(Integer.toString(counter), 5, '0');
    splitKeys[i] = Bytes.toBytes(key);
    System.out.println(" - Split: " + i + " '" + key + "'");
  }
  admin.createTable(tableDescriptor, splitKeys);
}
/**
 * CLI entry point: creates the HBase table backing a Kylin cube, pre-split
 * using the region boundaries read from the partition file produced by an
 * earlier step.
 *
 * <p>Parses cube name, partition-file path and HTable name from the command
 * line, builds the table descriptor (constant-size split policy, one column
 * family per cube HBase mapping entry), deploys the Kylin coprocessor, and
 * creates the table. Fails if the table already exists.
 *
 * @param args command-line arguments (cube name, partition file, htable name)
 * @return 0 on success, 2 on any failure (usage is printed and the error logged)
 * @throws Exception only from option parsing before the try block; all later
 *           errors are caught and converted to exit code 2
 */
@Override
public int run(String[] args) throws Exception {
Options options = new Options();
options.addOption(OPTION_CUBE_NAME);
options.addOption(OPTION_PARTITION_FILE_PATH);
options.addOption(OPTION_HTABLE_NAME);
parseOptions(options, args);
Path partitionFilePath = new Path(getOptionValue(OPTION_PARTITION_FILE_PATH));
// NOTE(review): cube and table names are upper-cased here — presumably Kylin
// stores them upper-case; confirm against CubeManager conventions.
String cubeName = getOptionValue(OPTION_CUBE_NAME).toUpperCase();
KylinConfig config = KylinConfig.getInstanceFromEnv();
CubeManager cubeMgr = CubeManager.getInstance(config);
CubeInstance cube = cubeMgr.getCube(cubeName);
CubeDesc cubeDesc = cube.getDescriptor();
String tableName = getOptionValue(OPTION_HTABLE_NAME).toUpperCase();
HTableDescriptor tableDesc = new HTableDescriptor(TableName.valueOf(tableName));
// Constant-size split policy: regions split only when they exceed max file size.
// https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/regionserver/ConstantSizeRegionSplitPolicy.html
tableDesc.setValue(HTableDescriptor.SPLIT_POLICY, ConstantSizeRegionSplitPolicy.class.getName());
// Tag the table so Kylin can find it by metadata URL prefix.
tableDesc.setValue(IRealizationConstants.HTableTag, config.getMetadataUrlPrefix());
Configuration conf = HBaseConfiguration.create(getConf());
HBaseAdmin admin = new HBaseAdmin(conf);
try {
if (User.isHBaseSecurityEnabled(conf)) {
// add coprocessor for bulk load
tableDesc.addCoprocessor("org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint");
}
// One HBase column family per cube HBase-mapping column family.
for (HBaseColumnFamilyDesc cfDesc : cubeDesc.getHBaseMapping().getColumnFamily()) {
HColumnDescriptor cf = new HColumnDescriptor(cfDesc.getName());
cf.setMaxVersions(1);
if (LZOSupportnessChecker.getSupportness()) {
logger.info("hbase will use lzo to compress data");
cf.setCompressionType(Algorithm.LZO);
} else {
logger.info("hbase will not use lzo to compress data");
}
cf.setDataBlockEncoding(DataBlockEncoding.FAST_DIFF);
cf.setInMemory(false);
cf.setBlocksize(4 * 1024 * 1024); // set to 4MB
tableDesc.addFamily(cf);
}
// Region boundaries come from the partition file written by the split step.
byte[][] splitKeys = getSplits(conf, partitionFilePath);
if (admin.tableExists(tableName)) {
// Deliberately refuse to touch an existing table rather than drop it.
// admin.disableTable(tableName);
// admin.deleteTable(tableName);
throw new RuntimeException("HBase table " + tableName + " exists!");
}
// Coprocessor must be deployed before the table is created.
DeployCoprocessorCLI.deployCoprocessor(tableDesc);
admin.createTable(tableDesc, splitKeys);
logger.info("create hbase table " + tableName + " done.");
return 0;
} catch (Exception e) {
printUsage(options);
e.printStackTrace(System.err);
logger.error(e.getLocalizedMessage(), e);
return 2;
} finally {
admin.close();
}
}