org.apache.hadoop.hbase.HBaseConfiguration: source code examples

The examples below show how the org.apache.hadoop.hbase.HBaseConfiguration API is used in real open-source projects; each snippet's header names the project and source file it was taken from.
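Before the examples, a minimal sketch (not taken from any of the projects below; the quorum value is a placeholder) of the entry points that recur throughout this page: create(), the create(Configuration) copy variant, addHbaseResources(), and merge():

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class HBaseConfigurationSketch {
    public static void main(String[] args) throws Exception {
        // create(): loads hbase-default.xml and hbase-site.xml from the classpath.
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "localhost"); // placeholder quorum

        // create(Configuration): clones an existing Hadoop Configuration and
        // layers the HBase resources on top of it.
        Configuration copied = HBaseConfiguration.create(conf);

        // addHbaseResources(): adds the hbase-*.xml resources to an existing Configuration.
        Configuration withResources = HBaseConfiguration.addHbaseResources(new Configuration());

        // merge(dst, src): copies src's entries into dst, overwriting duplicates.
        HBaseConfiguration.merge(withResources, copied);

        // The finished Configuration is what ConnectionFactory consumes.
        try (Connection connection = ConnectionFactory.createConnection(withResources)) {
            System.out.println("connected: " + !connection.isClosed());
        }
    }
}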

Example 1  Project: super-cloudops  File: HfileBulkImporter.java
/**
 * e.g.<br/>
 * 
 * <pre>
 *  yarn jar super-devops-tool-hbase-migrator-master.jar \
 *  com.wl4g.devops.tool.hbase.migrator.HfileBulkImporter \
 *  -z emr-header-1:2181 \
 *  -t safeclound.tb_elec_power \
 *  -p /tmp-devops/safeclound.tb_elec_power
 * </pre>
 * 
 * @param args
 * @throws Exception
 */
public static void main(String[] args) throws Exception {
	HbaseMigrateUtils.showBanner();

	CommandLine line = new Builder().option("z", "zkaddr", null, "Zookeeper address.")
			.option("t", "tabname", null, "Hbase table name.")
			.option("p", "path", null, "Data hdfs path to be import. e.g. hdfs://localhost:9000/bak/safeclound.tb_air")
			.build(args);

	Configuration cfg = HBaseConfiguration.create();
	cfg.set("hbase.zookeeper.quorum", line.getOptionValue("z"));
	Connection conn = ConnectionFactory.createConnection(cfg);
	Admin admin = conn.getAdmin();
	Table table = conn.getTable(TableName.valueOf(line.getOptionValue("t")));
	LoadIncrementalHFiles load = new LoadIncrementalHFiles(cfg);
	load.doBulkLoad(new Path(line.getOptionValue("p")), admin, table,
			conn.getRegionLocator(TableName.valueOf(line.getOptionValue("t"))));
}
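On newer HBase releases (2.2+), LoadIncrementalHFiles is deprecated in favor of BulkLoadHFiles. A hedged sketch of the equivalent bulk load, reusing the table and path from the usage banner above:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.tool.BulkLoadHFiles;

public class BulkLoadSketch {
    public static void main(String[] args) throws Exception {
        Configuration cfg = HBaseConfiguration.create();
        cfg.set("hbase.zookeeper.quorum", "emr-header-1:2181"); // from the example above

        // BulkLoadHFiles resolves region boundaries itself, so no Admin,
        // Table, or RegionLocator has to be passed in.
        BulkLoadHFiles.create(cfg)
            .bulkLoad(TableName.valueOf("safeclound.tb_elec_power"),
                new Path("/tmp-devops/safeclound.tb_elec_power"));
    }
}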
 
Example 2  Project: hudi  File: HBaseIndex.java
private Connection getHBaseConnection() {
  Configuration hbaseConfig = HBaseConfiguration.create();
  String quorum = config.getHbaseZkQuorum();
  hbaseConfig.set("hbase.zookeeper.quorum", quorum);
  String zkZnodeParent = config.getHBaseZkZnodeParent();
  if (zkZnodeParent != null) {
    hbaseConfig.set("zookeeper.znode.parent", zkZnodeParent);
  }
  String port = String.valueOf(config.getHbaseZkPort());
  hbaseConfig.set("hbase.zookeeper.property.clientPort", port);
  try {
    return ConnectionFactory.createConnection(hbaseConfig);
  } catch (IOException e) {
    throw new HoodieDependentSystemUnavailableException(HoodieDependentSystemUnavailableException.HBASE,
        quorum + ":" + port);
  }
}
 
Example 3  Project: hbase  File: TestTableInputFormat.java
@Override
protected void initialize(JobContext job) throws IOException {
  Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create(
      job.getConfiguration()));
  TableName tableName = TableName.valueOf("exampleTable");
  // mandatory
  initializeTable(connection, tableName);
  byte[][] inputColumns = new byte [][] { Bytes.toBytes("columnA"),
    Bytes.toBytes("columnB") };
  // optional
  Scan scan = new Scan();
  for (byte[] family : inputColumns) {
    scan.addFamily(family);
  }
  Filter exampleFilter =
    new RowFilter(CompareOperator.EQUAL, new RegexStringComparator("aa.*"));
  scan.setFilter(exampleFilter);
  setScan(scan);
}
 
Example 4  Project: phoenix  File: IndexUpgradeTool.java
@VisibleForTesting
public int executeTool() {
    Configuration conf = HBaseConfiguration.addHbaseResources(getConf());

    try (Connection conn = getConnection(conf)) {
        ConnectionQueryServices queryServices = conn.unwrap(PhoenixConnection.class)
                .getQueryServices();

        boolean status = extractTablesAndIndexes(conn.unwrap(PhoenixConnection.class));

        if (status) {
            return executeTool(conn, queryServices, conf);
        }
    } catch (SQLException e) {
        LOGGER.severe("Something went wrong in executing tool " + e);
    }
    return -1;
}
 
Example 5  Project: phoenix  File: IndexScrutinyTool.java
private Job configureSubmittableJob(Job job, Path outputPath, Class<IndexScrutinyMapperForTest> mapperClass) throws Exception {
    Configuration conf = job.getConfiguration();
    conf.setBoolean("mapreduce.job.user.classpath.first", true);
    HBaseConfiguration.merge(conf, HBaseConfiguration.create(conf));
    job.setJarByClass(IndexScrutinyTool.class);
    job.setOutputFormatClass(NullOutputFormat.class);
    if (outputInvalidRows && OutputFormat.FILE.equals(outputFormat)) {
        job.setOutputFormatClass(TextOutputFormat.class);
        FileOutputFormat.setOutputPath(job, outputPath);
    }
    job.setMapperClass((mapperClass == null ? IndexScrutinyMapper.class : mapperClass));
    job.setNumReduceTasks(0);
    // Set the Output classes
    job.setMapOutputKeyClass(Text.class);
    job.setMapOutputValueClass(Text.class);
    TableMapReduceUtil.addDependencyJars(job);
    return job;
}
 
Example 6  Project: ranger  File: HBaseRangerAuthorizationTest.java
@Test
public void testReadRowFromColFam2AsGroupIT() throws Exception {
    final Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "localhost");
    conf.set("hbase.zookeeper.property.clientPort", "" + port);
    conf.set("zookeeper.znode.parent", "/hbase-unsecure");

    String user = "public";

    UserGroupInformation ugi = UserGroupInformation.createUserForTesting(user, new String[] {"IT"});
    ugi.doAs(new PrivilegedExceptionAction<Void>() {
        public Void run() throws Exception {
            Connection conn = ConnectionFactory.createConnection(conf);
            Table table = conn.getTable(TableName.valueOf("temp"));
            
            // Read a row
            Get get = new Get(Bytes.toBytes("row1"));
            Result result = table.get(get);
            byte[] valResult = result.getValue(Bytes.toBytes("colfam2"), Bytes.toBytes("col1"));
            Assert.assertNull(valResult);

            conn.close();
            return null;
        }
    });
}
 
Example 7  Project: Kylin  File: TestHbaseClient.java
public static void main(String[] args) throws IOException {
    foo(6, 5);
    foo(5, 2);
    foo(3, 0);

    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "hbase_host");
    conf.set("zookeeper.znode.parent", "/hbase-unsecure");

    HTable table = new HTable(conf, "test1");
    Put put = new Put(Bytes.toBytes("row1"));

    put.add(Bytes.toBytes("colfam1"), Bytes.toBytes("qual1"), Bytes.toBytes("val1"));
    put.add(Bytes.toBytes("colfam1"), Bytes.toBytes("qual2"), Bytes.toBytes("val2"));

    table.put(put);
    table.close();
}
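This example uses the old HTable constructor and the Put.add(family, qualifier, value) overload, both removed in HBase 2.x. A sketch of the equivalent write using the Connection/Table API that the other examples on this page use (host and names copied from the example above):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ModernPutSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "hbase_host"); // placeholder host
        conf.set("zookeeper.znode.parent", "/hbase-unsecure");

        // Connection is heavyweight and thread-safe; Table is a lightweight handle.
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("test1"))) {
            Put put = new Put(Bytes.toBytes("row1"));
            // addColumn replaces the removed Put.add(...) overload.
            put.addColumn(Bytes.toBytes("colfam1"), Bytes.toBytes("qual1"), Bytes.toBytes("val1"));
            put.addColumn(Bytes.toBytes("colfam1"), Bytes.toBytes("qual2"), Bytes.toBytes("val2"));
            table.put(put);
        }
    }
}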
 
Example 8  Project: hbase  File: ReplicationSink.java
/**
 * Create a sink for replication
 * @param conf conf object
 * @param stopper the stoppable used to tell this thread to stop
 * @throws IOException thrown when HDFS goes bad or the file name is bad
 */
public ReplicationSink(Configuration conf, Stoppable stopper)
    throws IOException {
  this.conf = HBaseConfiguration.create(conf);
  decorateConf();
  this.metrics = new MetricsSink();
  this.walEntrySinkFilter = setupWALEntrySinkFilter();
  String className = conf.get("hbase.replication.source.fs.conf.provider",
    DefaultSourceFSConfigurationProvider.class.getCanonicalName());
  try {
    Class<? extends SourceFSConfigurationProvider> c =
        Class.forName(className).asSubclass(SourceFSConfigurationProvider.class);
    this.provider = c.getDeclaredConstructor().newInstance();
  } catch (Exception e) {
    throw new IllegalArgumentException(
        "Configured source fs configuration provider class " + className + " throws error.", e);
  }
}
 
Example 9

@BeforeClass
public static void beforeAllTests() throws Exception {
  groups = new String[] { RSGroupInfo.DEFAULT_GROUP };
  servers = generateServers(3);
  groupMap = constructGroupInfo(servers, groups);
  tableDescs = constructTableDesc(false);
  Configuration conf = HBaseConfiguration.create();
  conf.set("hbase.regions.slop", "0");
  conf.setFloat("hbase.master.balancer.stochastic.readRequestCost", 10000f);
  conf.set("hbase.rsgroup.grouploadbalancer.class",
      StochasticLoadBalancer.class.getCanonicalName());
  loadBalancer = new RSGroupBasedLoadBalancer();
  loadBalancer.setRsGroupInfoManager(getMockedGroupInfoManager());
  loadBalancer.setMasterServices(getMockedMaster());
  loadBalancer.setConf(conf);
  loadBalancer.initialize();
}
 
Example 10  Project: hbase  File: TestThriftConnection.java
private static ThriftServer startThriftServer(int port, boolean useHttp) {
  Configuration thriftServerConf = HBaseConfiguration.create(TEST_UTIL.getConfiguration());
  thriftServerConf.setInt(Constants.PORT_CONF_KEY, port);
  if (useHttp) {
    thriftServerConf.setBoolean(Constants.USE_HTTP_CONF_KEY, true);
  }
  ThriftServer server = new ThriftServer(thriftServerConf);
  Thread thriftServerThread = new Thread(() -> {
    try {
      server.run();
    } catch (Exception t) {
      LOG.error("Thrift Server failed", t);
    }
  });
  thriftServerThread.setDaemon(true);
  thriftServerThread.start();
  if (useHttp) {
    TEST_UTIL.waitFor(10000, () -> server.getHttpServer() != null);
  } else {
    TEST_UTIL.waitFor(10000, () -> server.getTserver() != null);
  }
  return server;
}
 
Example 11  Project: metron  File: HBaseDao.java
@Override
public synchronized void init(AccessConfig config) {
  if(this.tableInterface == null) {
    this.config = config;
    Map<String, Object> globalConfig = config.getGlobalConfigSupplier().get();
    if(globalConfig == null) {
      throw new IllegalStateException("Cannot find the global config.");
    }
    String table = (String)globalConfig.get(HBASE_TABLE);
    String cf = (String) config.getGlobalConfigSupplier().get().get(HBASE_CF);
    if(table == null || cf == null) {
      throw new IllegalStateException("You must configure " + HBASE_TABLE + " and " + HBASE_CF + " in the global config.");
    }
    try {
      tableInterface = config.getTableProvider().getTable(HBaseConfiguration.create(), table);
      this.cf = cf.getBytes(StandardCharsets.UTF_8);
    } catch (IOException e) {
      throw new IllegalStateException("Unable to initialize HBaseDao: " + e.getMessage(), e);
    }
  }
}
 
Example 12  Project: hbase  File: TestThriftConnection.java
private static Connection createConnection(int port, boolean useHttp) throws IOException {
  Configuration conf = HBaseConfiguration.create(TEST_UTIL.getConfiguration());
  conf.set(ConnectionUtils.HBASE_CLIENT_CONNECTION_IMPL,
      ThriftConnection.class.getName());
  if (useHttp) {
    conf.set(Constants.HBASE_THRIFT_CLIENT_BUIDLER_CLASS,
        ThriftConnection.HTTPThriftClientBuilder.class.getName());
  }
  String host = HConstants.LOCALHOST;
  if (useHttp) {
    host = "http://" + host;
  }
  conf.set(Constants.HBASE_THRIFT_SERVER_NAME, host);
  conf.setInt(Constants.HBASE_THRIFT_SERVER_PORT, port);
  return ConnectionFactory.createConnection(conf);
}
 
Example 13  Project: flink-learning  File: Main.java
private static void writeEventToHbase(String string, ParameterTool parameterTool) throws IOException {
    Configuration configuration = HBaseConfiguration.create();
    configuration.set(HBASE_ZOOKEEPER_QUORUM, parameterTool.get(HBASE_ZOOKEEPER_QUORUM));
    configuration.set(HBASE_ZOOKEEPER_PROPERTY_CLIENTPORT, parameterTool.get(HBASE_ZOOKEEPER_PROPERTY_CLIENTPORT));
    configuration.set(HBASE_RPC_TIMEOUT, parameterTool.get(HBASE_RPC_TIMEOUT));
    configuration.set(HBASE_CLIENT_OPERATION_TIMEOUT, parameterTool.get(HBASE_CLIENT_OPERATION_TIMEOUT));
    configuration.set(HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, parameterTool.get(HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD));

    Connection connect = ConnectionFactory.createConnection(configuration);
    Admin admin = connect.getAdmin();
    if (!admin.tableExists(HBASE_TABLE_NAME)) { // create the table if it does not exist yet
        admin.createTable(new HTableDescriptor(HBASE_TABLE_NAME).addFamily(new HColumnDescriptor(INFO_STREAM)));
    }
    Table table = connect.getTable(HBASE_TABLE_NAME);
    TimeStamp ts = new TimeStamp(new Date());
    Date date = ts.getDate();
    Put put = new Put(Bytes.toBytes(date.getTime()));
    put.addColumn(Bytes.toBytes(INFO_STREAM), Bytes.toBytes("test"), Bytes.toBytes(string));
    table.put(put);
    table.close();
    connect.close();
}
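HTableDescriptor and HColumnDescriptor, used above to create the table, are deprecated in HBase 2.x; Example 16 below uses their builder-based replacements. A sketch of the same tableExists/createTable step with the builders (ensureTable and its parameters are hypothetical stand-ins for the constants above):

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class TableCreationSketch {
    // tableName and family stand in for HBASE_TABLE_NAME and INFO_STREAM above.
    static void ensureTable(Admin admin, TableName tableName, byte[] family) throws IOException {
        if (!admin.tableExists(tableName)) {
            admin.createTable(TableDescriptorBuilder.newBuilder(tableName)
                    .setColumnFamily(ColumnFamilyDescriptorBuilder.of(family))
                    .build());
        }
    }
}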
 
Example 14  Project: phoenix  File: DropIndexDuringUpsertIT.java
@Before
public void doSetup() throws Exception {
    Configuration conf = HBaseConfiguration.create();
    setUpConfigForMiniCluster(conf);
    conf.setInt("hbase.client.retries.number", 2);
    conf.setInt("hbase.client.pause", 5000);
    conf.setInt("hbase.balancer.period", Integer.MAX_VALUE);
    conf.setLong(QueryServices.INDEX_FAILURE_HANDLING_REBUILD_OVERLAP_TIME_ATTRIB, 0);
    util = new HBaseTestingUtility(conf);
    util.startMiniCluster(NUM_SLAVES);
    String clientPort = util.getConfiguration().get(QueryServices.ZOOKEEPER_PORT_ATTRIB);
    url = JDBC_PROTOCOL + JDBC_PROTOCOL_SEPARATOR + LOCALHOST + JDBC_PROTOCOL_SEPARATOR + clientPort
            + JDBC_PROTOCOL_TERMINATOR + PHOENIX_TEST_DRIVER_URL_PARAM;

    Map<String, String> props = Maps.newHashMapWithExpectedSize(1);
    // Must update config before starting server
    props.put(QueryServices.DROP_METADATA_ATTRIB, Boolean.toString(true));
    driver = initAndRegisterTestDriver(url, new ReadOnlyProps(props.entrySet().iterator()));
}
 
Example 15  Project: opensoc-streaming  File: HBaseStreamPartitioner.java
public void prepare(WorkerTopologyContext context, GlobalStreamId stream, List<Integer> targetTasks) {
  
  System.out.println("preparing HBaseStreamPartitioner for streamId " + stream.get_streamId());
  this.targetTasks = targetTasks;
  this.targetTasksSize = this.targetTasks.size();

  Configuration conf = HBaseConfiguration.create();
  try {
    hTable = new HTable(conf, tableName);
    refreshRegionInfo(tableName);

    System.out.println("regionStartKeyRegionNameMap: " + regionStartKeyRegionNameMap);

  } catch (IOException e) {
    e.printStackTrace();
  }

}
 
Example 16  Project: hbase  File: BackupSystemTable.java
/**
 * Get backup system table descriptor
 * @return table's descriptor
 */
public static TableDescriptor getSystemTableForBulkLoadedDataDescriptor(Configuration conf) {
  TableDescriptorBuilder builder =
      TableDescriptorBuilder.newBuilder(getTableNameForBulkLoadedData(conf));

  ColumnFamilyDescriptorBuilder colBuilder =
      ColumnFamilyDescriptorBuilder.newBuilder(SESSIONS_FAMILY);
  colBuilder.setMaxVersions(1);
  Configuration config = HBaseConfiguration.create();
  int ttl = config.getInt(BackupRestoreConstants.BACKUP_SYSTEM_TTL_KEY,
    BackupRestoreConstants.BACKUP_SYSTEM_TTL_DEFAULT);
  colBuilder.setTimeToLive(ttl);
  ColumnFamilyDescriptor colSessionsDesc = colBuilder.build();
  builder.setColumnFamily(colSessionsDesc);
  colBuilder = ColumnFamilyDescriptorBuilder.newBuilder(META_FAMILY);
  colBuilder.setTimeToLive(ttl);
  builder.setColumnFamily(colBuilder.build());
  return builder.build();
}
 
Example 17  Project: eagle  File: TestHBaseBase.java
@BeforeClass
public static void setUpHBase() {
    Configuration configuration = HBaseConfiguration.create();
    configuration.set("zookeeper.znode.parent", getZkZnodeParent());
    configuration.setInt("hbase.master.info.port", -1);//avoid port clobbering
    configuration.setInt("hbase.regionserver.info.port", -1);//avoid port clobbering
    hbase = new HBaseTestingUtility(configuration);
    try {
        hbase.startMiniCluster();
    } catch (Exception e) {
        LOGGER.error("Error to start hbase mini cluster: " + e.getMessage(), e);
        throw new IllegalStateException(e);
    }
    System.setProperty("storage.hbase.autoCreateTable","false");
    System.setProperty("storage.hbase.zookeeperZnodeParent", getZkZnodeParent());
    System.setProperty("storage.hbase.zookeeperPropertyClientPort", String.valueOf(hbase.getZkCluster().getClientPort()));
}
 
Example 18  Project: hbase  File: TestExecutorService.java
@Test
public void testSnapshotHandlers() throws Exception {
  final Configuration conf = HBaseConfiguration.create();
  final Server server = mock(Server.class);
  when(server.getConfiguration()).thenReturn(conf);

  ExecutorService executorService = new ExecutorService("testSnapshotHandlers");
  executorService.startExecutorService(ExecutorType.MASTER_SNAPSHOT_OPERATIONS, 1);

  CountDownLatch latch = new CountDownLatch(1);
  CountDownLatch waitForEventToStart = new CountDownLatch(1);
  executorService.submit(new EventHandler(server, EventType.C_M_SNAPSHOT_TABLE) {
    @Override
    public void process() throws IOException {
      waitForEventToStart.countDown();
      try {
        latch.await();
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
      }
    }
  });

  // Wait for the EventHandler to start
  waitForEventToStart.await(10, TimeUnit.SECONDS);
  int activeCount = executorService.getExecutor(ExecutorType.MASTER_SNAPSHOT_OPERATIONS)
      .getThreadPoolExecutor().getActiveCount();
  Assert.assertEquals(1, activeCount);
  latch.countDown();
  Waiter.waitFor(conf, 3000, () -> {
    int count = executorService.getExecutor(ExecutorType.MASTER_SNAPSHOT_OPERATIONS)
        .getThreadPoolExecutor().getActiveCount();
    return count == 0;
  });
}
 
Example 19  Project: phoenix  File: UpdateStatisticsToolTest.java
@Test
public void testRestoreDirFromConfig() {
    UpdateStatisticsTool tool = new UpdateStatisticsTool();
    Configuration configuration = HBaseConfiguration.create();
    configuration.set(FS_DEFAULT_NAME_KEY, "hdfs://base-dir");
    tool.setConf(configuration);
    tool.parseArgs(new String[] {"-t", "table1", "-ms", "-runfg"});
    assertEquals("hdfs://base-dir/tmp", tool.getRestoreDir().toString());
}
 
Example 20  Project: attic-apex-malhar  File: HBaseTestHelper.java
private static Configuration getConfiguration()
{
  Configuration conf = HBaseConfiguration.create();
  conf.set("hbase.zookeeper.quorum", "127.0.0.1");
  conf.set("hbase.zookeeper.property.clientPort", "2181");
  return conf;
}
 
Example 21  Project: hbase  File: ThriftServer.java
public static void main(String [] args) throws Exception {
  LOG.info("***** STARTING service '" + ThriftServer.class.getSimpleName() + "' *****");
  VersionInfo.logVersion();
  final Configuration conf = HBaseConfiguration.create();
  // For now, the only time we return is on an argument error.
  final int status = ToolRunner.run(conf, new ThriftServer(conf), args);
  LOG.info("***** STOPPING service '" + ThriftServer.class.getSimpleName() + "' *****");
  System.exit(status);
}
 
Example 22  Project: hbase  File: TestSimpleRequestController.java
private void testIllegalArgument(String key, long value) {
  Configuration conf = HBaseConfiguration.create();
  conf.setLong(key, value);
  try {
    new SimpleRequestController(conf);
    fail("The " + key + " must be bigger than zero");
  } catch (IllegalArgumentException e) {
  }
}
 
Example 23  Project: hbase  File: TestSimpleRpcScheduler.java
@Test
public void testScanQueueWithZeroScanRatio() throws Exception {

  Configuration schedConf = HBaseConfiguration.create();
  schedConf.setFloat(RpcExecutor.CALL_QUEUE_HANDLER_FACTOR_CONF_KEY, 1.0f);
  schedConf.setFloat(RWQueueRpcExecutor.CALL_QUEUE_READ_SHARE_CONF_KEY, 0.5f);
  schedConf.setFloat(RWQueueRpcExecutor.CALL_QUEUE_SCAN_SHARE_CONF_KEY, 0f);

  PriorityFunction priority = mock(PriorityFunction.class);
  when(priority.getPriority(any(), any(), any())).thenReturn(HConstants.NORMAL_QOS);

  RpcScheduler scheduler = new SimpleRpcScheduler(schedConf, 2, 1, 1, priority,
                                                  HConstants.QOS_THRESHOLD);
  assertNotEquals(null, scheduler);
}
 
Example 24  Project: jstorm  File: AbstractHBaseClient.java
public Configuration makeConf(Map stormConf) {
    Configuration hbaseConf = HBaseConfiguration.create();
    String hbaseQuorum = (String) stormConf.get(HBASE_QUORUM_CONF_KEY);
    hbaseConf.set(HBASE_QUORUM_KEY, hbaseQuorum);

    String hbasePort = stormConf.get(HBASE_PORT_CONF_KEY) + "";
    hbaseConf.set(HBASE_PORT_KEY, hbasePort);

    String hbaseParent = (String) stormConf.get(HBASE_ZK_PARENT_CONF_KEY);
    hbaseConf.set(HBASE_ZK_PARENT_KEY, hbaseParent);

    return hbaseConf;
}
 
Example 25  Project: phoenix  File: IndexUpgradeToolTest.java
@Test
public void testConnectionProperties() throws Exception {
    Configuration conf = HBaseConfiguration.create();

    long indexRebuildQueryTimeoutMs = 2000;
    long indexRebuildRpcTimeoutMs = 3000;
    long indexRebuildClientScannerTimeoutMs = 4000;
    int indexRebuildRpcRetryCount = 10;

    conf.setLong(QueryServices.INDEX_REBUILD_QUERY_TIMEOUT_ATTRIB, indexRebuildQueryTimeoutMs);
    conf.setLong(QueryServices.INDEX_REBUILD_RPC_TIMEOUT_ATTRIB, indexRebuildRpcTimeoutMs);
    conf.setLong(QueryServices.INDEX_REBUILD_CLIENT_SCANNER_TIMEOUT_ATTRIB,
            indexRebuildClientScannerTimeoutMs);
    conf.setInt(QueryServices.INDEX_REBUILD_RPC_RETRIES_COUNTER, indexRebuildRpcRetryCount);

    // prepare conf for connectionless query
    setupConfForConnectionlessQuery(conf);

    try (Connection conn = IndexUpgradeTool.getConnection(conf)) {
        // verify connection properties for phoenix, hbase timeouts and retries
        Assert.assertEquals(conn.getClientInfo(QueryServices.THREAD_TIMEOUT_MS_ATTRIB),
                Long.toString(indexRebuildQueryTimeoutMs));
        Assert.assertEquals(conn.getClientInfo(HConstants.HBASE_RPC_TIMEOUT_KEY),
                Long.toString(indexRebuildRpcTimeoutMs));
        Assert.assertEquals(conn.getClientInfo(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD),
                Long.toString(indexRebuildClientScannerTimeoutMs));
        Assert.assertEquals(conn.getClientInfo(HConstants.HBASE_CLIENT_RETRIES_NUMBER),
                Long.toString(indexRebuildRpcRetryCount));
    }
}
 
Example 26  Project: hbase  File: TestHeapMemoryManager.java
@Test
public void testPluggingInHeapMemoryTuner() throws Exception {
  BlockCacheStub blockCache = new BlockCacheStub((long) (maxHeapSize * 0.4));
  MemstoreFlusherStub memStoreFlusher = new MemstoreFlusherStub((long) (maxHeapSize * 0.4));
  Configuration conf = HBaseConfiguration.create();
  conf.setFloat(HeapMemoryManager.MEMSTORE_SIZE_MAX_RANGE_KEY, 0.78f);
  conf.setFloat(HeapMemoryManager.MEMSTORE_SIZE_MIN_RANGE_KEY, 0.05f);
  conf.setFloat(HeapMemoryManager.BLOCK_CACHE_SIZE_MAX_RANGE_KEY, 0.75f);
  conf.setFloat(HeapMemoryManager.BLOCK_CACHE_SIZE_MIN_RANGE_KEY, 0.02f);
  conf.setLong(HeapMemoryManager.HBASE_RS_HEAP_MEMORY_TUNER_PERIOD, 1000);
  conf.setInt(DefaultHeapMemoryTuner.NUM_PERIODS_TO_IGNORE, 0);
  conf.setClass(HeapMemoryManager.HBASE_RS_HEAP_MEMORY_TUNER_CLASS, CustomHeapMemoryTuner.class,
      HeapMemoryTuner.class);
  // Let the system start with default values for memstore heap and block cache size.
  HeapMemoryManager heapMemoryManager = new HeapMemoryManager(blockCache, memStoreFlusher,
      new RegionServerStub(conf), new RegionServerAccountingStub(conf));
  final ChoreService choreService = new ChoreService("TEST_SERVER_NAME");
  heapMemoryManager.start(choreService);
  // Now we want to be in write mode. Set a bigger memstore size via CustomHeapMemoryTuner
  CustomHeapMemoryTuner.memstoreSize = 0.78f;
  CustomHeapMemoryTuner.blockCacheSize = 0.02f;
  // Allow the tuner to run once and make the necessary memory adjustments
  waitForTune(memStoreFlusher, memStoreFlusher.memstoreSize);
  assertHeapSpace(0.78f, memStoreFlusher.memstoreSize);// Memstore
  assertHeapSpace(0.02f, blockCache.maxSize);// BlockCache
  // Now we want to be in read mode. Set a bigger block cache size via CustomHeapMemoryTuner
  CustomHeapMemoryTuner.blockCacheSize = 0.75f;
  CustomHeapMemoryTuner.memstoreSize = 0.05f;
  // Allow the tuner to run once and make the necessary memory adjustments
  waitForTune(memStoreFlusher, memStoreFlusher.memstoreSize);
  assertHeapSpace(0.75f, blockCache.maxSize);// BlockCache
  assertHeapSpace(0.05f, memStoreFlusher.memstoreSize);// Memstore
}
 
Example 27  Project: bigdata-tutorial  File: Mapper2HbaseDemo.java
@Override
protected void setup(Context context) throws IOException,
		InterruptedException {
	super.setup(context);
	conf = HBaseConfiguration.create(context.getConfiguration());
	conf.set("hbase.zookeeper.quorum", "zk1.hadoop,zk2.hadoop,zk3.hadoop");
	conf.set("hbase.zookeeper.property.clientPort", "2181");

	htable = new HTable(conf, "micmiu");
	htable.setAutoFlush(false);
	htable.setWriteBufferSize(12 * 1024 * 1024); // 12 MB
	wal = true;
}
 
Example 28  Project: hbase  File: TestZKUtilNoServer.java
@Test
public void testCreateACLWithSameUser() throws IOException {
  Configuration conf = HBaseConfiguration.create();
  conf.set(Superusers.SUPERUSER_CONF_KEY, "user4,@group1,user5,user6");
  UserGroupInformation.setLoginUser(UserGroupInformation.createRemoteUser("user4"));
  String node = "/hbase/testCreateACL";
  ZKWatcher watcher = new ZKWatcher(conf, node, null, false);
  List<ACL> aclList = ZKUtil.createACL(watcher, node, true);
  assertEquals(3, aclList.size()); // 3, since the service user is the same as one of the superusers
  assertFalse(aclList.contains(new ACL(Perms.ALL, new Id("sasl", "@group1"))));
  assertTrue(aclList.contains(new ACL(Perms.ALL, new Id("auth", ""))));
  assertTrue(aclList.contains(new ACL(Perms.ALL, new Id("sasl", "user5"))));
  assertTrue(aclList.contains(new ACL(Perms.ALL, new Id("sasl", "user6"))));
}
 
Example 29  Project: phoenix  File: Sandbox.java
public static void main(String[] args) throws Exception {
    System.out.println("Starting Phoenix sandbox");
    Configuration conf = HBaseConfiguration.create();
    BaseTest.setUpConfigForMiniCluster(conf, new ReadOnlyProps(ImmutableMap.<String, String>of()));

    final HBaseTestingUtility testUtil = new HBaseTestingUtility(conf);
    testUtil.startMiniCluster();

    Runtime.getRuntime().addShutdownHook(new Thread() {
        @Override
        public void run() {
            try {
                if (testUtil != null) {
                    testUtil.shutdownMiniCluster();
                }
            } catch (Exception e) {
                LOG.error("Exception caught when shutting down mini cluster", e);
            }
        }
    });

    int clientPort = testUtil.getZkCluster().getClientPort();
    System.out.println("\n\n\tPhoenix Sandbox is started\n\n");
    System.out.printf("\tYou can now connect with url 'jdbc:phoenix:localhost:%d'\n" +
                    "\tor connect via sqlline with 'bin/sqlline.py localhost:%d'\n\n",
            clientPort, clientPort);

    Thread.sleep(Long.MAX_VALUE);
}
 
Example 30  Project: hbase  File: TestStripeCompactionPolicy.java
@Test
public void testNothingToCompactFromL0() throws Exception {
  Configuration conf = HBaseConfiguration.create();
  conf.setInt(StripeStoreConfig.MIN_FILES_L0_KEY, 4);
  StripeCompactionPolicy.StripeInformationProvider si = createStripesL0Only(3, 10);
  StripeCompactionPolicy policy = createPolicy(conf);
  verifyNoCompaction(policy, si);

  si = createStripes(3, KEY_A);
  verifyNoCompaction(policy, si);
}
 