org.apache.hadoop.hbase.client.HBaseAdmin#getTableRegions() code examples

The examples below show how org.apache.hadoop.hbase.client.HBaseAdmin#getTableRegions() is used in real open-source projects; the original source for each example can be found on GitHub.
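
Before the project examples, here is a minimal, self-contained sketch of the call itself, assuming the HBase 1.x client API (where Connection.getAdmin() is backed by HBaseAdmin and regions are described by HRegionInfo). The table name "my_table" is a placeholder, not taken from any of the examples below.

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.util.Bytes;

public class ListRegions {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf)) {
      // The cast mirrors the examples below; getTableRegions(TableName) returns
      // one HRegionInfo per region of the table.
      HBaseAdmin admin = (HBaseAdmin) connection.getAdmin();
      try {
        List<HRegionInfo> regions = admin.getTableRegions(TableName.valueOf("my_table"));
        for (HRegionInfo region : regions) {
          // Print the encoded region name and its [startKey, endKey) range.
          System.out.println(region.getEncodedName()
              + " [" + Bytes.toStringBinary(region.getStartKey())
              + ", " + Bytes.toStringBinary(region.getEndKey()) + ")");
        }
      } finally {
        admin.close();
      }
    }
  }
}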

Example 1  Project: examples   File: Merge.java
public static void main(String[] args) throws MasterNotRunningException, ZooKeeperConnectionException, IOException {
  // tag::MERGE1[]
  Configuration conf = HBaseConfiguration.create();
  Connection connection = ConnectionFactory.createConnection(conf);
  HBaseAdmin admin = (HBaseAdmin)connection.getAdmin();
  List<HRegionInfo> regions = admin.getTableRegions(TableName.valueOf("t1")); //<1>
  LOG.info("testtable contains " + regions.size() + " regions.");
  for (int index = 0; index < regions.size() / 2; index++) {
    HRegionInfo region1 = regions.get(index*2);
    HRegionInfo region2 = regions.get(index*2+1);
    LOG.info("Merging regions " + region1 + " and " + region2);
    admin.mergeRegions(region1.getEncodedNameAsBytes(), 
                       region2.getEncodedNameAsBytes(), false); //<2>
  }
  admin.close();
  // end::MERGE1[]
}
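
One note on this example: mergeRegions() with forcible set to false asks the master to merge two adjacent regions and returns without waiting for the merge to finish. A caller that needs to see the result can poll getTableRegions() until the region count drops, much like the wait loops in the later examples. Below is a minimal sketch of such a helper, reusing the imports from the sketch above; the method name and the 100 ms poll interval are illustrative choices, not part of the original example.

// Hypothetical helper: block until the table has at most `expected` regions.
// The merge itself is applied asynchronously by the master.
static void waitForRegionCount(HBaseAdmin admin, TableName table, int expected)
    throws IOException, InterruptedException {
  while (admin.getTableRegions(table).size() > expected) {
    Thread.sleep(100);
  }
}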
 
Example 2
@Test
public void testCachedConnections() throws Exception {
  final String tableName = generateUniqueName();
  final String index1Name = generateUniqueName();
  final Connection conn = DriverManager.getConnection(getUrl());

  final HBaseAdmin admin = getUtility().getHBaseAdmin();
  final MiniHBaseCluster cluster = getUtility().getHBaseCluster();
  final HRegionServer regionServer = cluster.getRegionServer(0);
  Configuration conf = admin.getConfiguration();
  final int noOfOrgs = 20;
  final AtomicBoolean flag = new AtomicBoolean();
  flag.set(false);
  // create table and indices
  String createTableSql = "CREATE TABLE " + tableName
      + "(org_id VARCHAR NOT NULL PRIMARY KEY, v1 INTEGER, v2 INTEGER, v3 INTEGER) VERSIONS=1 SPLIT ON ('"
      + ORG_PREFIX + "-" + noOfOrgs / 2 + "')";
  conn.createStatement().execute(createTableSql);
  conn.createStatement().execute("CREATE INDEX " + index1Name + " ON " + tableName + "(v1)");
  List<HRegionInfo> regions = admin.getTableRegions(TableName.valueOf(tableName));
  final HRegionInfo regionInfo = regions.get(0);

  writeToTable(tableName, conn, noOfOrgs);
  int beforeRegionCloseCount = getActiveConnections(regionServer, conf);
  int regionsCount = admin.getOnlineRegions(regionServer.getServerName()).size();
  admin.unassign(regionInfo.getEncodedNameAsBytes(), true);
  // busy-wait until the unassigned region has actually been closed on this region server
  while (admin.getOnlineRegions(regionServer.getServerName()).size() >= regionsCount);
  int afterRegionCloseCount = getActiveConnections(regionServer, conf);
  assertTrue("Cached connections not closed when region closes: ",
  afterRegionCloseCount == beforeRegionCloseCount && afterRegionCloseCount > 0);

}
 
Example 3  Project: spliceengine   File: CompactionSplitIT.java
@Test
public void testSplitRegion() throws Throwable {
    String tableName = "D";
    String columnName = "A";
    String schema = SCHEMA;
    String query = String.format("select * from %s order by %s", tableName, columnName);

    ResultSet rs = classWatcher.executeQuery(query);
    String actualResult = TestUtils.FormattedResult.ResultFactory.toStringUnsorted(rs);
    SConfiguration config = HConfiguration.getConfiguration();
    HBaseTestingUtility testingUtility = new HBaseTestingUtility((Configuration) config.getConfigSource().unwrapDelegate());
    HBaseAdmin admin = testingUtility.getHBaseAdmin();
    TableName tn = TableName.valueOf(config.getNamespace(),
                                     Long.toString(TestUtils.baseTableConglomerateId(classWatcher.getOrCreateConnection(), SCHEMA, "D")));

    for (HRegionInfo info : admin.getTableRegions(tn)) {
        System.out.println(info.getRegionNameAsString() + " - " + Bytes.toStringBinary(info.getRegionName()) + " - " + info.getEncodedName());
        CallableStatement callableStatement = classWatcher.getOrCreateConnection().
            prepareCall("call SYSCS_UTIL.SYSCS_SPLIT_REGION_AT_POINTS(?,?)");
        callableStatement.setString(1, info.getEncodedName());
        callableStatement.setString(2, "");  // an empty split point is turned into a null argument (HBase decides where to split)

        assertTrue(HBaseTestUtils.setBlockPostSplit(true));

        helpTestProc(callableStatement, 10, classWatcher, query, actualResult);

        assertTrue(HBaseTestUtils.setBlockPostSplit(false));

        helpTestProc(callableStatement, 10, classWatcher, query, actualResult);
    }
}
 
Example 4  Project: spliceengine   File: SpliceRegionAdminIT.java
@Test
public void testTable() throws Exception {

    Connection connection = methodWatcher.getOrCreateConnection();

    SConfiguration config = HConfiguration.getConfiguration();
    HBaseTestingUtility testingUtility = new HBaseTestingUtility((Configuration) config.getConfigSource().unwrapDelegate());
    HBaseAdmin admin = testingUtility.getHBaseAdmin();

    long conglomerateId = TableSplit.getConglomerateId(connection, SCHEMA_NAME, LINEITEM, null);
    TableName tn = TableName.valueOf(config.getNamespace(),Long.toString(conglomerateId));
    List<HRegionInfo> partitions = admin.getTableRegions(tn);
    for (HRegionInfo partition : partitions) {
        String startKey = Bytes.toStringBinary(partition.getStartKey());
        int index = Collections.binarySearch(hbaseTableSplitKeys, startKey);
        String encodedRegionName = partition.getEncodedName();
        PreparedStatement ps = methodWatcher.prepareStatement("CALL SYSCS_UTIL.GET_START_KEY(?,?,null,?)");
        ps.setString(1, SCHEMA_NAME);
        ps.setString(2, LINEITEM);
        ps.setString(3, encodedRegionName);

        ResultSet rs = ps.executeQuery();
        rs.next();
        String result = rs.getString(1);
        Assert.assertEquals(spliceTableSplitKeys.get(index), result);
    }
}
 
Example 5  Project: spliceengine   File: SpliceRegionAdminIT.java
@Test
public void testIndex() throws Exception {

    Connection connection = methodWatcher.getOrCreateConnection();
    SConfiguration config = HConfiguration.getConfiguration();
    HBaseTestingUtility testingUtility = new HBaseTestingUtility((Configuration) config.getConfigSource().unwrapDelegate());
    HBaseAdmin admin = testingUtility.getHBaseAdmin();

    long conglomerateId = TableSplit.getConglomerateId(connection, SCHEMA_NAME, ORDERS, CUST_IDX);
    TableName tn = TableName.valueOf(config.getNamespace(),Long.toString(conglomerateId));
    List<HRegionInfo> partitions = admin.getTableRegions(tn);
    for (HRegionInfo partition : partitions) {
        String startKey = Bytes.toStringBinary(partition.getStartKey());
        int index = Collections.binarySearch(hbaseIndexSplitKeys, startKey);
        String encodedRegionName = partition.getEncodedName();
        PreparedStatement ps = methodWatcher.prepareStatement("CALL SYSCS_UTIL.GET_START_KEY(?,?,?,?)");
        ps.setString(1, SCHEMA_NAME);
        ps.setString(2, ORDERS);
        ps.setString(3, CUST_IDX);
        ps.setString(4, encodedRegionName);

        ResultSet rs = ps.executeQuery();
        rs.next();
        String result = rs.getString(1);
        Assert.assertEquals(spliceIndexSplitKeys.get(index), result);
    }
}
 
Example 6  Project: phoenix   File: MutableIndexFailureIT.java
@Test(timeout=300000)
public void testWriteFailureWithRegionServerDown() throws Exception {
    String query;
    ResultSet rs;

    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    Connection conn = driver.connect(url, props);
    conn.setAutoCommit(false);
    conn.createStatement().execute(
            "CREATE TABLE " + DATA_TABLE_FULL_NAME + " (k VARCHAR NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR)");
    query = "SELECT * FROM " + DATA_TABLE_FULL_NAME;
    rs = conn.createStatement().executeQuery(query);
    assertFalse(rs.next());

    conn.createStatement().execute(
            "CREATE INDEX " + INDEX_TABLE_NAME + " ON " + DATA_TABLE_FULL_NAME + " (v1) INCLUDE (v2)");
    query = "SELECT * FROM " + INDEX_TABLE_FULL_NAME;
    rs = conn.createStatement().executeQuery(query);
    assertFalse(rs.next());

    // Verify that the metadata for the index is correct.
    rs = conn.getMetaData().getTables(null, StringUtil.escapeLike(SCHEMA_NAME), INDEX_TABLE_NAME,
            new String[] { PTableType.INDEX.toString() });
    assertTrue(rs.next());
    assertEquals(INDEX_TABLE_NAME, rs.getString(3));
    assertEquals(PIndexState.ACTIVE.toString(), rs.getString("INDEX_STATE"));
    assertFalse(rs.next());
    
    PreparedStatement stmt = conn.prepareStatement("UPSERT INTO " + DATA_TABLE_FULL_NAME + " VALUES(?,?,?)");
    stmt.setString(1, "a");
    stmt.setString(2, "x");
    stmt.setString(3, "1");
    stmt.execute();
    conn.commit();
    
    // find an RS that doesn't host the SYSTEM.CATALOG table
    TableName catalogTable = TableName.valueOf("SYSTEM.CATALOG");
    TableName indexTable = TableName.valueOf(INDEX_TABLE_FULL_NAME);
    final HBaseCluster cluster = this.util.getHBaseCluster();
    Collection<ServerName> rss = cluster.getClusterStatus().getServers();
    HBaseAdmin admin = this.util.getHBaseAdmin();
    List<HRegionInfo> regions = admin.getTableRegions(catalogTable);
    ServerName catalogRS = cluster.getServerHoldingRegion(regions.get(0).getTable(),
            regions.get(0).getRegionName());
    ServerName metaRS = cluster.getServerHoldingMeta();
    ServerName rsToBeKilled = null;
    
    // find the first RS that isn't holding the META or CATALOG table
    for(ServerName curRS : rss) {
        if(!curRS.equals(catalogRS) && !metaRS.equals(curRS)) {
            rsToBeKilled = curRS;
            break;
        }
    }
    assertTrue(rsToBeKilled != null);
    
    regions = admin.getTableRegions(indexTable);
    final HRegionInfo indexRegion = regions.get(0);
    final ServerName dstRS = rsToBeKilled;
    admin.move(indexRegion.getEncodedNameAsBytes(), Bytes.toBytes(rsToBeKilled.getServerName()));
    this.util.waitFor(30000, 200, new Waiter.Predicate<Exception>() {
        @Override
        public boolean evaluate() throws Exception {
          ServerName sn = cluster.getServerHoldingRegion(indexRegion.getTable(),
                  indexRegion.getRegionName());
          return (sn != null && sn.equals(dstRS));
        }
      });
    
    // use a timer to send updates every 10 ms
    this.scheduleTimer = new Timer(true);
    this.scheduleTimer.schedule(new SendingUpdatesScheduleTask(conn), 0, 10);
    // let the timer send some updates
    Thread.sleep(100);
    
    // kill the RS hosting the index table
    this.util.getHBaseCluster().killRegionServer(rsToBeKilled);
    
    // wait until the index table completes recovery
    this.util.waitUntilAllRegionsAssigned(indexTable);
    
    // Verify that the metadata for the index is correct.
    do {
      Thread.sleep(15 * 1000); // sleep 15 secs
      rs = conn.getMetaData().getTables(null, StringUtil.escapeLike(SCHEMA_NAME), INDEX_TABLE_NAME,
          new String[] { PTableType.INDEX.toString() });
      assertTrue(rs.next());
      if(PIndexState.ACTIVE.toString().equals(rs.getString("INDEX_STATE"))){
          break;
      }
    } while(true);
    this.scheduleTimer.cancel();
    
    assertEquals(1, cluster.getClusterStatus().getDeadServers());
}
 
Example 7  Project: phoenix   File: LocalIndexIT.java
@Test
public void testLocalIndexStateWhenSplittingInProgress() throws Exception {
    createBaseTable(TestUtil.DEFAULT_DATA_TABLE_NAME+"2", null, "('e','j','o')");
    Connection conn1 = DriverManager.getConnection(getUrl());
    try{
        String[] strings = {"a","b","c","d","e","f","g","h","i","j","k","l","m","n","o","p","q","r","s","t","u","v","w","x","y","z"};
        for (int i = 0; i < 26; i++) {
            conn1.createStatement().execute(
                "UPSERT INTO " + TestUtil.DEFAULT_DATA_TABLE_NAME+"2" + " values('"+strings[i]+"'," + i + ","
                        + (i + 1) + "," + (i + 2) + ",'" + strings[25 - i] + "')");
        }
        conn1.commit();
        conn1.createStatement().execute("CREATE LOCAL INDEX " + TestUtil.DEFAULT_INDEX_TABLE_NAME + " ON " + TestUtil.DEFAULT_DATA_TABLE_NAME+"2" + "(v1)");
        conn1.createStatement().execute("CREATE LOCAL INDEX " + TestUtil.DEFAULT_INDEX_TABLE_NAME + "_2 ON " + TestUtil.DEFAULT_DATA_TABLE_NAME+"2" + "(k3)");

        ResultSet rs = conn1.createStatement().executeQuery("SELECT * FROM " + TestUtil.DEFAULT_DATA_TABLE_NAME+"2");
        assertTrue(rs.next());
        HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
        HTableDescriptor tableDesc = admin.getTableDescriptor(TableName.valueOf(TestUtil.DEFAULT_DATA_TABLE_NAME+"2"));
        tableDesc.removeCoprocessor(LocalIndexSplitter.class.getName());
        tableDesc.addCoprocessor(MockedLocalIndexSplitter.class.getName(), null,
            1, null);
        admin.disableTable(tableDesc.getTableName());
        admin.modifyTable(tableDesc.getTableName(), tableDesc);
        admin.enableTable(tableDesc.getTableName());
        TableName indexTable =
                TableName.valueOf(MetaDataUtil.getLocalIndexTableName(TestUtil.DEFAULT_DATA_TABLE_NAME+"2"));
        HTableDescriptor indexTableDesc = admin.getTableDescriptor(indexTable);
        indexTableDesc.removeCoprocessor(IndexHalfStoreFileReaderGenerator.class.getName());
        indexTableDesc.addCoprocessor(MockedIndexHalfStoreFileReaderGenerator.class.getName(), null,
            1, null);
        admin.disableTable(indexTable);
        admin.modifyTable(indexTable, indexTableDesc);
        admin.enableTable(indexTable);

        admin.split(Bytes.toBytes(TestUtil.DEFAULT_DATA_TABLE_NAME+"2"), ByteUtil.concat(Bytes.toBytes(strings[3])));
        List<HRegionInfo> regionsOfUserTable =
                admin.getTableRegions(TableName.valueOf(TestUtil.DEFAULT_DATA_TABLE_NAME+"2"));

        while (regionsOfUserTable.size() != 5) {
            Thread.sleep(100);
            regionsOfUserTable = admin.getTableRegions(TableName.valueOf(TestUtil.DEFAULT_DATA_TABLE_NAME+"2"));
        }
        assertEquals(5, regionsOfUserTable.size());

        List<HRegionInfo> regionsOfIndexTable = admin.getTableRegions(indexTable);

        while (regionsOfIndexTable.size() != 5) {
            Thread.sleep(100);
            regionsOfIndexTable = admin.getTableRegions(indexTable);
        }

        assertEquals(5, regionsOfIndexTable.size());
        latch1.await();
        // Verify that the metadata for the index is correct.
        rs = conn1.getMetaData().getTables(null, StringUtil.escapeLike(TestUtil.DEFAULT_SCHEMA_NAME), TestUtil.DEFAULT_INDEX_TABLE_NAME,
                new String[] { PTableType.INDEX.toString() });
        assertTrue(rs.next());
        assertEquals(TestUtil.DEFAULT_INDEX_TABLE_NAME, rs.getString(3));
        assertEquals(PIndexState.INACTIVE.toString(), rs.getString("INDEX_STATE"));
        assertFalse(rs.next());
        rs = conn1.getMetaData().getTables(null, StringUtil.escapeLike(TestUtil.DEFAULT_SCHEMA_NAME), TestUtil.DEFAULT_INDEX_TABLE_NAME+"_2",
            new String[] { PTableType.INDEX.toString() });
        assertTrue(rs.next());
        assertEquals(TestUtil.DEFAULT_INDEX_TABLE_NAME+"_2", rs.getString(3));
        assertEquals(PIndexState.INACTIVE.toString(), rs.getString("INDEX_STATE"));
        assertFalse(rs.next());

        String query = "SELECT t_id,k1,v1 FROM " + TestUtil.DEFAULT_DATA_TABLE_NAME+"2";
        rs = conn1.createStatement().executeQuery("EXPLAIN " + query);
        assertEquals("CLIENT PARALLEL " + 1 + "-WAY FULL SCAN OVER " + TestUtil.DEFAULT_DATA_TABLE_NAME+"2",
            QueryUtil.getExplainPlan(rs));
        latch2.countDown();
    } finally {
        conn1.close();
        latch1.countDown();
        latch2.countDown();
    }
}
 
Example 8  Project: spliceengine   File: CostEstimationIT.java
@Test
public void testCardinalityAfterTableSplit() throws Exception {
    SConfiguration config = HConfiguration.getConfiguration();
    HBaseTestingUtility testingUtility = new HBaseTestingUtility((Configuration) config.getConfigSource().unwrapDelegate());
    HBaseAdmin admin = testingUtility.getHBaseAdmin();
    TableName tableName = TableName.valueOf(config.getNamespace(),
            Long.toString(TestUtils.baseTableConglomerateId(spliceClassWatcher.getOrCreateConnection(),
                    spliceSchemaWatcher.toString(), "T2")));

    List<HRegionInfo> regions = admin.getTableRegions(tableName);
    int size1 = regions.size();

    if (size1 >= 2) {
        // expect at least 2 partitions if the table has split
        String sqlText = "explain select * from --splice-properties joinOrder=fixed \n" +
                "t1, t2 --splice-properties joinStrategy=NESTEDLOOP \n" +
                "where c1=c2";

        double outputRows = parseOutputRows(getExplainMessage(4, sqlText, methodWatcher));
        Assert.assertTrue(format("OutputRows is expected to be greater than 1, actual is %s", outputRows), outputRows > 1);

        /* split the table at value 30 */
        methodWatcher.executeUpdate(format("CALL SYSCS_UTIL.SYSCS_SPLIT_TABLE_OR_INDEX_AT_POINTS('%s', '%s', null, '%s')",
                spliceSchemaWatcher.toString(), "T2", "\\x9E"));

        regions = admin.getTableRegions(tableName);
        int size2 = regions.size();

        if (size2 >= 3) {
            // expect at least 3 partitions if the table split happened
            /* The two newly split partitions do not have stats. Ideally, we should re-collect stats,
             * but if we haven't, explain should reflect the stats from the remaining partitions.
             * For the current test case, t2 has some partition stats missing; without the fix for SPLICE-1452,
             * its cardinality estimation assumes uniqueness for all non-null rows, which is too conservative,
             * so we end up estimating 1 output row from t2 for each outer-table row from t1.
             * With the SPLICE-1452 fix, we should see a higher number of output rows from t2.
             */
            outputRows = parseOutputRows(getExplainMessage(4, sqlText, methodWatcher));
            Assert.assertTrue(format("OutputRows is expected to be greater than 1, actual is %s", outputRows), outputRows > 1);
        }
    }
}