下面列出了 org.apache.hadoop.hbase.client.HBaseAdmin#modifyTable() 的实例代码;也可以点击链接前往 GitHub 查看完整源代码,或在右侧发表评论。
/**
 * Reinstalls the Kylin coprocessors on the given table.
 *
 * <p>Workflow: disable the table, strip every registered entry of the
 * observer and endpoint coprocessor classes from its descriptor, re-add
 * them pointing at the supplied HDFS jar, push the modified descriptor,
 * and finally re-enable the table.
 *
 * @param tableName          name of the HBase table to update
 * @param hbaseAdmin         admin handle used for the DDL operations
 * @param hdfsCoprocessorJar HDFS path of the coprocessor jar to install
 * @throws IOException if any of the HBase admin operations fails
 */
public static void resetCoprocessor(String tableName, HBaseAdmin hbaseAdmin, Path hdfsCoprocessorJar) throws IOException {
    logger.info("Disable " + tableName);
    hbaseAdmin.disableTable(tableName);

    logger.info("Unset coprocessor on " + tableName);
    HTableDescriptor desc = hbaseAdmin.getTableDescriptor(TableName.valueOf(tableName));
    stripAllCoprocessorEntries(desc, OBSERVER_CLS_NAME);
    stripAllCoprocessorEntries(desc, ENDPOINT_CLS_NAMAE);

    addCoprocessorOnHTable(desc, hdfsCoprocessorJar);
    hbaseAdmin.modifyTable(tableName, desc);

    logger.info("Enable " + tableName);
    hbaseAdmin.enableTable(tableName);
}

/**
 * Removes every coprocessor entry for {@code className} from the descriptor.
 * A descriptor may carry the same class more than once, hence the loop.
 */
private static void stripAllCoprocessorEntries(HTableDescriptor desc, String className) {
    while (desc.hasCoprocessor(className)) {
        desc.removeCoprocessor(className);
    }
}
@Test
public void testRetriveHtableHost() throws IOException {
    // Scans every table for a "KYLIN_HOST" attribute and, where present,
    // tags the table with an extra "KYLIN_HOST_ANOTHER" attribute via the
    // standard disable / modifyTable / enable cycle.
    Configuration conf = HBaseConfiguration.create();
    // FIX: the admin connection was only closed on the success path; any
    // exception while iterating leaked it. try-with-resources closes it
    // on every exit path (HBaseAdmin implements Closeable).
    try (HBaseAdmin hbaseAdmin = new HBaseAdmin(conf)) {
        HTableDescriptor[] tableDescriptors = hbaseAdmin.listTables();
        for (HTableDescriptor table : tableDescriptors) {
            String value = table.getValue("KYLIN_HOST");
            if (value != null) {
                System.out.println(table.getTableName());
                System.out.println("host is " + value);
                // Table must be disabled before its descriptor can be modified.
                hbaseAdmin.disableTable(table.getTableName());
                table.setValue("KYLIN_HOST_ANOTHER", "dev02");
                hbaseAdmin.modifyTable(table.getTableName(), table);
                hbaseAdmin.enableTable(table.getTableName());
            }
        }
    }
}
/**
 * Demo driver: deletes, recreates, describes, alters (drop family "cf2",
 * add family "cf3"), re-describes, and finally deletes the "demo_test"
 * table against the configured ZooKeeper quorum.
 *
 * <p>Fixes over the original: {@code htable} was never closed, and
 * {@code admin} / {@code connPool} were leaked whenever an earlier call
 * threw. All three are now released via nested try/finally blocks while
 * preserving the original order of operations and console output.
 */
public static void main(String[] args) throws Exception {
    String quorum = "192.168.0.30,192.168.0.31,192.168.0.32";
    //quorum = "192.168.8.191,192.168.1.192,192.168.1.193";
    int port = 2181;
    String znode = "/hyperbase1";
    HBaseConnPool connPool = new HBaseClientManager(quorum, port, znode);
    try {
        HBaseDDLHandler ddlHandler = new HBaseDDLHandler(connPool);
        String tableName = "demo_test";
        System.out.println("=============================== : delete");
        ddlHandler.deleteTable(tableName);
        String columnFamily = "cf";
        System.out.println("=============================== : create");
        ddlHandler.createTable(tableName, columnFamily, "cf2");
        System.out.println("=============================== : desc");
        HBaseUtils.printTableInfo(ddlHandler.getTable(tableName));
        System.out.println("=============================== : alter");
        HBaseAdmin admin = new HBaseAdmin(connPool.getConn());
        try {
            // Table must be disabled before schema changes are allowed.
            admin.disableTable(tableName);
            HTableInterface htable = ddlHandler.getTable(tableName);
            try {
                HTableDescriptor tableDesc = admin.getTableDescriptor(htable.getTableName());
                tableDesc.removeFamily(Bytes.toBytes("cf2"));
                HColumnDescriptor newhcd = new HColumnDescriptor("cf3");
                newhcd.setMaxVersions(2);
                newhcd.setKeepDeletedCells(KeepDeletedCells.TRUE);
                tableDesc.addFamily(newhcd);
                admin.modifyTable(tableName, tableDesc);
                admin.enableTable(tableName);
            } finally {
                htable.close(); // was never closed in the original
            }
        } finally {
            admin.close();
        }
        System.out.println("=============================== : desc");
        HBaseUtils.printTableInfo(ddlHandler.getTable(tableName));
        System.out.println("=============================== : delete");
        ddlHandler.deleteTable(tableName);
    } finally {
        connPool.closeConn();
    }
}
/**
 * Sets the instance's {@code metadataKey}/{@code metadataValue} pair on the
 * table named by the {@code tableName} field, using the standard
 * disable / modifyTable / enable cycle.
 *
 * @throws IOException if any HBase admin operation fails
 */
private void alter() throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // FIX: close() was not reached if disable/modify/enable threw, leaking
    // the admin connection. try-with-resources releases it on every path.
    try (HBaseAdmin hbaseAdmin = new HBaseAdmin(conf)) {
        HTableDescriptor table = hbaseAdmin.getTableDescriptor(TableName.valueOf(tableName));
        hbaseAdmin.disableTable(table.getTableName());
        table.setValue(metadataKey, metadataValue);
        hbaseAdmin.modifyTable(table.getTableName(), table);
        hbaseAdmin.enableTable(table.getTableName());
    }
}
@Test
// Verifies that a Phoenix data table and its mutable index are replicated to a
// second cluster once REPLICATION_SCOPE_GLOBAL is set on their column family.
// Uses two mini-cluster utilities (utility1 = source, utility2 = target).
public void testReplicationWithMutableIndexes() throws Exception {
Connection conn = getConnection();
//create the primary and index tables
conn.createStatement().execute(
"CREATE TABLE " + DATA_TABLE_FULL_NAME
+ " (k VARCHAR NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR)");
conn.createStatement().execute(
"CREATE INDEX " + INDEX_TABLE_NAME + " ON " + DATA_TABLE_FULL_NAME
+ " (v1)");
// make sure that the tables are empty, but reachable
String query = "SELECT * FROM " + DATA_TABLE_FULL_NAME;
ResultSet rs = conn.createStatement().executeQuery(query);
assertFalse(rs.next());
//make sure there is no data in the table
query = "SELECT * FROM " + INDEX_TABLE_FULL_NAME;
rs = conn.createStatement().executeQuery(query);
assertFalse(rs.next());
// make sure the data tables are created on the remote cluster
HBaseAdmin admin = utility1.getHBaseAdmin();
HBaseAdmin admin2 = utility2.getHBaseAdmin();
List<String> dataTables = new ArrayList<String>();
dataTables.add(DATA_TABLE_FULL_NAME);
dataTables.add(INDEX_TABLE_FULL_NAME);
// For both the data and index tables: mirror the descriptor onto the target
// cluster, then flip the (single) column family to global replication scope
// on the source via a disable/modify/enable cycle.
for (String tableName : dataTables) {
HTableDescriptor desc = admin.getTableDescriptor(TableName.valueOf(tableName));
//create it as-is on the remote cluster
admin2.createTable(desc);
LOG.info("Enabling replication on source table: "+tableName);
HColumnDescriptor[] cols = desc.getColumnFamilies();
assertEquals(1, cols.length);
// add the replication scope to the column
// removeFamily returns the removed descriptor, which is mutated and re-added.
HColumnDescriptor col = desc.removeFamily(cols[0].getName());
col.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
desc.addFamily(col);
//disable/modify/enable table so it has replication enabled
admin.disableTable(desc.getTableName());
admin.modifyTable(tableName, desc);
admin.enableTable(desc.getTableName());
LOG.info("Replication enabled on source table: "+tableName);
}
// load some data into the source cluster table
PreparedStatement stmt =
conn.prepareStatement("UPSERT INTO " + DATA_TABLE_FULL_NAME + " VALUES(?,?,?)");
stmt.setString(1, "a"); // k
stmt.setString(2, "x"); // v1 <- has index
stmt.setString(3, "1"); // v2
stmt.execute();
conn.commit();
// make sure the index is working as expected
query = "SELECT * FROM " + INDEX_TABLE_FULL_NAME;
rs = conn.createStatement().executeQuery(query);
assertTrue(rs.next());
assertEquals("x", rs.getString(1));
assertFalse(rs.next());
conn.close();
/*
Validate that we have replicated the rows to the remote cluster
*/
// other table can't be reached through Phoenix right now - would need to change how we
// lookup tables. For right now, we just go through an HTable
LOG.info("Looking up tables in replication target");
TableName[] tables = admin2.listTableNames();
HTable remoteTable = new HTable(utility2.getConfiguration(), tables[0]);
// Poll the remote table until any row appears, sleeping between attempts.
// NOTE(review): the fail() guard fires at the top of the final iteration,
// before ensureAnyRows() runs — so only REPLICATION_RETRIES - 1 real
// attempts are made; confirm this is intentional.
for (int i = 0; i < REPLICATION_RETRIES; i++) {
if (i >= REPLICATION_RETRIES - 1) {
fail("Waited too much time for put replication on table " + remoteTable
.getTableDescriptor().getNameAsString());
}
if (ensureAnyRows(remoteTable)) {
break;
}
LOG.info("Sleeping for " + REPLICATION_WAIT_TIME_MILLIS
+ " for edits to get replicated");
Thread.sleep(REPLICATION_WAIT_TIME_MILLIS);
}
remoteTable.close();
}
@Test
// Verifies local index behavior while a region split is in flight: real
// splitter coprocessors are swapped for mocked ones (coordinated with the
// test via latch1/latch2), a split is triggered, and the test asserts both
// indexes transition to INACTIVE while queries fall back to a full scan on
// the data table.
public void testLocalIndexStateWhenSplittingInProgress() throws Exception {
createBaseTable(TestUtil.DEFAULT_DATA_TABLE_NAME+"2", null, "('e','j','o')");
Connection conn1 = DriverManager.getConnection(getUrl());
try{
// Seed 26 rows ('a'..'z') so the later split has data on both sides.
String[] strings = {"a","b","c","d","e","f","g","h","i","j","k","l","m","n","o","p","q","r","s","t","u","v","w","x","y","z"};
for (int i = 0; i < 26; i++) {
conn1.createStatement().execute(
"UPSERT INTO " + TestUtil.DEFAULT_DATA_TABLE_NAME+"2" + " values('"+strings[i]+"'," + i + ","
+ (i + 1) + "," + (i + 2) + ",'" + strings[25 - i] + "')");
}
conn1.commit();
conn1.createStatement().execute("CREATE LOCAL INDEX " + TestUtil.DEFAULT_INDEX_TABLE_NAME + " ON " + TestUtil.DEFAULT_DATA_TABLE_NAME+"2" + "(v1)");
conn1.createStatement().execute("CREATE LOCAL INDEX " + TestUtil.DEFAULT_INDEX_TABLE_NAME + "_2 ON " + TestUtil.DEFAULT_DATA_TABLE_NAME+"2" + "(k3)");
ResultSet rs = conn1.createStatement().executeQuery("SELECT * FROM " + TestUtil.DEFAULT_DATA_TABLE_NAME+"2");
assertTrue(rs.next());
// Replace the real LocalIndexSplitter on the data table with the mocked
// version (priority 1), via the disable/modify/enable cycle.
HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
HTableDescriptor tableDesc = admin.getTableDescriptor(TableName.valueOf(TestUtil.DEFAULT_DATA_TABLE_NAME+"2"));
tableDesc.removeCoprocessor(LocalIndexSplitter.class.getName());
tableDesc.addCoprocessor(MockedLocalIndexSplitter.class.getName(), null,
1, null);
admin.disableTable(tableDesc.getTableName());
admin.modifyTable(tableDesc.getTableName(), tableDesc);
admin.enableTable(tableDesc.getTableName());
// Same swap on the local index table: mock the half-store-file reader
// generator so the test can control split progress.
TableName indexTable =
TableName.valueOf(MetaDataUtil.getLocalIndexTableName(TestUtil.DEFAULT_DATA_TABLE_NAME+"2"));
HTableDescriptor indexTableDesc = admin.getTableDescriptor(indexTable);
indexTableDesc.removeCoprocessor(IndexHalfStoreFileReaderGenerator.class.getName());
indexTableDesc.addCoprocessor(MockedIndexHalfStoreFileReaderGenerator.class.getName(), null,
1, null);
admin.disableTable(indexTable);
admin.modifyTable(indexTable, indexTableDesc);
admin.enableTable(indexTable);
// Trigger the split at key 'd' and wait until both the data table and the
// index table report 5 regions (4 pre-split boundaries + 1 from the split).
admin.split(Bytes.toBytes(TestUtil.DEFAULT_DATA_TABLE_NAME+"2"), ByteUtil.concat(Bytes.toBytes(strings[3])));
List<HRegionInfo> regionsOfUserTable =
admin.getTableRegions(TableName.valueOf(TestUtil.DEFAULT_DATA_TABLE_NAME+"2"));
while (regionsOfUserTable.size() != 5) {
Thread.sleep(100);
regionsOfUserTable = admin.getTableRegions(TableName.valueOf(TestUtil.DEFAULT_DATA_TABLE_NAME+"2"));
}
assertEquals(5, regionsOfUserTable.size());
List<HRegionInfo> regionsOfIndexTable = admin.getTableRegions(indexTable);
while (regionsOfIndexTable.size() != 5) {
Thread.sleep(100);
regionsOfIndexTable = admin.getTableRegions(indexTable);
}
assertEquals(5, regionsOfIndexTable.size());
// Block until the mocked coprocessor signals the split is mid-flight.
latch1.await();
// Verify the metadata for index is correct.
// Both indexes should be INACTIVE while the split is in progress.
rs = conn1.getMetaData().getTables(null, StringUtil.escapeLike(TestUtil.DEFAULT_SCHEMA_NAME), TestUtil.DEFAULT_INDEX_TABLE_NAME,
new String[] { PTableType.INDEX.toString() });
assertTrue(rs.next());
assertEquals(TestUtil.DEFAULT_INDEX_TABLE_NAME, rs.getString(3));
assertEquals(PIndexState.INACTIVE.toString(), rs.getString("INDEX_STATE"));
assertFalse(rs.next());
rs = conn1.getMetaData().getTables(null, StringUtil.escapeLike(TestUtil.DEFAULT_SCHEMA_NAME), TestUtil.DEFAULT_INDEX_TABLE_NAME+"_2",
new String[] { PTableType.INDEX.toString() });
assertTrue(rs.next());
assertEquals(TestUtil.DEFAULT_INDEX_TABLE_NAME+"_2", rs.getString(3));
assertEquals(PIndexState.INACTIVE.toString(), rs.getString("INDEX_STATE"));
assertFalse(rs.next());
// With indexes inactive, the planner must fall back to a full scan over
// the data table rather than using either index.
String query = "SELECT t_id,k1,v1 FROM " + TestUtil.DEFAULT_DATA_TABLE_NAME+"2";
rs = conn1.createStatement().executeQuery("EXPLAIN " + query);
assertEquals("CLIENT PARALLEL " + 1 + "-WAY FULL SCAN OVER " + TestUtil.DEFAULT_DATA_TABLE_NAME+"2",
QueryUtil.getExplainPlan(rs));
// Release the mocked coprocessor so the split can complete.
latch2.countDown();
} finally {
conn1.close();
// Release both latches unconditionally so a failed assertion cannot leave
// the mocked coprocessor blocked.
latch1.countDown();
latch2.countDown();
}
}