Listed below are code examples of org.apache.hadoop.hbase.client.HBaseAdmin#enableTable(), drawn from open-source projects.
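Before the project snippets, here is a minimal, self-contained sketch of the typical call sequence. It assumes an HBase 0.98/1.x-era client, matching the API used throughout the examples below (in HBase 2.x, HBaseAdmin is replaced by the Admin interface obtained from a Connection); the table name MY_TABLE is a placeholder:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HBaseAdmin;

public class EnableTableExample {
    public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml from the classpath
        HBaseAdmin admin = new HBaseAdmin(conf); // same (now deprecated) constructor the examples below use
        try {
            TableName table = TableName.valueOf("MY_TABLE"); // hypothetical table name
            // enableTable() blocks until the table is online and throws
            // TableNotDisabledException if the table is already enabled,
            // so it is usually guarded by isTableDisabled() or called
            // right after a matching disableTable().
            if (admin.isTableDisabled(table)) {
                admin.enableTable(table);
            }
        } finally {
            admin.close();
        }
    }
}

The first example below, a test setUp() method, applies exactly this guard: it creates the timestamp-storage table with a single unbounded-version column family if it does not exist yet, re-enables it in case a previous run left it disabled, and finally logs the names of all tables in the cluster.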
@BeforeMethod
public void setUp() throws Exception {
    HBaseAdmin admin = testutil.getHBaseAdmin();
    if (!admin.tableExists(TableName.valueOf(DEFAULT_TIMESTAMP_STORAGE_TABLE_NAME))) {
        HTableDescriptor desc = new HTableDescriptor(TABLE_NAME);
        HColumnDescriptor datafam = new HColumnDescriptor(DEFAULT_TIMESTAMP_STORAGE_CF_NAME);
        datafam.setMaxVersions(Integer.MAX_VALUE);
        desc.addFamily(datafam);
        admin.createTable(desc);
    }
    if (admin.isTableDisabled(TableName.valueOf(DEFAULT_TIMESTAMP_STORAGE_TABLE_NAME))) {
        admin.enableTable(TableName.valueOf(DEFAULT_TIMESTAMP_STORAGE_TABLE_NAME));
    }
    HTableDescriptor[] tables = admin.listTables();
    for (HTableDescriptor t : tables) {
        LOG.info(t.getNameAsString());
    }
}
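The next snippet extends a connect() routine to repair a missing column family. Note the disable/enable bracketing: the table is taken offline before addColumn() and brought back online in a finally block, so it is re-enabled even if the schema change fails.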
@Override
public void connect() throws IOException {
    super.connect();
    HTableDescriptor tdesc = table.getTableDescriptor();
    if (!tdesc.hasFamily(columnFamilyBytes)) {
        HBaseAdmin admin = new HBaseAdmin(table.getConfiguration());
        admin.disableTable(table.getTableName());
        try {
            HColumnDescriptor cdesc = new HColumnDescriptor(columnFamilyBytes);
            admin.addColumn(table.getTableName(), cdesc);
        } finally {
            admin.enableTable(table.getTableName());
            admin.close();
        }
    }
}
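resetCoprocessor() applies the same cycle to redeploy coprocessors: with the table disabled, all observer and endpoint entries are stripped from the descriptor, the new coprocessor jar is attached, the descriptor is pushed with modifyTable(), and the table is enabled again.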
public static void resetCoprocessor(String tableName, HBaseAdmin hbaseAdmin, Path hdfsCoprocessorJar) throws IOException {
    logger.info("Disable " + tableName);
    hbaseAdmin.disableTable(tableName);
    logger.info("Unset coprocessor on " + tableName);
    HTableDescriptor desc = hbaseAdmin.getTableDescriptor(TableName.valueOf(tableName));
    while (desc.hasCoprocessor(OBSERVER_CLS_NAME)) {
        desc.removeCoprocessor(OBSERVER_CLS_NAME);
    }
    while (desc.hasCoprocessor(ENDPOINT_CLS_NAMAE)) {
        desc.removeCoprocessor(ENDPOINT_CLS_NAMAE);
    }
    addCoprocessorOnHTable(desc, hdfsCoprocessorJar);
    hbaseAdmin.modifyTable(tableName, desc);
    logger.info("Enable " + tableName);
    hbaseAdmin.enableTable(tableName);
}
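This test iterates over all tables and, for each one carrying a KYLIN_HOST attribute, disables it, adds a second table-level attribute, applies the change via modifyTable(), and re-enables it.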
@Test
public void testRetriveHtableHost() throws IOException {
    Configuration conf = HBaseConfiguration.create();
    HBaseAdmin hbaseAdmin = new HBaseAdmin(conf);
    HTableDescriptor[] tableDescriptors = hbaseAdmin.listTables();
    for (HTableDescriptor table : tableDescriptors) {
        String value = table.getValue("KYLIN_HOST");
        if (value != null) {
            System.out.println(table.getTableName());
            System.out.println("host is " + value);
            hbaseAdmin.disableTable(table.getTableName());
            table.setValue("KYLIN_HOST_ANOTHER", "dev02");
            hbaseAdmin.modifyTable(table.getTableName(), table);
            hbaseAdmin.enableTable(table.getTableName());
        }
    }
    hbaseAdmin.close();
}
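A standalone main() exercising a full DDL round trip: after creating a table with two column families, it disables the table, drops the cf2 family, adds a cf3 family with two retained versions and KEEP_DELETED_CELLS, re-enables the table, and prints the resulting schema.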
public static void main(String[] args) throws Exception {
    String quorum = "192.168.0.30,192.168.0.31,192.168.0.32";
    //quorum = "192.168.8.191,192.168.1.192,192.168.1.193";
    int port = 2181;
    String znode = "/hyperbase1";
    HBaseConnPool connPool = new HBaseClientManager(quorum, port, znode);
    HBaseDDLHandler ddlHandler = new HBaseDDLHandler(connPool);
    String tableName = "demo_test";
    System.out.println("=============================== : delete");
    ddlHandler.deleteTable(tableName);
    String columnFamily = "cf";
    System.out.println("=============================== : create");
    ddlHandler.createTable(tableName, columnFamily, "cf2");
    System.out.println("=============================== : desc");
    HBaseUtils.printTableInfo(ddlHandler.getTable(tableName));
    System.out.println("=============================== : alter");
    HBaseAdmin admin = new HBaseAdmin(connPool.getConn());
    admin.disableTable(tableName);
    HTableInterface htable = ddlHandler.getTable(tableName);
    HTableDescriptor tableDesc = admin.getTableDescriptor(htable.getTableName());
    tableDesc.removeFamily(Bytes.toBytes("cf2"));
    HColumnDescriptor newhcd = new HColumnDescriptor("cf3");
    newhcd.setMaxVersions(2);
    newhcd.setKeepDeletedCells(KeepDeletedCells.TRUE);
    tableDesc.addFamily(newhcd);
    admin.modifyTable(tableName, tableDesc);
    admin.enableTable(tableName);
    admin.close();
    System.out.println("=============================== : desc");
    HBaseUtils.printTableInfo(ddlHandler.getTable(tableName));
    System.out.println("=============================== : delete");
    ddlHandler.deleteTable(tableName);
    connPool.closeConn();
}
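alter() condenses the pattern into a small helper that sets a single table-level metadata key on an existing table.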
private void alter() throws IOException {
    Configuration conf = HBaseConfiguration.create();
    HBaseAdmin hbaseAdmin = new HBaseAdmin(conf);
    HTableDescriptor table = hbaseAdmin.getTableDescriptor(TableName.valueOf(tableName));
    hbaseAdmin.disableTable(table.getTableName());
    table.setValue(metadataKey, metadataValue);
    hbaseAdmin.modifyTable(table.getTableName(), table);
    hbaseAdmin.enableTable(table.getTableName());
    hbaseAdmin.close();
}
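In this Phoenix replication test, enableTable() completes the disable/modify/enable cycle used to set REPLICATION_SCOPE_GLOBAL on the single column family of both the data table and its index table, after which the test upserts a row and polls the remote cluster until the edit is replicated.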
@Test
public void testReplicationWithMutableIndexes() throws Exception {
    Connection conn = getConnection();
    // create the primary and index tables
    conn.createStatement().execute(
            "CREATE TABLE " + DATA_TABLE_FULL_NAME
                    + " (k VARCHAR NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR)");
    conn.createStatement().execute(
            "CREATE INDEX " + INDEX_TABLE_NAME + " ON " + DATA_TABLE_FULL_NAME + " (v1)");
    // make sure that the tables are empty, but reachable
    String query = "SELECT * FROM " + DATA_TABLE_FULL_NAME;
    ResultSet rs = conn.createStatement().executeQuery(query);
    assertFalse(rs.next());
    // make sure there is no data in the index table either
    query = "SELECT * FROM " + INDEX_TABLE_FULL_NAME;
    rs = conn.createStatement().executeQuery(query);
    assertFalse(rs.next());
    // make sure the data tables are created on the remote cluster
    HBaseAdmin admin = utility1.getHBaseAdmin();
    HBaseAdmin admin2 = utility2.getHBaseAdmin();
    List<String> dataTables = new ArrayList<String>();
    dataTables.add(DATA_TABLE_FULL_NAME);
    dataTables.add(INDEX_TABLE_FULL_NAME);
    for (String tableName : dataTables) {
        HTableDescriptor desc = admin.getTableDescriptor(TableName.valueOf(tableName));
        // create it as-is on the remote cluster
        admin2.createTable(desc);
        LOG.info("Enabling replication on source table: " + tableName);
        HColumnDescriptor[] cols = desc.getColumnFamilies();
        assertEquals(1, cols.length);
        // add the replication scope to the column
        HColumnDescriptor col = desc.removeFamily(cols[0].getName());
        col.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
        desc.addFamily(col);
        // disable/modify/enable the table so it has replication enabled
        admin.disableTable(desc.getTableName());
        admin.modifyTable(tableName, desc);
        admin.enableTable(desc.getTableName());
        LOG.info("Replication enabled on source table: " + tableName);
    }
    // load some data into the source cluster table
    PreparedStatement stmt =
            conn.prepareStatement("UPSERT INTO " + DATA_TABLE_FULL_NAME + " VALUES(?,?,?)");
    stmt.setString(1, "a"); // k
    stmt.setString(2, "x"); // v1 <- has index
    stmt.setString(3, "1"); // v2
    stmt.execute();
    conn.commit();
    // make sure the index is working as expected
    query = "SELECT * FROM " + INDEX_TABLE_FULL_NAME;
    rs = conn.createStatement().executeQuery(query);
    assertTrue(rs.next());
    assertEquals("x", rs.getString(1));
    assertFalse(rs.next());
    conn.close();
    // Validate that we have replicated the rows to the remote cluster.
    // The other table can't be reached through Phoenix right now - that would need a change to how
    // we look up tables. For right now, we just go through an HTable.
    LOG.info("Looking up tables in replication target");
    TableName[] tables = admin2.listTableNames();
    HTable remoteTable = new HTable(utility2.getConfiguration(), tables[0]);
    for (int i = 0; i < REPLICATION_RETRIES; i++) {
        if (i >= REPLICATION_RETRIES - 1) {
            fail("Waited too much time for put replication on table "
                    + remoteTable.getTableDescriptor().getNameAsString());
        }
        if (ensureAnyRows(remoteTable)) {
            break;
        }
        LOG.info("Sleeping for " + REPLICATION_WAIT_TIME_MILLIS + " for edits to get replicated");
        Thread.sleep(REPLICATION_WAIT_TIME_MILLIS);
    }
    remoteTable.close();
}
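The following test swaps mocked coprocessors onto both a data table and its local index table (again via disable/modify/enable), splits the data table, waits for both tables to reach five regions, and then asserts that the index metadata reports the INACTIVE state while queries fall back to a full scan.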
@Test
public void testLocalIndexStateWhenSplittingInProgress() throws Exception {
    createBaseTable(TestUtil.DEFAULT_DATA_TABLE_NAME + "2", null, "('e','j','o')");
    Connection conn1 = DriverManager.getConnection(getUrl());
    try {
        String[] strings = {"a","b","c","d","e","f","g","h","i","j","k","l","m",
                "n","o","p","q","r","s","t","u","v","w","x","y","z"};
        for (int i = 0; i < 26; i++) {
            conn1.createStatement().execute(
                    "UPSERT INTO " + TestUtil.DEFAULT_DATA_TABLE_NAME + "2" + " values('" + strings[i] + "'," + i + ","
                            + (i + 1) + "," + (i + 2) + ",'" + strings[25 - i] + "')");
        }
        conn1.commit();
        conn1.createStatement().execute("CREATE LOCAL INDEX " + TestUtil.DEFAULT_INDEX_TABLE_NAME + " ON "
                + TestUtil.DEFAULT_DATA_TABLE_NAME + "2" + "(v1)");
        conn1.createStatement().execute("CREATE LOCAL INDEX " + TestUtil.DEFAULT_INDEX_TABLE_NAME + "_2 ON "
                + TestUtil.DEFAULT_DATA_TABLE_NAME + "2" + "(k3)");
        ResultSet rs = conn1.createStatement().executeQuery("SELECT * FROM " + TestUtil.DEFAULT_DATA_TABLE_NAME + "2");
        assertTrue(rs.next());
        HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
        HTableDescriptor tableDesc = admin.getTableDescriptor(TableName.valueOf(TestUtil.DEFAULT_DATA_TABLE_NAME + "2"));
        tableDesc.removeCoprocessor(LocalIndexSplitter.class.getName());
        tableDesc.addCoprocessor(MockedLocalIndexSplitter.class.getName(), null, 1, null);
        admin.disableTable(tableDesc.getTableName());
        admin.modifyTable(tableDesc.getTableName(), tableDesc);
        admin.enableTable(tableDesc.getTableName());
        TableName indexTable =
                TableName.valueOf(MetaDataUtil.getLocalIndexTableName(TestUtil.DEFAULT_DATA_TABLE_NAME + "2"));
        HTableDescriptor indexTableDesc = admin.getTableDescriptor(indexTable);
        indexTableDesc.removeCoprocessor(IndexHalfStoreFileReaderGenerator.class.getName());
        indexTableDesc.addCoprocessor(MockedIndexHalfStoreFileReaderGenerator.class.getName(), null, 1, null);
        admin.disableTable(indexTable);
        admin.modifyTable(indexTable, indexTableDesc);
        admin.enableTable(indexTable);
        admin.split(Bytes.toBytes(TestUtil.DEFAULT_DATA_TABLE_NAME + "2"), ByteUtil.concat(Bytes.toBytes(strings[3])));
        List<HRegionInfo> regionsOfUserTable =
                admin.getTableRegions(TableName.valueOf(TestUtil.DEFAULT_DATA_TABLE_NAME + "2"));
        while (regionsOfUserTable.size() != 5) {
            Thread.sleep(100);
            regionsOfUserTable = admin.getTableRegions(TableName.valueOf(TestUtil.DEFAULT_DATA_TABLE_NAME + "2"));
        }
        assertEquals(5, regionsOfUserTable.size());
        List<HRegionInfo> regionsOfIndexTable = admin.getTableRegions(indexTable);
        while (regionsOfIndexTable.size() != 5) {
            Thread.sleep(100);
            regionsOfIndexTable = admin.getTableRegions(indexTable);
        }
        assertEquals(5, regionsOfIndexTable.size());
        latch1.await();
        // Verify that the metadata for the indexes is correct.
        rs = conn1.getMetaData().getTables(null, StringUtil.escapeLike(TestUtil.DEFAULT_SCHEMA_NAME),
                TestUtil.DEFAULT_INDEX_TABLE_NAME, new String[] { PTableType.INDEX.toString() });
        assertTrue(rs.next());
        assertEquals(TestUtil.DEFAULT_INDEX_TABLE_NAME, rs.getString(3));
        assertEquals(PIndexState.INACTIVE.toString(), rs.getString("INDEX_STATE"));
        assertFalse(rs.next());
        rs = conn1.getMetaData().getTables(null, StringUtil.escapeLike(TestUtil.DEFAULT_SCHEMA_NAME),
                TestUtil.DEFAULT_INDEX_TABLE_NAME + "_2", new String[] { PTableType.INDEX.toString() });
        assertTrue(rs.next());
        assertEquals(TestUtil.DEFAULT_INDEX_TABLE_NAME + "_2", rs.getString(3));
        assertEquals(PIndexState.INACTIVE.toString(), rs.getString("INDEX_STATE"));
        assertFalse(rs.next());
        String query = "SELECT t_id,k1,v1 FROM " + TestUtil.DEFAULT_DATA_TABLE_NAME + "2";
        rs = conn1.createStatement().executeQuery("EXPLAIN " + query);
        assertEquals("CLIENT PARALLEL " + 1 + "-WAY FULL SCAN OVER " + TestUtil.DEFAULT_DATA_TABLE_NAME + "2",
                QueryUtil.getExplainPlan(rs));
        latch2.countDown();
    } finally {
        conn1.close();
        latch1.countDown();
        latch2.countDown();
    }
}
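testCreateOnExistingTable() recreates a raw HBase table before letting Phoenix map onto it. Here enableTable() sits inside a cleanup block: if the table existed, it has just been deleted and the call throws TableNotFoundException, which is deliberately swallowed; if it never existed, disableTable() throws the same exception first.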
@Test
public void testCreateOnExistingTable() throws Exception {
    PhoenixConnection pconn = DriverManager.getConnection(getUrl(),
            PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class);
    String tableName = MDTEST_NAME;
    String schemaName = MDTEST_SCHEMA_NAME;
    byte[] cfA = Bytes.toBytes(SchemaUtil.normalizeIdentifier("a"));
    byte[] cfB = Bytes.toBytes(SchemaUtil.normalizeIdentifier("b"));
    byte[] cfC = Bytes.toBytes("c");
    byte[][] familyNames = new byte[][] {cfB, cfC};
    byte[] htableName = SchemaUtil.getTableNameAsBytes(schemaName, tableName);
    HBaseAdmin admin = pconn.getQueryServices().getAdmin();
    try {
        admin.disableTable(htableName);
        admin.deleteTable(htableName);
        admin.enableTable(htableName);
    } catch (org.apache.hadoop.hbase.TableNotFoundException e) {
    }
    @SuppressWarnings("deprecation")
    HTableDescriptor descriptor = new HTableDescriptor(htableName);
    for (byte[] familyName : familyNames) {
        HColumnDescriptor columnDescriptor = new HColumnDescriptor(familyName);
        descriptor.addFamily(columnDescriptor);
    }
    admin.createTable(descriptor);
    long ts = nextTimestamp();
    Properties props = new Properties();
    props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 5));
    PhoenixConnection conn1 = DriverManager.getConnection(getUrl(), props).unwrap(PhoenixConnection.class);
    ensureTableCreated(getUrl(), tableName, null, ts);
    descriptor = admin.getTableDescriptor(htableName);
    assertEquals(3, descriptor.getColumnFamilies().length);
    HColumnDescriptor cdA = descriptor.getFamily(cfA);
    assertNotEquals(HColumnDescriptor.DEFAULT_KEEP_DELETED, cdA.getKeepDeletedCellsAsEnum());
    assertEquals(DataBlockEncoding.NONE, cdA.getDataBlockEncoding()); // overridden using WITH
    assertEquals(1, cdA.getMaxVersions()); // overridden using WITH
    HColumnDescriptor cdB = descriptor.getFamily(cfB);
    // Allow KEEP_DELETED_CELLS to be false for VIEW
    assertEquals(HColumnDescriptor.DEFAULT_KEEP_DELETED, cdB.getKeepDeletedCellsAsEnum());
    assertEquals(DataBlockEncoding.NONE, cdB.getDataBlockEncoding()); // should keep the original value
    // CF c should stay the same since it's not a Phoenix cf.
    HColumnDescriptor cdC = descriptor.getFamily(cfC);
    assertNotNull("Column family not found", cdC);
    assertEquals(HColumnDescriptor.DEFAULT_KEEP_DELETED, cdC.getKeepDeletedCellsAsEnum());
    assertFalse(SchemaUtil.DEFAULT_DATA_BLOCK_ENCODING == cdC.getDataBlockEncoding());
    assertTrue(descriptor.hasCoprocessor(UngroupedAggregateRegionObserver.class.getName()));
    assertTrue(descriptor.hasCoprocessor(GroupedAggregateRegionObserver.class.getName()));
    assertTrue(descriptor.hasCoprocessor(ServerCachingEndpointImpl.class.getName()));
    admin.close();
    int rowCount = 5;
    String upsert = "UPSERT INTO " + tableName + "(id,col1,col2) VALUES(?,?,?)";
    PreparedStatement ps = conn1.prepareStatement(upsert);
    for (int i = 0; i < rowCount; i++) {
        ps.setString(1, Integer.toString(i));
        ps.setInt(2, i + 1);
        ps.setInt(3, i + 2);
        ps.execute();
    }
    conn1.commit();
    conn1.close();
    props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 6));
    Connection conn2 = DriverManager.getConnection(getUrl(), props);
    String query = "SELECT count(1) FROM " + tableName;
    ResultSet rs = conn2.createStatement().executeQuery(query);
    assertTrue(rs.next());
    assertEquals(rowCount, rs.getLong(1));
    query = "SELECT id, col1, col2 FROM " + tableName;
    rs = conn2.createStatement().executeQuery(query);
    for (int i = 0; i < rowCount; i++) {
        assertTrue(rs.next());
        assertEquals(Integer.toString(i), rs.getString(1));
        assertEquals(i + 1, rs.getInt(2));
        assertEquals(i + 2, rs.getInt(3));
    }
    assertFalse(rs.next());
    conn2.close();
}
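What follows appears to be an earlier revision of the same test, differing mainly in its use of a fixed PHOENIX_JDBC_URL, the boolean getKeepDeletedCells() accessor rather than the enum variant, and slightly different KEEP_DELETED_CELLS expectations.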
@Test
public void testCreateOnExistingTable() throws Exception {
    PhoenixConnection pconn = DriverManager.getConnection(PHOENIX_JDBC_URL,
            TEST_PROPERTIES).unwrap(PhoenixConnection.class);
    String tableName = MDTEST_NAME;
    String schemaName = MDTEST_SCHEMA_NAME;
    byte[] cfA = Bytes.toBytes(SchemaUtil.normalizeIdentifier("a"));
    byte[] cfB = Bytes.toBytes(SchemaUtil.normalizeIdentifier("b"));
    byte[] cfC = Bytes.toBytes("c");
    byte[][] familyNames = new byte[][] {cfB, cfC};
    byte[] htableName = SchemaUtil.getTableNameAsBytes(schemaName, tableName);
    HBaseAdmin admin = pconn.getQueryServices().getAdmin();
    try {
        admin.disableTable(htableName);
        admin.deleteTable(htableName);
        admin.enableTable(htableName);
    } catch (org.apache.hadoop.hbase.TableNotFoundException e) {
    }
    HTableDescriptor descriptor = new HTableDescriptor(htableName);
    for (byte[] familyName : familyNames) {
        HColumnDescriptor columnDescriptor = new HColumnDescriptor(familyName);
        descriptor.addFamily(columnDescriptor);
    }
    admin.createTable(descriptor);
    long ts = nextTimestamp();
    Properties props = new Properties();
    props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 5));
    PhoenixConnection conn1 = DriverManager.getConnection(PHOENIX_JDBC_URL, props).unwrap(PhoenixConnection.class);
    ensureTableCreated(getUrl(), tableName, null, ts);
    descriptor = admin.getTableDescriptor(htableName);
    assertEquals(3, descriptor.getColumnFamilies().length);
    HColumnDescriptor cdA = descriptor.getFamily(cfA);
    assertTrue(cdA.getKeepDeletedCells());
    assertEquals(DataBlockEncoding.NONE, cdA.getDataBlockEncoding()); // overridden using WITH
    assertEquals(1, cdA.getMaxVersions()); // overridden using WITH
    HColumnDescriptor cdB = descriptor.getFamily(cfB);
    assertTrue(cdB.getKeepDeletedCells());
    assertEquals(DataBlockEncoding.NONE, cdB.getDataBlockEncoding()); // should keep the original value
    // CF c should stay the same since it's not a Phoenix cf.
    HColumnDescriptor cdC = descriptor.getFamily(cfC);
    assertNotNull("Column family not found", cdC);
    assertFalse(cdC.getKeepDeletedCells());
    assertFalse(SchemaUtil.DEFAULT_DATA_BLOCK_ENCODING == cdC.getDataBlockEncoding());
    assertTrue(descriptor.hasCoprocessor(UngroupedAggregateRegionObserver.class.getName()));
    assertTrue(descriptor.hasCoprocessor(GroupedAggregateRegionObserver.class.getName()));
    assertTrue(descriptor.hasCoprocessor(ServerCachingEndpointImpl.class.getName()));
    admin.close();
    int rowCount = 5;
    String upsert = "UPSERT INTO " + tableName + "(id,col1,col2) VALUES(?,?,?)";
    PreparedStatement ps = conn1.prepareStatement(upsert);
    for (int i = 0; i < rowCount; i++) {
        ps.setString(1, Integer.toString(i));
        ps.setInt(2, i + 1);
        ps.setInt(3, i + 2);
        ps.execute();
    }
    conn1.commit();
    conn1.close();
    props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 6));
    Connection conn2 = DriverManager.getConnection(PHOENIX_JDBC_URL, props);
    String query = "SELECT count(1) FROM " + tableName;
    ResultSet rs = conn2.createStatement().executeQuery(query);
    assertTrue(rs.next());
    assertEquals(rowCount, rs.getLong(1));
    query = "SELECT id, col1, col2 FROM " + tableName;
    rs = conn2.createStatement().executeQuery(query);
    for (int i = 0; i < rowCount; i++) {
        assertTrue(rs.next());
        assertEquals(Integer.toString(i), rs.getString(1));
        assertEquals(i + 1, rs.getInt(2));
        assertEquals(i + 2, rs.getInt(3));
    }
    assertFalse(rs.next());
    conn2.close();
}