org.apache.hadoop.hbase.client.HBaseAdmin#disableTable() Source Code Examples

Listed below are example usages of org.apache.hadoop.hbase.client.HBaseAdmin#disableTable(), collected from open-source projects; the full source for each snippet is available on GitHub.

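Before the project examples, here is a minimal, self-contained sketch of the pattern most of them follow: a table must be disabled before it can be deleted. The table name "my_table" is a placeholder, and the sketch deliberately uses the same (now deprecated) HBaseAdmin constructor that the snippets below rely on.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HBaseAdmin;

public class DisableTableSketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        HBaseAdmin admin = new HBaseAdmin(conf); // deprecated since HBase 1.0; kept here to match the examples below
        try {
            if (admin.tableExists("my_table")) {
                if (admin.isTableEnabled("my_table")) {
                    admin.disableTable("my_table"); // deleteTable fails on an enabled table
                }
                admin.deleteTable("my_table");
            }
        } finally {
            admin.close(); // always release the connection to the cluster
        }
    }
}
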
Example 1   Project: SpyGlass   File: GenerateTestingHTables.java
/**
 * Method to disable and delete HBase tables, e.g. "int-test-01"
 */
private static void deleteTestTable(String tableName) throws IOException {

	// Reset configuration
	config.clear();
	config.set("hbase.zookeeper.quorum", QUORUM);
	config.set("hbase.zookeeper.property.clientPort", QUORUM_PORT);

	HBaseAdmin hbase = new HBaseAdmin(config);

	if (hbase.tableExists(tableName)) {
		LOG.info("Table: " + tableName + " exists.");
		hbase.disableTable(tableName);
		hbase.deleteTable(tableName);
		LOG.info("Table: " + tableName + " disabled and deleted.");
	} else {
		LOG.info("Table: " + tableName + " does not exist.");
	}

	hbase.close();
}
 
Example 2   Project: attic-apex-malhar   File: HBaseWindowStore.java
@Override
public void connect() throws IOException
{
  super.connect();
  HTableDescriptor tdesc = table.getTableDescriptor();
  if (!tdesc.hasFamily(columnFamilyBytes)) {
    HBaseAdmin admin = new HBaseAdmin(table.getConfiguration());
    admin.disableTable(table.getTableName());
    try {
      HColumnDescriptor cdesc = new HColumnDescriptor(columnFamilyBytes);
      admin.addColumn(table.getTableName(), cdesc);
    } finally {
      admin.enableTable(table.getTableName());
      admin.close();
    }
  }
}
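The snippet above brackets addColumn between disableTable and enableTable so the table always comes back online. For comparison, here is a hedged sketch of the same flow written against the newer Connection/Admin API (HBase 2.x); the table and column-family names are placeholders, not taken from the original project.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class AddFamilySketch {
    public static void ensureFamily(String table, String family) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            TableName tn = TableName.valueOf(table);
            byte[] cf = Bytes.toBytes(family);
            if (!admin.getDescriptor(tn).hasColumnFamily(cf)) {
                admin.disableTable(tn);
                try {
                    admin.addColumnFamily(tn, ColumnFamilyDescriptorBuilder.of(cf));
                } finally {
                    admin.enableTable(tn); // mirror the original: re-enable even if the alter fails
                }
            }
        }
    }
}
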
 
Example 3   Project: bigdata-tutorial   File: HBaseDDLHandler.java
/**
 * Disable (if necessary) and delete the given HBase table.
 *
 * @param tableName name of the table to delete
 * @return true if the table was deleted or did not exist, false if the delete failed
 */
public boolean deleteTable(String tableName) throws IOException {

	HBaseAdmin admin = new HBaseAdmin(getConnPool().getConn());
	if (admin.tableExists(tableName)) {
		try {
			if (admin.isTableEnabled(tableName)) {
				admin.disableTable(tableName);
			}
			admin.deleteTable(tableName);
			LOGGER.info(">>>> Table {} delete success!", tableName);
		} catch (Exception ex) {
			LOGGER.error("delete table error:", ex);
			return false;
		}
	} else {
		LOGGER.warn(">>>> Table {} delete but not exist.", tableName);
	}
	admin.close();
	return true;
}
 
Example 4   Project: phoenix   File: NativeHBaseTypesTest.java
@BeforeClass
public static void doBeforeTestSetup() throws Exception {
    HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), TEST_PROPERTIES).getAdmin();
    try {
        try {
            admin.disableTable(HBASE_NATIVE_BYTES);
            admin.deleteTable(HBASE_NATIVE_BYTES);
        } catch (org.apache.hadoop.hbase.TableNotFoundException e) {
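            // Ignored: the table does not exist yet, so there is nothing to drop.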
        }
        HTableDescriptor descriptor = new HTableDescriptor(HBASE_NATIVE_BYTES);
        HColumnDescriptor columnDescriptor =  new HColumnDescriptor(FAMILY_NAME);
        columnDescriptor.setKeepDeletedCells(true);
        descriptor.addFamily(columnDescriptor);
        admin.createTable(descriptor, SPLITS);
        initTableValues();
    } finally {
        admin.close();
    }
}
 
Example 5   Project: phoenix   File: ReverseScanIT.java
@BeforeClass
@Shadower(classBeingShadowed = BaseHBaseManagedTimeIT.class)
public static void doSetup() throws Exception {
    Map<String,String> props = Maps.newHashMapWithExpectedSize(1);
    setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
    // Ensures our split points will be used
    // TODO: do deletePriorTables before test?
    Connection conn = DriverManager.getConnection(getUrl());
    HBaseAdmin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin();
    try {
        admin.disableTable(TestUtil.ATABLE_NAME);
        admin.deleteTable(TestUtil.ATABLE_NAME);
    } catch (TableNotFoundException e) {
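        // Ignored: the table does not exist yet, so there is nothing to drop.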
    } finally {
        admin.close();
        conn.close();
    }
}
 
Example 6   Project: hiped2   File: HBaseWriter.java
public static void createTableAndColumn(Configuration conf,
                                        String table,
                                        byte[] columnFamily)
    throws IOException {
  HBaseAdmin hbase = new HBaseAdmin(conf);
  HTableDescriptor desc = new HTableDescriptor(table);
  HColumnDescriptor meta = new HColumnDescriptor(columnFamily);
  desc.addFamily(meta);
  if (hbase.tableExists(table)) {
    if(hbase.isTableEnabled(table)) {
      hbase.disableTable(table);
    }
    hbase.deleteTable(table);
  }
  hbase.createTable(desc);
}
 
Example 7   Project: phoenix   File: NativeHBaseTypesIT.java
@BeforeClass
public static void doBeforeTestSetup() throws Exception {
    HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).getAdmin();
    try {
        try {
            admin.disableTable(HBASE_NATIVE_BYTES);
            admin.deleteTable(HBASE_NATIVE_BYTES);
        } catch (org.apache.hadoop.hbase.TableNotFoundException e) {
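            // Ignored: the table does not exist yet, so there is nothing to drop.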
        }
        @SuppressWarnings("deprecation")
        HTableDescriptor descriptor = new HTableDescriptor(HBASE_NATIVE_BYTES);
        HColumnDescriptor columnDescriptor =  new HColumnDescriptor(FAMILY_NAME);
        columnDescriptor.setKeepDeletedCells(true);
        descriptor.addFamily(columnDescriptor);
        admin.createTable(descriptor, SPLITS);
        initTableValues();
    } finally {
        admin.close();
    }
}
 
Example 8   Project: phoenix   File: ProductMetricsTest.java
private static void destroyTable() throws Exception {
    // Physically delete HBase table so that splits occur as expected for each test
    Properties props = new Properties(TEST_PROPERTIES);
    ConnectionQueryServices services = DriverManager.getConnection(getUrl(), props).unwrap(PhoenixConnection.class).getQueryServices();
    HBaseAdmin admin = services.getAdmin();
    try {
        try {
            admin.disableTable(PRODUCT_METRICS_NAME);
            admin.deleteTable(PRODUCT_METRICS_NAME);
        } catch (TableNotFoundException e) {
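            // Ignored: the table does not exist yet, so there is nothing to delete.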
        }
    } finally {
        admin.close();
    }
}
 
Example 9
private static void deleteHbaseTable(String tableName, Configuration configuration) throws Exception {
    final HBaseAdmin admin = new HBaseAdmin(configuration);
    if (admin.tableExists(tableName)) {
        admin.disableTable(tableName);
        admin.deleteTable(tableName);
    }
}
 
Example 10   Project: Kylin   File: HtableAlterMetadataCLI.java
private void alter() throws IOException {
    Configuration conf = HBaseConfiguration.create();
    HBaseAdmin hbaseAdmin = new HBaseAdmin(conf);
    HTableDescriptor table = hbaseAdmin.getTableDescriptor(TableName.valueOf(tableName));

    hbaseAdmin.disableTable(table.getTableName());
    table.setValue(metadataKey, metadataValue);
    hbaseAdmin.modifyTable(table.getTableName(), table);
    hbaseAdmin.enableTable(table.getTableName());
    hbaseAdmin.close();
}
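Example 10 shows the disable → modify → enable bracket used to change table metadata. Below is a hedged sketch of the same sequence against the newer TableDescriptorBuilder API (HBase 2.x); the table name and metadata key/value are placeholders.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class AlterTableMetadataSketch {
    public static void alter(String tableName, String key, String value) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            TableName tn = TableName.valueOf(tableName);
            TableDescriptor updated = TableDescriptorBuilder
                    .newBuilder(admin.getDescriptor(tn)) // start from the current descriptor
                    .setValue(key, value)
                    .build();
            admin.disableTable(tn);
            try {
                admin.modifyTable(updated); // the TableName is carried inside the descriptor
            } finally {
                admin.enableTable(tn); // bring the table back online, as the Kylin code does
            }
        }
    }
}
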
 
Example 11   Project: bigdata-tutorial   File: HBaseDDLHandlerTest.java
public static void main(String[] args) throws Exception {
	String quorum = "192.168.0.30,192.168.0.31,192.168.0.32";
	//quorum = "192.168.8.191,192.168.1.192,192.168.1.193";
	int port = 2181;
	String znode = "/hyperbase1";
	HBaseConnPool connPool = new HBaseClientManager(quorum, port, znode);
	HBaseDDLHandler ddlHandler = new HBaseDDLHandler(connPool);

	String tableName = "demo_test";
	System.out.println("=============================== : delete");
	ddlHandler.deleteTable(tableName);

	String columnFamily = "cf";
	System.out.println("=============================== : create");
	ddlHandler.createTable(tableName, columnFamily, "cf2");

	System.out.println("=============================== : desc");
	HBaseUtils.printTableInfo(ddlHandler.getTable(tableName));
	System.out.println("=============================== : alter");
	HBaseAdmin admin = new HBaseAdmin(connPool.getConn());
	admin.disableTable(tableName);
	HTableInterface htable = ddlHandler.getTable(tableName);
	HTableDescriptor tableDesc = admin.getTableDescriptor(htable.getTableName());
	tableDesc.removeFamily(Bytes.toBytes("cf2"));
	HColumnDescriptor newhcd = new HColumnDescriptor("cf3");
	newhcd.setMaxVersions(2);
	newhcd.setKeepDeletedCells(KeepDeletedCells.TRUE);
	tableDesc.addFamily(newhcd);

	admin.modifyTable(tableName, tableDesc);
	admin.enableTable(tableName);
	admin.close();

	System.out.println("=============================== : desc");
	HBaseUtils.printTableInfo(ddlHandler.getTable(tableName));
	System.out.println("=============================== : delete");
	ddlHandler.deleteTable(tableName);

	connPool.closeConn();
}
 
Example 12   Project: hadoop-arch-book   File: RemoveTables.java
public static void executeDeleteTables(Configuration config) throws IOException {
  HBaseAdmin admin = new HBaseAdmin(config);

  if (admin.tableExists(HBaseTableMetaModel.profileCacheTableName)) {
    admin.disableTable(HBaseTableMetaModel.profileCacheTableName);
    admin.deleteTable(HBaseTableMetaModel.profileCacheTableName);
  }

  if (admin.tableExists(HBaseTableMetaModel.validationRulesTableName)) {
    admin.disableTable(HBaseTableMetaModel.validationRulesTableName);
    admin.deleteTable(HBaseTableMetaModel.validationRulesTableName);
  }

  admin.close();
}
 
Example 13   Project: phoenix   File: DynamicColumnIT.java
@BeforeClass
public static void doBeforeTestSetup() throws Exception {
    HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).getAdmin();
    try {
        try {
            admin.disableTable(HBASE_DYNAMIC_COLUMNS_BYTES);
            admin.deleteTable(HBASE_DYNAMIC_COLUMNS_BYTES);
        } catch (org.apache.hadoop.hbase.TableNotFoundException e) {}
        ensureTableCreated(getUrl(), HBASE_DYNAMIC_COLUMNS);
        initTableValues();
    } finally {
        admin.close();
    }
}
 
Example 14   Project: phoenix   File: DynamicColumnTest.java
@BeforeClass
public static void doBeforeTestSetup() throws Exception {
    HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), TEST_PROPERTIES).getAdmin();
    try {
        try {
            admin.disableTable(HBASE_DYNAMIC_COLUMNS_BYTES);
            admin.deleteTable(HBASE_DYNAMIC_COLUMNS_BYTES);
        } catch (org.apache.hadoop.hbase.TableNotFoundException e) {}
        ensureTableCreated(getUrl(), HBASE_DYNAMIC_COLUMNS);
        initTableValues();
    } finally {
        admin.close();
    }
}
 
Example 15   Project: phoenix   File: ProductMetricsIT.java
private static void destroyTable() throws Exception {
    // Physically delete HBase table so that splits occur as expected for each test
    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    ConnectionQueryServices services = DriverManager.getConnection(getUrl(), props).unwrap(PhoenixConnection.class).getQueryServices();
    HBaseAdmin admin = services.getAdmin();
    try {
        try {
            admin.disableTable(PRODUCT_METRICS_NAME);
            admin.deleteTable(PRODUCT_METRICS_NAME);
        } catch (TableNotFoundException e) {
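            // Ignored: the table does not exist yet, so there is nothing to delete.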
        }
    } finally {
        admin.close();
    }
}
 
Example 16   Project: kite   File: UserProfileDatasetExample.java
/**
 * The constructor will start by registering the schemas with the meta store
 * table in HBase, and create the required tables to run.
 */
public UserProfileDatasetExample() throws Exception {
  Configuration conf = HBaseConfiguration.create();
  HBaseAdmin admin = new HBaseAdmin(conf);

  // Delete the table if it exists so we start fresh.
  if (admin.tableExists("kite_example_user_profiles")) {
    admin.disableTable("kite_example_user_profiles");
    admin.deleteTable("kite_example_user_profiles");
  }

  HBaseDatasetRepository repo = new HBaseDatasetRepository.Builder()
      .configuration(conf).build();

  // TODO: change to use namespace (CDK-140)

  DatasetDescriptor userProfileDatasetDescriptor =
      new DatasetDescriptor.Builder().schema(UserProfileModel2.SCHEMA$).build();
  userProfileDataset = repo.create("default", "kite_example_user_profiles.UserProfileModel2",
      userProfileDatasetDescriptor);

  DatasetDescriptor userActionsDatasetDescriptor =
      new DatasetDescriptor.Builder().schema(UserActionsModel2.SCHEMA$).build();
  userActionsDataset = repo.create("default", "kite_example_user_profiles.UserActionsModel2",
      userActionsDatasetDescriptor);

  DatasetDescriptor userProfileActionsDatasetDescriptor =
      new DatasetDescriptor.Builder().schema(UserProfileActionsModel2.SCHEMA$).build();
  userProfileActionsDataset = repo.create("default", "kite_example_user_profiles.UserProfileActionsProtocol2",
      userProfileActionsDatasetDescriptor);

}
 
Example 17   Project: learning-hadoop   File: TableBuilder.java
/**
 * @param args
 */
public static void main(String[] args) {
  Configuration conf = HBaseConfiguration.create();
  

  byte[] columnFamily = Bytes.toBytes("f");

  String tableName = "t";

  try {
    ZKUtil.applyClusterKeyToConf(conf, "edh1:2181:/hbase");
    HBaseAdmin hba = new HBaseAdmin(conf);
    if (hba.tableExists(tableName)) {
      hba.disableTable(tableName);
      hba.deleteTable(tableName);
    }
    HTableDescriptor tableDescriptor = new HTableDescriptor(tableName);
    HColumnDescriptor columnDescriptor = new HColumnDescriptor(columnFamily);
    columnDescriptor.setMaxVersions(1);
    columnDescriptor.setBloomFilterType(BloomType.ROW);
    tableDescriptor.addFamily(columnDescriptor);
    hba.createTable(tableDescriptor);
    hba.close();
  } catch (IOException e) {
    e.printStackTrace();
  }

}
 
Example 18   Project: eagle   File: CoprocessorToolITSuite.java
private void deleteTable() throws IOException {
    HBaseAdmin admin = new HBaseAdmin(new Configuration());
    admin.disableTable(TableName.valueOf(toolITTableName));
    admin.deleteTable(TableName.valueOf(toolITTableName));
    admin.close();
}
 
Example 19
/**
 * Test writing edits into an HRegion, closing it, splitting logs, opening Region again. Verify
 * seqids.
 * @throws Exception on failure
 */
@Test
public void testReplayEditsWrittenViaHRegion() throws Exception {
  final String tableNameStr = "testReplayEditsWrittenViaHRegion";
  final HRegionInfo hri = new HRegionInfo(Bytes.toBytes(tableNameStr), null, null, false);
  final Path basedir = new Path(this.hbaseRootDir, tableNameStr);
  deleteDir(basedir);
  final HTableDescriptor htd = createBasic3FamilyHTD(tableNameStr);
  
  //setup basic indexing for the table
  // enable indexing to a non-existent index table
  byte[] family = new byte[] { 'a' };
  ColumnGroup fam1 = new ColumnGroup(INDEX_TABLE_NAME);
  fam1.add(new CoveredColumn(family, CoveredColumn.ALL_QUALIFIERS));
  CoveredColumnIndexSpecifierBuilder builder = new CoveredColumnIndexSpecifierBuilder();
  builder.addIndexGroup(fam1);
  builder.build(htd);

  // create the region + its WAL
  HRegion region0 = HRegion.createHRegion(hri, hbaseRootDir, this.conf, htd);
  region0.close();
  region0.getLog().closeAndDelete();
  HLog wal = createWAL(this.conf);
  RegionServerServices mockRS = Mockito.mock(RegionServerServices.class);
  // mock out some of the internals of the RSS, so we can run CPs
  Mockito.when(mockRS.getWAL()).thenReturn(wal);
  RegionServerAccounting rsa = Mockito.mock(RegionServerAccounting.class);
  Mockito.when(mockRS.getRegionServerAccounting()).thenReturn(rsa);
  ServerName mockServerName = Mockito.mock(ServerName.class);
  Mockito.when(mockServerName.getServerName()).thenReturn(tableNameStr + "-server-1234");
  Mockito.when(mockRS.getServerName()).thenReturn(mockServerName);
  HRegion region = new HRegion(basedir, wal, this.fs, this.conf, hri, htd, mockRS);
  long seqid = region.initialize();
  // HRegionServer usually does this. It knows the largest seqid across all regions.
  wal.setSequenceNumber(seqid);
  
  //make an attempted write to the primary that should also be indexed
  byte[] rowkey = Bytes.toBytes("indexed_row_key");
  Put p = new Put(rowkey);
  p.add(family, Bytes.toBytes("qual"), Bytes.toBytes("value"));
  region.put(new Put[] { p });

  // we should then see the server go down
  Mockito.verify(mockRS, Mockito.times(1)).abort(Mockito.anyString(),
    Mockito.any(Exception.class));
  region.close(true);
  wal.close();

  // then create the index table so we are successful on WAL replay
  CoveredColumnIndexer.createIndexTable(UTIL.getHBaseAdmin(), INDEX_TABLE_NAME);

  // run the WAL split and setup the region
  runWALSplit(this.conf);
  HLog wal2 = createWAL(this.conf);
  HRegion region1 = new HRegion(basedir, wal2, this.fs, this.conf, hri, htd, mockRS);

  // initialize the region - this should replay the WALEdits from the WAL
  region1.initialize();

  // now check to ensure that we wrote to the index table
  HTable index = new HTable(UTIL.getConfiguration(), INDEX_TABLE_NAME);
  int indexSize = getKeyValueCount(index);
  assertEquals("Index wasn't properly updated from WAL replay!", 1, indexSize);
  Get g = new Get(rowkey);
  final Result result = region1.get(g);
  assertEquals("Primary region wasn't updated from WAL replay!", 1, result.size());

  // cleanup the index table
  HBaseAdmin admin = UTIL.getHBaseAdmin();
  admin.disableTable(INDEX_TABLE_NAME);
  admin.deleteTable(INDEX_TABLE_NAME);
  admin.close();
}
 
Example 20   Project: phoenix   File: LocalIndexIT.java
@Test
public void testLocalIndexStateWhenSplittingInProgress() throws Exception {
    createBaseTable(TestUtil.DEFAULT_DATA_TABLE_NAME+"2", null, "('e','j','o')");
    Connection conn1 = DriverManager.getConnection(getUrl());
    try{
        String[] strings = {"a","b","c","d","e","f","g","h","i","j","k","l","m","n","o","p","q","r","s","t","u","v","w","x","y","z"};
        for (int i = 0; i < 26; i++) {
            conn1.createStatement().execute(
                "UPSERT INTO " + TestUtil.DEFAULT_DATA_TABLE_NAME+"2" + " values('"+strings[i]+"'," + i + ","
                        + (i + 1) + "," + (i + 2) + ",'" + strings[25 - i] + "')");
        }
        conn1.commit();
        conn1.createStatement().execute("CREATE LOCAL INDEX " + TestUtil.DEFAULT_INDEX_TABLE_NAME + " ON " + TestUtil.DEFAULT_DATA_TABLE_NAME+"2" + "(v1)");
        conn1.createStatement().execute("CREATE LOCAL INDEX " + TestUtil.DEFAULT_INDEX_TABLE_NAME + "_2 ON " + TestUtil.DEFAULT_DATA_TABLE_NAME+"2" + "(k3)");

        ResultSet rs = conn1.createStatement().executeQuery("SELECT * FROM " + TestUtil.DEFAULT_DATA_TABLE_NAME+"2");
        assertTrue(rs.next());
        HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
        HTableDescriptor tableDesc = admin.getTableDescriptor(TableName.valueOf(TestUtil.DEFAULT_DATA_TABLE_NAME+"2"));
        tableDesc.removeCoprocessor(LocalIndexSplitter.class.getName());
        tableDesc.addCoprocessor(MockedLocalIndexSplitter.class.getName(), null,
            1, null);
        admin.disableTable(tableDesc.getTableName());
        admin.modifyTable(tableDesc.getTableName(), tableDesc);
        admin.enableTable(tableDesc.getTableName());
        TableName indexTable =
                TableName.valueOf(MetaDataUtil.getLocalIndexTableName(TestUtil.DEFAULT_DATA_TABLE_NAME+"2"));
        HTableDescriptor indexTableDesc = admin.getTableDescriptor(indexTable);
        indexTableDesc.removeCoprocessor(IndexHalfStoreFileReaderGenerator.class.getName());
        indexTableDesc.addCoprocessor(MockedIndexHalfStoreFileReaderGenerator.class.getName(), null,
            1, null);
        admin.disableTable(indexTable);
        admin.modifyTable(indexTable, indexTableDesc);
        admin.enableTable(indexTable);

        admin.split(Bytes.toBytes(TestUtil.DEFAULT_DATA_TABLE_NAME+"2"), ByteUtil.concat(Bytes.toBytes(strings[3])));
        List<HRegionInfo> regionsOfUserTable =
                admin.getTableRegions(TableName.valueOf(TestUtil.DEFAULT_DATA_TABLE_NAME+"2"));

        while (regionsOfUserTable.size() != 5) {
            Thread.sleep(100);
            regionsOfUserTable = admin.getTableRegions(TableName.valueOf(TestUtil.DEFAULT_DATA_TABLE_NAME+"2"));
        }
        assertEquals(5, regionsOfUserTable.size());

        List<HRegionInfo> regionsOfIndexTable = admin.getTableRegions(indexTable);

        while (regionsOfIndexTable.size() != 5) {
            Thread.sleep(100);
            regionsOfIndexTable = admin.getTableRegions(indexTable);
        }

        assertEquals(5, regionsOfIndexTable.size());
        latch1.await();
        // Verify the metadata for index is correct.
        rs = conn1.getMetaData().getTables(null, StringUtil.escapeLike(TestUtil.DEFAULT_SCHEMA_NAME), TestUtil.DEFAULT_INDEX_TABLE_NAME,
                new String[] { PTableType.INDEX.toString() });
        assertTrue(rs.next());
        assertEquals(TestUtil.DEFAULT_INDEX_TABLE_NAME, rs.getString(3));
        assertEquals(PIndexState.INACTIVE.toString(), rs.getString("INDEX_STATE"));
        assertFalse(rs.next());
        rs = conn1.getMetaData().getTables(null, StringUtil.escapeLike(TestUtil.DEFAULT_SCHEMA_NAME), TestUtil.DEFAULT_INDEX_TABLE_NAME+"_2",
            new String[] { PTableType.INDEX.toString() });
        assertTrue(rs.next());
        assertEquals(TestUtil.DEFAULT_INDEX_TABLE_NAME+"_2", rs.getString(3));
        assertEquals(PIndexState.INACTIVE.toString(), rs.getString("INDEX_STATE"));
        assertFalse(rs.next());

        String query = "SELECT t_id,k1,v1 FROM " + TestUtil.DEFAULT_DATA_TABLE_NAME+"2";
        rs = conn1.createStatement().executeQuery("EXPLAIN " + query);
        assertEquals("CLIENT PARALLEL " + 1 + "-WAY FULL SCAN OVER " + TestUtil.DEFAULT_DATA_TABLE_NAME+"2",
            QueryUtil.getExplainPlan(rs));
        latch2.countDown();
    } finally {
        conn1.close();
        latch1.countDown();
        latch2.countDown();
    }
}