The following are example usages of org.apache.hadoop.hbase.TableName#getNamespaceAsString(), collected from several open-source projects.
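As a quick orientation before the examples, here is a minimal, self-contained sketch of what getNamespaceAsString() returns; the table names are invented for illustration:

import org.apache.hadoop.hbase.TableName;

public class NamespaceDemo {
  public static void main(String[] args) {
    // A fully qualified name: the namespace is the part before the colon.
    TableName t1 = TableName.valueOf("ns1:orders");
    System.out.println(t1.getNamespaceAsString()); // ns1
    System.out.println(t1.getQualifierAsString()); // orders
    // Without an explicit namespace, the table lands in the "default" namespace.
    TableName t2 = TableName.valueOf("orders");
    System.out.println(t2.getNamespaceAsString()); // default
  }
}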
private void deleteTableInstance(HBaseOperationContext hbaseOperationContext) {
  TableName tableName = hbaseOperationContext.getTableName();
  String nameSpaceName = tableName.getNamespaceAsString();
  if (nameSpaceName == null) {
    nameSpaceName = tableName.getNameWithNamespaceInclAsString();
  }
  String tableNameStr = tableName.getNameAsString();
  String tableQName = getTableQualifiedName(getMetadataNamespace(), nameSpaceName, tableNameStr);
  AtlasObjectId tableId =
    new AtlasObjectId(HBaseDataTypes.HBASE_TABLE.getName(), REFERENCEABLE_ATTRIBUTE_NAME, tableQName);
  LOG.info("Delete Table {}", tableQName);
  hbaseOperationContext.addMessage(
    new EntityDeleteRequestV2(hbaseOperationContext.getUser(), Collections.singletonList(tableId)));
}
private void deleteColumnFamilyInstance(HBaseOperationContext hbaseOperationContext) {
  TableName tableName = hbaseOperationContext.getTableName();
  String nameSpaceName = tableName.getNamespaceAsString();
  if (nameSpaceName == null) {
    nameSpaceName = tableName.getNameWithNamespaceInclAsString();
  }
  String tableNameStr = tableName.getNameAsString();
  String columnFamilyName = hbaseOperationContext.getColummFamily();
  String columnFamilyQName =
    getColumnFamilyQualifiedName(getMetadataNamespace(), nameSpaceName, tableNameStr, columnFamilyName);
  AtlasObjectId columnFamilyId =
    new AtlasObjectId(HBaseDataTypes.HBASE_COLUMN_FAMILY.getName(), REFERENCEABLE_ATTRIBUTE_NAME, columnFamilyQName);
  LOG.info("Delete ColumnFamily {}", columnFamilyQName);
  hbaseOperationContext.addMessage(
    new EntityDeleteRequestV2(hbaseOperationContext.getUser(), Collections.singletonList(columnFamilyId)));
}
public <T extends InternalScanner> T wrapScannerWithOps(
    final TableName tableName,
    final T scanner,
    final Scan scan,
    final ServerOpScope scope,
    final ScannerWrapperFactory<T> factory) {
  if (!tableName.isSystemTable()) {
    final String namespace = tableName.getNamespaceAsString();
    final String qualifier = tableName.getQualifierAsString();
    final Collection<HBaseServerOp> orderedServerOps =
        opStore.getOperations(namespace, qualifier, scope);
    if (!orderedServerOps.isEmpty()) {
      return factory.createScannerWrapper(orderedServerOps, scanner, scan);
    }
  }
  return scanner;
}
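ScannerWrapperFactory, HBaseServerOp, ServerOpScope, and opStore are project-specific types that the snippet above assumes. A plausible shape for the factory, reconstructed only from how it is called here (the real project-specific definition may differ):

// Hypothetical reconstruction of the factory interface used above.
interface ScannerWrapperFactory<T extends InternalScanner> {
  T createScannerWrapper(Collection<HBaseServerOp> orderedServerOps, T scanner, Scan scan);
}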
/**
 * Suspend the procedure if the specified table is already locked.
 * Other operations in the table-queue will be executed after the lock is released.
 * @param procedure the procedure trying to acquire the lock
 * @param table Table to lock
 * @return true if the procedure has to wait for the table to be available
 */
public boolean waitTableExclusiveLock(final Procedure<?> procedure, final TableName table) {
  schedLock();
  try {
    final String namespace = table.getNamespaceAsString();
    final LockAndQueue namespaceLock = locking.getNamespaceLock(namespace);
    final LockAndQueue tableLock = locking.getTableLock(table);
    if (!namespaceLock.trySharedLock(procedure)) {
      waitProcedure(namespaceLock, procedure);
      logLockedResource(LockedResourceType.NAMESPACE, namespace);
      return true;
    }
    if (!tableLock.tryExclusiveLock(procedure)) {
      namespaceLock.releaseSharedLock();
      waitProcedure(tableLock, procedure);
      logLockedResource(LockedResourceType.TABLE, table.getNameAsString());
      return true;
    }
    removeFromRunQueue(tableRunQueue, getTableQueue(table),
      () -> procedure + " held the exclusive lock");
    return false;
  } finally {
    schedUnlock();
  }
}
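In the HBase master procedure framework, a true return means the caller should suspend and retry once it is woken. A minimal sketch of the usual acquire/release pairing, assuming the scheduler's matching wakeTableExclusiveLock method (illustrative, not authoritative):

// Sketch of how a table procedure typically uses the scheduler lock methods.
protected LockState acquireLock(MasterProcedureEnv env) {
  if (env.getProcedureScheduler().waitTableExclusiveLock(this, getTableName())) {
    return LockState.LOCK_EVENT_WAIT; // suspended; re-run when the lock frees up
  }
  return LockState.LOCK_ACQUIRED;
}

protected void releaseLock(MasterProcedureEnv env) {
  env.getProcedureScheduler().wakeTableExclusiveLock(this, getTableName());
}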
/**
 * Check if adding a region violates the namespace quota; if not, update the namespace cache.
 *
 * @param name name of the table the region belongs to
 * @param regionName name of the region being added
 * @param incr the change in region count (positive when adding regions)
 * @return true if the region can be added to the table
 * @throws IOException Signals that an I/O exception has occurred.
 */
synchronized boolean checkAndUpdateNamespaceRegionCount(TableName name,
    byte[] regionName, int incr) throws IOException {
  String namespace = name.getNamespaceAsString();
  NamespaceDescriptor nspdesc = getNamespaceDescriptor(namespace);
  if (nspdesc != null) {
    NamespaceTableAndRegionInfo currentStatus;
    currentStatus = getState(namespace);
    int regionCount = currentStatus.getRegionCount();
    long maxRegionCount = TableNamespaceManager.getMaxRegions(nspdesc);
    if (incr > 0 && regionCount >= maxRegionCount) {
      LOG.warn("The region " + Bytes.toStringBinary(regionName)
        + " cannot be created. The region count will exceed quota on the namespace. "
        + "This may be transient, please retry later if there are any ongoing split"
        + " operations in the namespace.");
      return false;
    }
    NamespaceTableAndRegionInfo nsInfo = nsStateCache.get(namespace);
    if (nsInfo != null) {
      nsInfo.incRegionCountForTable(name, incr);
    } else {
      LOG.warn("No namespace state found for namespace: " + namespace);
    }
  }
  return true;
}
/**
 * Check and update the region count for an existing table, e.g. to handle scenarios like
 * restoring a snapshot.
 * @param name name of the table whose region count needs to be checked and updated
 * @param incr new count of regions for the table
 * @throws QuotaExceededException if the resulting number of regions exceeds the quota
 *           allowed in the namespace
 * @throws IOException Signals that an I/O exception has occurred.
 */
synchronized void checkAndUpdateNamespaceRegionCount(TableName name, int incr)
    throws IOException {
  String namespace = name.getNamespaceAsString();
  NamespaceDescriptor nspdesc = getNamespaceDescriptor(namespace);
  if (nspdesc != null) {
    NamespaceTableAndRegionInfo currentStatus = getState(namespace);
    int regionCountOfTable = currentStatus.getRegionCountOfTable(name);
    if ((currentStatus.getRegionCount() - regionCountOfTable + incr) > TableNamespaceManager
        .getMaxRegions(nspdesc)) {
      throw new QuotaExceededException("The table " + name.getNameAsString()
        + " region count cannot be updated as it would exceed maximum number "
        + "of regions allowed in the namespace. The total number of regions permitted is "
        + TableNamespaceManager.getMaxRegions(nspdesc));
    }
    currentStatus.removeTable(name);
    currentStatus.addTable(name, incr);
  }
}
public static HRecord generate(final TableName tableName, final List<Cell> cells) {
  final byte[] rowkey = CellUtil.cloneRow(cells.get(0));
  final List<HColumn> columns = cells.stream().map(cell -> {
    byte[] family = CellUtil.cloneFamily(cell);
    byte[] qualifier = CellUtil.cloneQualifier(cell);
    byte[] value = CellUtil.cloneValue(cell);
    byte type = cell.getTypeByte();
    long timestamp = cell.getTimestamp();
    return new HColumn(family, qualifier, value, type, timestamp);
  }).collect(toList());
  return new HRecord(tableName.getNamespaceAsString(), tableName.getNameAsString(), rowkey, columns);
}
private HBaseOperationContext handleHBaseTableOperation(TableDescriptor tableDescriptor, TableName tableName, OPERATION operation, UserGroupInformation ugi, String userName) {
  if (LOG.isDebugEnabled()) {
    LOG.debug("==> HBaseAtlasHook.handleHBaseTableOperation()");
  }
  Map<String, String> hbaseConf = null;
  String owner = null;
  String tableNameSpace = null;
  TableName hbaseTableName = null;
  ColumnFamilyDescriptor[] columnFamilyDescriptors = null;
  if (tableDescriptor != null) {
    owner = tableDescriptor.getOwnerString();
    hbaseConf = null;
    hbaseTableName = tableDescriptor.getTableName();
    if (hbaseTableName != null) {
      tableNameSpace = hbaseTableName.getNamespaceAsString();
      if (tableNameSpace == null) {
        tableNameSpace = hbaseTableName.getNameWithNamespaceInclAsString();
      }
    }
  }
  if (owner == null) {
    owner = userName;
  }
  if (tableDescriptor != null) {
    columnFamilyDescriptors = tableDescriptor.getColumnFamilies();
  }
  HBaseOperationContext hbaseOperationContext = new HBaseOperationContext(tableNameSpace, tableDescriptor, tableName, columnFamilyDescriptors, operation, ugi, userName, owner, hbaseConf);
  createAtlasInstances(hbaseOperationContext);
  if (LOG.isDebugEnabled()) {
    LOG.debug("<== HBaseAtlasHook.handleHBaseTableOperation(): {}", hbaseOperationContext);
  }
  return hbaseOperationContext;
}
static GTable getTable(Humpback humpback, String fullTableName) {
  GTable table = null;
  TableName tableName = TableName.valueOf(fullTableName);
  String ns = tableName.getNamespaceAsString();
  String name = tableName.getNameAsString();
  for (GTable t : humpback.getTables(ns)) {
    TableName tn = getTableName(humpback, t.getId());
    if (name.equals(tn.getNameAsString())) {
      table = t;
      break;
    }
  }
  return table;
}
/**
 * The return value represents a path such as:
 * ".../user/biadmin/backup1/default/t1_dn/backup_1396650096738/archive/data/default/t1_dn"
 * @param tableName table name
 * @return path to table archive
 * @throws IOException exception
 */
Path getTableArchivePath(TableName tableName) throws IOException {
  Path baseDir =
    new Path(HBackupFileSystem.getTableBackupPath(tableName, backupRootPath, backupId),
      HConstants.HFILE_ARCHIVE_DIRECTORY);
  Path dataDir = new Path(baseDir, HConstants.BASE_NAMESPACE_DIR);
  Path archivePath = new Path(dataDir, tableName.getNamespaceAsString());
  Path tableArchivePath = new Path(archivePath, tableName.getQualifierAsString());
  if (!fs.exists(tableArchivePath) || !fs.getFileStatus(tableArchivePath).isDirectory()) {
    LOG.debug("Folder tableArchivePath: " + tableArchivePath.toString() + " does not exist");
    tableArchivePath = null; // empty table has no archive
  }
  return tableArchivePath;
}
private void addTable(TableName tableName, int regionCount) throws IOException {
  NamespaceTableAndRegionInfo info =
    nsStateCache.get(tableName.getNamespaceAsString());
  if (info != null) {
    info.addTable(tableName, regionCount);
  } else {
    throw new IOException("Bad state: namespace quota information not found for namespace: "
      + tableName.getNamespaceAsString());
  }
}
private boolean prepareCreate(final MasterProcedureEnv env) throws IOException {
  final TableName tableName = getTableName();
  if (MetaTableAccessor.tableExists(env.getMasterServices().getConnection(), tableName)) {
    setFailure("master-create-table", new TableExistsException(getTableName()));
    return false;
  }
  // check that we have at least 1 CF
  if (tableDescriptor.getColumnFamilyCount() == 0) {
    setFailure("master-create-table", new DoNotRetryIOException(
      "Table " + getTableName().toString() + " should have at least one column family."));
    return false;
  }
  if (!tableName.isSystemTable()) {
    // do not check rs group for system tables as we may block the bootstrap.
    Supplier<String> forWhom = () -> "table " + tableName;
    RSGroupInfo rsGroupInfo = MasterProcedureUtil.checkGroupExists(
      env.getMasterServices().getRSGroupInfoManager()::getRSGroup,
      tableDescriptor.getRegionServerGroup(), forWhom);
    if (rsGroupInfo == null) {
      // no rs group is set on the table, so check whether the namespace has one
      String namespace = tableName.getNamespaceAsString();
      NamespaceDescriptor nd = env.getMasterServices().getClusterSchema().getNamespace(namespace);
      forWhom = () -> "table " + tableName + " (inherited from namespace)";
      rsGroupInfo = MasterProcedureUtil.checkGroupExists(
        env.getMasterServices().getRSGroupInfoManager()::getRSGroup,
        MasterProcedureUtil.getNamespaceGroup(nd), forWhom);
    }
    MasterProcedureUtil.checkGroupNotEmpty(rsGroupInfo, forWhom);
  }
  return true;
}
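The namespace fallback above mirrors how a region server group can be set either on the table itself or inherited from its namespace. With a recent HBase that supports per-table rs groups, the table-level setting would be made roughly like this (table and group names invented):

TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("ns1:orders"))
    .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
    .setRegionServerGroup("group1") // checked by prepareCreate() above
    .build();
admin.createTable(td);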
/**
 * Decide whether the table needs to be replicated to the peer cluster.
 * @param table name of the table
 * @return true if the table needs to be replicated to the peer cluster
 */
public boolean needToReplicate(TableName table) {
  String namespace = table.getNamespaceAsString();
  if (replicateAllUserTables) {
    // replicate all user tables, but filter by exclude namespaces and table-cfs config
    if (excludeNamespaces != null && excludeNamespaces.contains(namespace)) {
      return false;
    }
    // trap here, must check existence first since HashMap allows null values.
    if (excludeTableCFsMap == null || !excludeTableCFsMap.containsKey(table)) {
      return true;
    }
    Collection<String> cfs = excludeTableCFsMap.get(table);
    // if cfs is null or empty then we know we do not need to replicate this table;
    // otherwise, we may still need to replicate the table but filter out some families.
    return cfs != null && !cfs.isEmpty();
  } else {
    // Not replicating all user tables, so filter by the namespaces and table-cfs config
    if (namespaces == null && tableCFsMap == null) {
      return false;
    }
    // First filter by the namespaces config.
    // If the table's namespace is in the peer config, all of the table's data is replicated.
    if (namespaces != null && namespaces.contains(namespace)) {
      return true;
    }
    return tableCFsMap != null && tableCFsMap.containsKey(table);
  }
}
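The flags this method checks (replicateAllUserTables, namespaces, tableCFsMap, and their exclude counterparts) are populated from the peer's ReplicationPeerConfig. A configuration under which needToReplicate would return true for every table in ns1 plus cf1 of other:t1 might be built like this (peer id and cluster key are placeholders):

Map<TableName, List<String>> tableCfs = new HashMap<>();
tableCfs.put(TableName.valueOf("other:t1"), Arrays.asList("cf1"));
ReplicationPeerConfig peerConfig = ReplicationPeerConfig.newBuilder()
    .setClusterKey("peer-zk:2181:/hbase") // placeholder cluster key
    .setReplicateAllUserTables(false)
    .setNamespaces(Collections.singleton("ns1"))
    .setTableCFsMap(tableCfs)
    .build();
admin.addReplicationPeer("peer1", peerConfig);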
public static String getFileNameCompatibleString(TableName table) {
  return table.getNamespaceAsString() + "-" + table.getQualifierAsString();
}
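For example (a fragment, assuming the helper's enclosing class is in scope), a fully qualified name flattens to a dash-separated string that is safe to use in file names:

// "ns1:t1" becomes "ns1-t1"; a table in the default namespace yields "default-t1".
String flat = getFileNameCompatibleString(TableName.valueOf("ns1:t1"));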
/**
 * Backup request execution.
 *
 * @throws IOException if the execution of the backup fails
 */
@Override
public void execute() throws IOException {
  try (Admin admin = conn.getAdmin()) {
    // Begin BACKUP
    beginBackup(backupManager, backupInfo);
    String savedStartCode;
    boolean firstBackup;
    // do snapshot for full table backup
    savedStartCode = backupManager.readBackupStartCode();
    firstBackup = savedStartCode == null || Long.parseLong(savedStartCode) == 0L;
    if (firstBackup) {
      // This is our first backup. Let's put some marker in the system table so that we can
      // hold the logs while we do the backup.
      backupManager.writeBackupStartCode(0L);
    }
    // We roll the log here before we do the snapshot. It is possible there is duplicate data
    // in the log that is already in the snapshot. But if we do it after the snapshot, we
    // could have data loss.
    // A better approach is to do the log roll on each RS in the same global procedure as
    // the snapshot.
    LOG.info("Execute roll log procedure for full backup ...");
    Map<String, String> props = new HashMap<>();
    props.put("backupRoot", backupInfo.getBackupRootDir());
    admin.execProcedure(LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_SIGNATURE,
      LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_NAME, props);
    newTimestamps = backupManager.readRegionServerLastLogRollResult();
    if (firstBackup) {
      // Update the registered log files.
      // We record ALL old WAL files as registered, because this is the first full backup in
      // the system and these files are not needed for the next incremental backup.
      List<String> logFiles = BackupUtils.getWALFilesOlderThan(conf, newTimestamps);
      backupManager.recordWALFiles(logFiles);
    }
    // SNAPSHOT_TABLES:
    backupInfo.setPhase(BackupPhase.SNAPSHOT);
    for (TableName tableName : tableList) {
      String snapshotName =
        "snapshot_" + Long.toString(EnvironmentEdgeManager.currentTime()) + "_"
          + tableName.getNamespaceAsString() + "_" + tableName.getQualifierAsString();
      snapshotTable(admin, tableName, snapshotName);
      backupInfo.setSnapshotName(tableName, snapshotName);
    }
    // SNAPSHOT_COPY:
    // do snapshot copy
    LOG.debug("snapshot copy for " + backupId);
    snapshotCopy(backupInfo);
    // Update the incremental backup table set.
    backupManager.addIncrementalBackupTableSet(backupInfo.getTables());
    // BACKUP_COMPLETE:
    // Set the overall backup status to complete. After this checkpoint, the backup will be
    // finished even if a cancel request comes in.
    backupInfo.setState(BackupState.COMPLETE);
    // The table list in backupInfo is good for both full backup and incremental backup.
    // For incremental backup, it contains the incremental backup table set.
    backupManager.writeRegionServerLogTimestamp(backupInfo.getTables(), newTimestamps);
    HashMap<TableName, HashMap<String, Long>> newTableSetTimestampMap =
      backupManager.readLogTimestampMap();
    Long newStartCode =
      BackupUtils.getMinValue(BackupUtils.getRSLogTimestampMins(newTableSetTimestampMap));
    backupManager.writeBackupStartCode(newStartCode);
    // backup complete
    completeBackup(conn, backupInfo, backupManager, BackupType.FULL, conf);
  } catch (Exception e) {
    failBackup(conn, backupInfo, backupManager, e, "Unexpected BackupException : ",
      BackupType.FULL, conf);
    throw new IOException(e);
  }
}
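The variant below appears to be a test version of the same full-backup client: the flow is identical, but failStageIf(...) hooks inject a failure at a chosen stage and an autoRestoreOnFailure flag guards the cleanup path.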
@Override
public void execute() throws IOException {
  // Get the stage ID to fail on
  try (Admin admin = conn.getAdmin()) {
    // Begin BACKUP
    beginBackup(backupManager, backupInfo);
    failStageIf(Stage.stage_0);
    String savedStartCode;
    boolean firstBackup;
    // do snapshot for full table backup
    savedStartCode = backupManager.readBackupStartCode();
    firstBackup = savedStartCode == null || Long.parseLong(savedStartCode) == 0L;
    if (firstBackup) {
      // This is our first backup. Let's put some marker in the system table so that we can
      // hold the logs while we do the backup.
      backupManager.writeBackupStartCode(0L);
    }
    failStageIf(Stage.stage_1);
    // We roll the log here before we do the snapshot. It is possible there is duplicate data
    // in the log that is already in the snapshot. But if we do it after the snapshot, we
    // could have data loss.
    // A better approach is to do the log roll on each RS in the same global procedure as
    // the snapshot.
    LOG.info("Execute roll log procedure for full backup ...");
    Map<String, String> props = new HashMap<>();
    props.put("backupRoot", backupInfo.getBackupRootDir());
    admin.execProcedure(LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_SIGNATURE,
      LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_NAME, props);
    failStageIf(Stage.stage_2);
    newTimestamps = backupManager.readRegionServerLastLogRollResult();
    if (firstBackup) {
      // Update the registered log files.
      // We record ALL old WAL files as registered, because this is the first full backup in
      // the system and these files are not needed for the next incremental backup.
      List<String> logFiles = BackupUtils.getWALFilesOlderThan(conf, newTimestamps);
      backupManager.recordWALFiles(logFiles);
    }
    // SNAPSHOT_TABLES:
    backupInfo.setPhase(BackupPhase.SNAPSHOT);
    for (TableName tableName : tableList) {
      String snapshotName =
        "snapshot_" + Long.toString(EnvironmentEdgeManager.currentTime()) + "_"
          + tableName.getNamespaceAsString() + "_" + tableName.getQualifierAsString();
      snapshotTable(admin, tableName, snapshotName);
      backupInfo.setSnapshotName(tableName, snapshotName);
    }
    failStageIf(Stage.stage_3);
    // SNAPSHOT_COPY:
    // do snapshot copy
    LOG.debug("snapshot copy for " + backupId);
    snapshotCopy(backupInfo);
    // Update the incremental backup table set.
    backupManager.addIncrementalBackupTableSet(backupInfo.getTables());
    // BACKUP_COMPLETE:
    // Set the overall backup status to complete. After this checkpoint, the backup will be
    // finished even if a cancel request comes in.
    backupInfo.setState(BackupState.COMPLETE);
    // The table list in backupInfo is good for both full backup and incremental backup.
    // For incremental backup, it contains the incremental backup table set.
    backupManager.writeRegionServerLogTimestamp(backupInfo.getTables(), newTimestamps);
    HashMap<TableName, HashMap<String, Long>> newTableSetTimestampMap =
      backupManager.readLogTimestampMap();
    Long newStartCode =
      BackupUtils.getMinValue(BackupUtils.getRSLogTimestampMins(newTableSetTimestampMap));
    backupManager.writeBackupStartCode(newStartCode);
    failStageIf(Stage.stage_4);
    // backup complete
    completeBackup(conn, backupInfo, backupManager, BackupType.FULL, conf);
  } catch (Exception e) {
    if (autoRestoreOnFailure) {
      failBackup(conn, backupInfo, backupManager, e, "Unexpected BackupException : ",
        BackupType.FULL, conf);
    }
    throw new IOException(e);
  }
}
/**
 * Setting a namespace in the peer config means that all tables in that namespace will be
 * replicated to the peer cluster.
 * <ol>
 * <li>If the peer config already contains a namespace, then no table of that namespace may be
 * added to the peer config.</li>
 * <li>If the peer config already contains a table, then that table's namespace may not be
 * added to the peer config.</li>
 * </ol>
 * <p>
 * Setting an exclude namespace in the peer config means that no table in that namespace will
 * be replicated to the peer cluster.
 * <ol>
 * <li>If the peer config already contains an exclude namespace, then no exclude table of that
 * namespace may be added to the peer config.</li>
 * <li>If the peer config already contains an exclude table, then that table's namespace may
 * not be added as an exclude namespace.</li>
 * </ol>
 */
private void checkNamespacesAndTableCfsConfigConflict(Set<String> namespaces,
    Map<TableName, ? extends Collection<String>> tableCfs) throws DoNotRetryIOException {
  if (namespaces == null || namespaces.isEmpty()) {
    return;
  }
  if (tableCfs == null || tableCfs.isEmpty()) {
    return;
  }
  for (Map.Entry<TableName, ? extends Collection<String>> entry : tableCfs.entrySet()) {
    TableName table = entry.getKey();
    if (namespaces.contains(table.getNamespaceAsString())) {
      throw new DoNotRetryIOException("Table-cfs " + table + " conflicts with namespace " +
        table.getNamespaceAsString() + " in the peer config");
    }
  }
}
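For instance, if peer "peer1" already replicates namespace ns1, appending an explicit table from that namespace should trip this check (peer id and names are invented):

// Expected to end in a DoNotRetryIOException: ns1:t1 conflicts with namespace ns1.
admin.appendReplicationPeerTableCFs("peer1",
    Collections.singletonMap(TableName.valueOf("ns1:t1"), Arrays.asList("cf1")));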
/**
 * Given the backup root dir, backup id and the table name, return the backup image location,
 * which is also where the backup manifest file is. The return value looks like:
 * "hdfs://backup.hbase.org:9000/user/biadmin/backup/backup_1396650096738/default/t1_dn/", where
 * "hdfs://backup.hbase.org:9000/user/biadmin/backup" is the backup root directory
 * @param backupRootDir backup root directory
 * @param backupId backup id
 * @param tableName table name
 * @return backupPath String for the particular table
 */
public static String getTableBackupDir(String backupRootDir, String backupId,
    TableName tableName) {
  return backupRootDir + Path.SEPARATOR + backupId + Path.SEPARATOR
    + tableName.getNamespaceAsString() + Path.SEPARATOR + tableName.getQualifierAsString()
    + Path.SEPARATOR;
}
/**
 * Returns the table directory under the WALRootDir for the specified table name.
 * @param conf configuration used to get the WALRootDir
 * @param tableName Table to get the directory for
 * @return a path to the WAL table directory for the specified table
 * @throws IOException if there is an exception determining the WALRootDir
 */
public static Path getWALTableDir(final Configuration conf, final TableName tableName)
    throws IOException {
  Path baseDir = new Path(getWALRootDir(conf), HConstants.BASE_NAMESPACE_DIR);
  return new Path(new Path(baseDir, tableName.getNamespaceAsString()),
    tableName.getQualifierAsString());
}