Source code examples for the class org.apache.hadoop.hbase.TableNotFoundException

The following examples show how org.apache.hadoop.hbase.TableNotFoundException is used in real projects, or you can follow the links to GitHub to view the full source.
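
Before the project examples, here is a minimal, self-contained sketch of the basic pattern; the table name "no_such_table" and the class name are made up for illustration. TableNotFoundException extends IOException, and client calls such as Admin.getDescriptor() throw it when the requested table does not exist, so callers usually catch it separately from the generic IOException handler.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class TableNotFoundExceptionDemo {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
        Admin admin = conn.getAdmin()) {
      try {
        // Throws TableNotFoundException when the table does not exist.
        admin.getDescriptor(TableName.valueOf("no_such_table"));
      } catch (TableNotFoundException tnfe) {
        // TableNotFoundException is an IOException subclass, so catch it
        // before (or instead of) the generic IOException handler.
        System.err.println("Table is missing: " + tnfe.getMessage());
      }
    }
  }
}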

Example 1  Project: Flink-CEPplus   File: HBaseRowInputFormat.java
private void connectToTable() {

		if (this.conf == null) {
			this.conf = HBaseConfiguration.create();
		}

		try {
			Connection conn = ConnectionFactory.createConnection(conf);
			super.table = (HTable) conn.getTable(TableName.valueOf(tableName));
		} catch (TableNotFoundException tnfe) {
			LOG.error("The table " + tableName + " not found ", tnfe);
			throw new RuntimeException("HBase table '" + tableName + "' not found.", tnfe);
		} catch (IOException ioe) {
			LOG.error("Exception while creating connection to HBase.", ioe);
			throw new RuntimeException("Cannot create connection to HBase.", ioe);
		}
	}
 
Example 2  Project: flink   File: HBaseRowInputFormat.java
private void connectToTable() {

		if (this.conf == null) {
			this.conf = HBaseConfiguration.create();
		}

		try {
			Connection conn = ConnectionFactory.createConnection(conf);
			super.table = (HTable) conn.getTable(TableName.valueOf(tableName));
		} catch (TableNotFoundException tnfe) {
			LOG.error("The table " + tableName + " not found ", tnfe);
			throw new RuntimeException("HBase table '" + tableName + "' not found.", tnfe);
		} catch (IOException ioe) {
			LOG.error("Exception while creating connection to HBase.", ioe);
			throw new RuntimeException("Cannot create connection to HBase.", ioe);
		}
	}
 
Example 3  Project: flink   File: HBaseLookupFunction.java
@Override
public void open(FunctionContext context) {
	LOG.info("start open ...");
	org.apache.hadoop.conf.Configuration config = prepareRuntimeConfiguration();
	try {
		hConnection = ConnectionFactory.createConnection(config);
		table = (HTable) hConnection.getTable(TableName.valueOf(hTableName));
	} catch (TableNotFoundException tnfe) {
		LOG.error("Table '{}' not found ", hTableName, tnfe);
		throw new RuntimeException("HBase table '" + hTableName + "' not found.", tnfe);
	} catch (IOException ioe) {
		LOG.error("Exception while creating connection to HBase.", ioe);
		throw new RuntimeException("Cannot create connection to HBase.", ioe);
	}
	this.readHelper = new HBaseReadWriteHelper(hbaseTableSchema);
	LOG.info("end open.");
}
 
Example 4  Project: hbase   File: RawAsyncHBaseAdmin.java
@Override
public CompletableFuture<Void> disableTableReplication(TableName tableName) {
  if (tableName == null) {
    return failedFuture(new IllegalArgumentException("Table name is null"));
  }
  CompletableFuture<Void> future = new CompletableFuture<>();
  addListener(tableExists(tableName), (exist, err) -> {
    if (err != null) {
      future.completeExceptionally(err);
      return;
    }
    if (!exist) {
      future.completeExceptionally(new TableNotFoundException(
        "Table '" + tableName.getNameAsString() + "' does not exists."));
      return;
    }
    addListener(setTableReplication(tableName, false), (result, err2) -> {
      if (err2 != null) {
        future.completeExceptionally(err2);
      } else {
        future.complete(result);
      }
    });
  });
  return future;
}
 
Example 5  Project: flink   File: HBaseRowDataLookupFunction.java
@Override
public void open(FunctionContext context) {
	LOG.info("start open ...");
	Configuration config = prepareRuntimeConfiguration();
	try {
		hConnection = ConnectionFactory.createConnection(config);
		table = (HTable) hConnection.getTable(TableName.valueOf(hTableName));
	} catch (TableNotFoundException tnfe) {
		LOG.error("Table '{}' not found ", hTableName, tnfe);
		throw new RuntimeException("HBase table '" + hTableName + "' not found.", tnfe);
	} catch (IOException ioe) {
		LOG.error("Exception while creating connection to HBase.", ioe);
		throw new RuntimeException("Cannot create connection to HBase.", ioe);
	}
	this.serde = new HBaseSerde(hbaseTableSchema, nullStringLiteral);
	LOG.info("end open.");
}
 
Example 6  Project: flink   File: HBaseLookupFunction.java
@Override
public void open(FunctionContext context) {
	LOG.info("start open ...");
	org.apache.hadoop.conf.Configuration config = prepareRuntimeConfiguration();
	try {
		hConnection = ConnectionFactory.createConnection(config);
		table = (HTable) hConnection.getTable(TableName.valueOf(hTableName));
	} catch (TableNotFoundException tnfe) {
		LOG.error("Table '{}' not found ", hTableName, tnfe);
		throw new RuntimeException("HBase table '" + hTableName + "' not found.", tnfe);
	} catch (IOException ioe) {
		LOG.error("Exception while creating connection to HBase.", ioe);
		throw new RuntimeException("Cannot create connection to HBase.", ioe);
	}
	this.readHelper = new HBaseReadWriteHelper(hbaseTableSchema);
	LOG.info("end open.");
}
 
/**
 * Check if there's a {@link TableNotFoundException} in the caused-by stack trace.
 */
@VisibleForTesting
public static boolean isTableNotFoundException(Throwable io) {
  if (io instanceof RemoteException) {
    io = ((RemoteException) io).unwrapRemoteException();
  }
  if (io != null && io.getMessage() != null && io.getMessage().contains("TableNotFoundException")) {
    return true;
  }
  for (; io != null; io = io.getCause()) {
    if (io instanceof TableNotFoundException) {
      return true;
    }
  }
  return false;
}
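
The helper above recognizes a TableNotFoundException even when it is buried in a cause chain or only mentioned in the exception message. A small hedged demo of the cause-chain walk follows; the class name and the nesting are made up, and the helper body is copied in simplified form (RemoteException unwrapping omitted) so the snippet compiles on its own.

import java.io.IOException;

import org.apache.hadoop.hbase.TableNotFoundException;

public class TnfeCauseChainDemo {
  public static void main(String[] args) {
    // A TableNotFoundException wrapped two levels deep, as a failed remote call might deliver it.
    IOException wrapped =
        new IOException("replication batch failed",
            new IOException(new TableNotFoundException("missing_table")));
    System.out.println(isTableNotFoundException(wrapped));              // true
    System.out.println(isTableNotFoundException(new IOException("x"))); // false
  }

  // Simplified copy of the helper: walk the cause chain and look for the
  // exception type, or for its name in a wrapped exception's message.
  static boolean isTableNotFoundException(Throwable io) {
    for (; io != null; io = io.getCause()) {
      if (io instanceof TableNotFoundException
          || (io.getMessage() != null && io.getMessage().contains("TableNotFoundException"))) {
        return true;
      }
    }
    return false;
  }
}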
 
Example 8  Project: hbase   File: SnapshotScannerHDFSAclController.java
@Override
public void postStartMaster(ObserverContext<MasterCoprocessorEnvironment> c) throws IOException {
  if (!initialized) {
    return;
  }
  try (Admin admin = c.getEnvironment().getConnection().getAdmin()) {
    if (admin.tableExists(PermissionStorage.ACL_TABLE_NAME)) {
      // Check if acl table has 'm' CF, if not, add 'm' CF
      TableDescriptor tableDescriptor = admin.getDescriptor(PermissionStorage.ACL_TABLE_NAME);
      boolean containHdfsAclFamily = Arrays.stream(tableDescriptor.getColumnFamilies()).anyMatch(
        family -> Bytes.equals(family.getName(), SnapshotScannerHDFSAclStorage.HDFS_ACL_FAMILY));
      if (!containHdfsAclFamily) {
        TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableDescriptor)
            .setColumnFamily(ColumnFamilyDescriptorBuilder
                .newBuilder(SnapshotScannerHDFSAclStorage.HDFS_ACL_FAMILY).build());
        admin.modifyTable(builder.build());
      }
      aclTableInitialized = true;
    } else {
      throw new TableNotFoundException("Table " + PermissionStorage.ACL_TABLE_NAME
          + " is not created yet. Please check if " + getClass().getName()
          + " is configured after " + AccessController.class.getName());
    }
  }
}
 
Example 9  Project: hbase   File: RSGroupAdminClient.java
/**
 * Move given set of servers and tables to the specified target RegionServer group.
 * @param servers set of servers to move
 * @param tables set of tables to move
 * @param targetGroup the target group name
 * @throws IOException if moving the server and tables fail
 */
public void moveServersAndTables(Set<Address> servers, Set<TableName> tables, String targetGroup)
  throws IOException {
  MoveServersAndTablesRequest.Builder builder =
    MoveServersAndTablesRequest.newBuilder().setTargetGroup(targetGroup);
  for (Address el : servers) {
    builder.addServers(HBaseProtos.ServerName.newBuilder().setHostName(el.getHostname())
      .setPort(el.getPort()).build());
  }
  for (TableName tableName : tables) {
    builder.addTableName(ProtobufUtil.toProtoTableName(tableName));
    if (!admin.tableExists(tableName)) {
      throw new TableNotFoundException(tableName);
    }
  }
  try {
    stub.moveServersAndTables(null, builder.build());
  } catch (ServiceException e) {
    throw ProtobufUtil.handleRemoteException(e);
  }
}
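
A hedged usage sketch for the client call above, assuming an RSGroupAdminClient built from an open Connection; the constructor, host name, and group name are illustrative and may differ between HBase versions. The call throws TableNotFoundException if any of the listed tables does not exist.

import java.io.IOException;
import java.util.Collections;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.net.Address;
import org.apache.hadoop.hbase.rsgroup.RSGroupAdminClient;

public class MoveServersAndTablesDemo {
  // Moves one region server and one table into the "batch" group in a single call.
  static void moveToBatchGroup(Connection conn) throws IOException {
    RSGroupAdminClient rsGroupAdmin = new RSGroupAdminClient(conn); // assumed constructor
    rsGroupAdmin.moveServersAndTables(
        Collections.singleton(Address.fromParts("rs1.example.com", 16020)),
        Collections.singleton(TableName.valueOf("my_table")),
        "batch");
  }
}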
 
Example 10  Project: hbase   File: TestAccessController3.java
private static void cleanUp() throws Exception {
  // Clean the _acl_ table
  // TODO: Skipping delete because of access issues w/ AMv2.
  // AMv1 seems to crash servers on exit too for same lack of
  // auth perms but it gets hung up.
  try {
    deleteTable(TEST_UTIL, TEST_TABLE);
  } catch (TableNotFoundException ex) {
    // Test deleted the table, no problem
    LOG.info("Test deleted table " + TEST_TABLE);
  }
  // Verify all table/namespace permissions are erased
  assertEquals(0, PermissionStorage.getTablePermissions(conf, TEST_TABLE).size());
  assertEquals(0,
    PermissionStorage.getNamespacePermissions(conf, TEST_TABLE.getNamespaceAsString()).size());
}
 
@Override
public void setup(Context context) throws TableNotFoundException,
    IOException {
  fs = FileSystem.get(context.getConfiguration());
  taskId = context.getTaskAttemptID().getTaskID().getId();
  schemaFileLocation = context.getConfiguration().get(SCHEMA_FILE_LOCATION_CONF);
  columns = generateColumnsFromSchemaFile(fs, schemaFileLocation);
  
  String outputPath = context.getConfiguration().get(OUTPUT_PATH_CONF);
  boolean shouldCompress = context.getConfiguration().getBoolean(SHOULD_COMPRESSION_CONF, false);
  
  OutputStream outputStream = fs.create(new Path(outputPath + "/part-m-" + StringUtils.leftPad(Integer.toString(taskId), 5, "0")), true);
  if (shouldCompress) {
    outputStream = new GZIPOutputStream(outputStream);
  }
  writer = new BufferedWriter(new OutputStreamWriter(outputStream));
  
  delimiter = context.getConfiguration().get(DELIMITER_CONF);
  rowKeyColumn = context.getConfiguration().get(ROW_KEY_COLUMN_CONF);
}
 
Example 12  Project: hbase   File: TestTruncateTableProcedure.java
@Test
public void testTruncateNotExistentTable() throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());

  final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
  // HBASE-20178 has us fail-fast, in the constructor, so add try/catch for this case.
  // Keep old way of looking at procedure too.
  Throwable cause = null;
  try {
    long procId = ProcedureTestingUtility.submitAndWait(procExec,
        new TruncateTableProcedure(procExec.getEnvironment(), tableName, true));

    // Second delete should fail with TableNotFound
    Procedure<?> result = procExec.getResult(procId);
    assertTrue(result.isFailed());
    cause = ProcedureTestingUtility.getExceptionCause(result);
  } catch (Throwable t) {
    cause = t;
  }
  LOG.debug("Truncate failed with exception: " + cause);
  assertTrue(cause instanceof TableNotFoundException);
}
 
Example 13  Project: hbase   File: TableOutputFormat.java
/**
 * Checks if the output table exists and is enabled.
 *
 * @param context  The current context.
 * @throws IOException When the check fails.
 * @throws InterruptedException When the job is aborted.
 * @see OutputFormat#checkOutputSpecs(JobContext)
 */
@Override
public void checkOutputSpecs(JobContext context) throws IOException,
    InterruptedException {
  Configuration hConf = getConf();
  if (hConf == null) {
    hConf = context.getConfiguration();
  }

  try (Admin admin = ConnectionFactory.createConnection(hConf).getAdmin()) {
    TableName tableName = TableName.valueOf(hConf.get(OUTPUT_TABLE));
    if (!admin.tableExists(tableName)) {
      throw new TableNotFoundException("Can't write, table does not exist:" +
          tableName.getNameAsString());
    }

    if (!admin.isTableEnabled(tableName)) {
      throw new TableNotEnabledException("Can't write, table is not enabled: " +
          tableName.getNameAsString());
    }
  }
}
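
Since checkOutputSpecs() fails the job when the output table is missing or disabled, a common precaution is to create or enable the table before submitting the job. The following is a hedged pre-flight sketch that mirrors the checks above; the single column family "cf" is illustrative.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.mapreduce.TableOutputFormat;

public class EnsureOutputTable {
  static void ensureOutputTable(Configuration conf) throws IOException {
    TableName out = TableName.valueOf(conf.get(TableOutputFormat.OUTPUT_TABLE));
    try (Connection conn = ConnectionFactory.createConnection(conf);
        Admin admin = conn.getAdmin()) {
      if (!admin.tableExists(out)) {
        // Create the table so checkOutputSpecs() will not throw TableNotFoundException.
        admin.createTable(TableDescriptorBuilder.newBuilder(out)
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
            .build());
      } else if (!admin.isTableEnabled(out)) {
        // Enable it so checkOutputSpecs() will not throw TableNotEnabledException.
        admin.enableTable(out);
      }
    }
  }
}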
 
Example 14  Project: hbase   File: RawAsyncHBaseAdmin.java
@Override
public CompletableFuture<Void> enableTableReplication(TableName tableName) {
  if (tableName == null) {
    return failedFuture(new IllegalArgumentException("Table name is null"));
  }
  CompletableFuture<Void> future = new CompletableFuture<>();
  addListener(tableExists(tableName), (exist, err) -> {
    if (err != null) {
      future.completeExceptionally(err);
      return;
    }
    if (!exist) {
      future.completeExceptionally(new TableNotFoundException(
        "Table '" + tableName.getNameAsString() + "' does not exists."));
      return;
    }
    addListener(getTableSplits(tableName), (splits, err1) -> {
      if (err1 != null) {
        future.completeExceptionally(err1);
      } else {
        addListener(checkAndSyncTableToPeerClusters(tableName, splits), (result, err2) -> {
          if (err2 != null) {
            future.completeExceptionally(err2);
          } else {
            addListener(setTableReplication(tableName, true), (result3, err3) -> {
              if (err3 != null) {
                future.completeExceptionally(err3);
              } else {
                future.complete(result3);
              }
            });
          }
        });
      }
    });
  });
  return future;
}
 
private static void createHTableIfNeeded(Connection conn, String tableName) throws IOException {
    Admin hbase = conn.getAdmin();

    try {
        boolean tableExist = false;
        try {
            hbase.getTableDescriptor(TableName.valueOf(tableName));
            tableExist = true;
        } catch (TableNotFoundException e) {
            //do nothing?
        }

        if (tableExist) {
            logger.info("HTable '{}' already exists", tableName);
            return;
        }

        logger.info("Creating HTable '{}'", tableName);

        HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));

        HColumnDescriptor fd = new HColumnDescriptor(CF);
        fd.setBlocksize(CELL_SIZE);
        desc.addFamily(fd);
        hbase.createTable(desc);

        logger.info("HTable '{}' created", tableName);
    } finally {
        hbase.close();
    }
}
 
Example 16  Project: HBase-ToHDFS   File: ExportHBaseTableToParquet.java
@Override
public void setup(Context context) throws TableNotFoundException, IOException {
  fs = FileSystem.get(context.getConfiguration());
  String schemaFileLocation = context.getConfiguration().get(SCHEMA_FILE_LOCATION_CONF);
  columns = generateColumnsFromSchemaFile(fs, schemaFileLocation);

  rowKeyColumn = context.getConfiguration().get(ROW_KEY_COLUMN_CONF, "");
}
 
@Override
public void setup(Context context) throws TableNotFoundException,
    IOException {
  taskId = context.getTaskAttemptID().getTaskID().getId();
  schemaFileLocation = context.getConfiguration().get(SCHEMA_FILE_LOCATION_CONF);
  FileSystem fs = FileSystem.get(context.getConfiguration());
  columns = generateColumnsFromSchemaFile(fs, schemaFileLocation);
  
  delimiter = context.getConfiguration().get(DELIMITER_CONF);
  rowKeyColumn = context.getConfiguration().get(ROW_KEY_COLUMN_CONF);
}
 
Example 18  Project: Kylin   File: GridTableHBaseBenchmark.java
private static void createHTableIfNeeded(HConnection conn, String tableName) throws IOException {
    HBaseAdmin hbase = new HBaseAdmin(conn);

    try {
        boolean tableExist = false;
        try {
            hbase.getTableDescriptor(TableName.valueOf(tableName));
            tableExist = true;
        } catch (TableNotFoundException e) {
            // table does not exist yet; fall through and create it
        }

        if (tableExist) {
            System.out.println("HTable '" + tableName + "' already exists");
            return;
        }

        System.out.println("Creating HTable '" + tableName + "'");

        HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));

        HColumnDescriptor fd = new HColumnDescriptor(CF);
        fd.setBlocksize(CELL_SIZE);
        desc.addFamily(fd);
        hbase.createTable(desc);

        System.out.println("HTable '" + tableName + "' created");
    } finally {
        hbase.close();
    }
}
 
Example 19  Project: phoenix   File: ProductMetricsIT.java
private static void destroyTable() throws Exception {
    // Physically delete HBase table so that splits occur as expected for each test
    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    ConnectionQueryServices services = DriverManager.getConnection(getUrl(), props).unwrap(PhoenixConnection.class).getQueryServices();
    HBaseAdmin admin = services.getAdmin();
    try {
        try {
            admin.disableTable(PRODUCT_METRICS_NAME);
            admin.deleteTable(PRODUCT_METRICS_NAME);
        } catch (TableNotFoundException e) {
            // table was never created or already deleted; nothing to do
        }
    } finally {
        admin.close();
    }
}
 
Example 20  Project: kylin   File: GridTableHBaseBenchmark.java
private static void createHTableIfNeeded(Connection conn, String tableName) throws IOException {
    Admin hbase = conn.getAdmin();

    try {
        boolean tableExist = false;
        try {
            hbase.getTableDescriptor(TableName.valueOf(tableName));
            tableExist = true;
        } catch (TableNotFoundException e) {
            //do nothing?
        }

        if (tableExist) {
            logger.info("HTable '{}' already exists", tableName);
            return;
        }

        logger.info("Creating HTable '{}'", tableName);

        HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));

        HColumnDescriptor fd = new HColumnDescriptor(CF);
        fd.setBlocksize(CELL_SIZE);
        desc.addFamily(fd);
        hbase.createTable(desc);

        logger.info("HTable '{}' created", tableName);
    } finally {
        hbase.close();
    }
}
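
The createHTableIfNeeded variants above rely on the older HTableDescriptor/HColumnDescriptor API, which is deprecated in HBase 2.x. Below is a hedged sketch of the same catch-TableNotFoundException-then-create logic using the builder API; CF and CELL_SIZE stand in for the constants referenced in the examples.

import java.io.IOException;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableIfNeeded {
  private static final byte[] CF = Bytes.toBytes("f");  // illustrative family name
  private static final int CELL_SIZE = 64 * 1024;       // illustrative block size

  static void createTableIfNeeded(Connection conn, String tableName) throws IOException {
    TableName name = TableName.valueOf(tableName);
    try (Admin admin = conn.getAdmin()) {
      try {
        admin.getDescriptor(name);  // throws TableNotFoundException when the table is absent
        return;                     // table already exists, nothing to do
      } catch (TableNotFoundException e) {
        // fall through and create the table
      }
      admin.createTable(TableDescriptorBuilder.newBuilder(name)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(CF)
              .setBlocksize(CELL_SIZE)
              .build())
          .build());
    }
  }
}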
 
Example 21  Project: flink   File: HBaseRowInputFormat.java
private void connectToTable() {
	try {
		Connection conn = ConnectionFactory.createConnection(getHadoopConfiguration());
		super.table = (HTable) conn.getTable(TableName.valueOf(tableName));
	} catch (TableNotFoundException tnfe) {
		LOG.error("The table " + tableName + " not found ", tnfe);
		throw new RuntimeException("HBase table '" + tableName + "' not found.", tnfe);
	} catch (IOException ioe) {
		LOG.error("Exception while creating connection to HBase.", ioe);
		throw new RuntimeException("Cannot create connection to HBase.", ioe);
	}
}
 
Example 22  Project: flink   File: HBaseRowDataInputFormat.java
private void connectToTable() {
	try {
		Connection conn = ConnectionFactory.createConnection(getHadoopConfiguration());
		super.table = (HTable) conn.getTable(TableName.valueOf(tableName));
	} catch (TableNotFoundException tnfe) {
		LOG.error("The table " + tableName + " not found ", tnfe);
		throw new RuntimeException("HBase table '" + tableName + "' not found.", tnfe);
	} catch (IOException ioe) {
		LOG.error("Exception while creating connection to HBase.", ioe);
		throw new RuntimeException("Cannot create connection to HBase.", ioe);
	}
}
 
Example 23  Project: hbase   File: ThriftAdmin.java
@Override
public TableDescriptor getDescriptor(TableName tableName)
    throws TableNotFoundException, IOException {
  TTableName tTableName = ThriftUtilities.tableNameFromHBase(tableName);
  try {
    TTableDescriptor tTableDescriptor = client.getTableDescriptor(tTableName);
    return ThriftUtilities.tableDescriptorFromThrift(tTableDescriptor);
  } catch (TException e) {
    throw new IOException(e);
  }
}
 
Example 24  Project: hbase   File: QuotaUtil.java
/**
 * Method to disable a table, if not already disabled. This method suppresses
 * {@link TableNotEnabledException}, if thrown while disabling the table.
 * @param conn connection to re-use
 * @param tableName table name which has moved into space quota violation
 */
public static void disableTableIfNotDisabled(Connection conn, TableName tableName)
    throws IOException {
  try {
    conn.getAdmin().disableTable(tableName);
  } catch (TableNotEnabledException | TableNotFoundException e) {
    // ignore
  }
}
 
Example 25  Project: hbase   File: HMaster.java
@Override
public void checkTableModifiable(final TableName tableName)
    throws IOException, TableNotFoundException, TableNotDisabledException {
  if (isCatalogTable(tableName)) {
    throw new IOException("Can't modify catalog tables");
  }
  checkTableExists(tableName);
  TableState ts = getTableStateManager().getTableState(tableName);
  if (!ts.isDisabled()) {
    throw new TableNotDisabledException("Not DISABLED; " + ts);
  }
}
 
Example 26  Project: hbase   File: MasterProcedureScheduler.java
@Override
public void completionCleanup(final Procedure proc) {
  if (proc instanceof TableProcedureInterface) {
    TableProcedureInterface iProcTable = (TableProcedureInterface) proc;
    boolean tableDeleted;
    if (proc.hasException()) {
      Exception procEx = proc.getException().unwrapRemoteException();
      if (iProcTable.getTableOperationType() == TableOperationType.CREATE) {
        // create failed because the table already exist
        tableDeleted = !(procEx instanceof TableExistsException);
      } else {
        // the operation failed because the table does not exist
        tableDeleted = (procEx instanceof TableNotFoundException);
      }
    } else {
      // the table was deleted
      tableDeleted = (iProcTable.getTableOperationType() == TableOperationType.DELETE);
    }
    if (tableDeleted) {
      markTableAsDeleted(iProcTable.getTableName(), proc);
      return;
    }
  } else if (proc instanceof PeerProcedureInterface) {
    tryCleanupPeerQueue(getPeerId(proc), proc);
  } else if (proc instanceof ServerProcedureInterface) {
    tryCleanupServerQueue(getServerName(proc), proc);
  } else {
    // No cleanup for other procedure types, yet.
    return;
  }
}
 
Example 27  Project: hbase   File: RestoreSnapshotProcedure.java
/**
 * Action before any real action of restoring from snapshot.
 * @param env MasterProcedureEnv
 * @throws IOException
 */
private void prepareRestore(final MasterProcedureEnv env) throws IOException {
  final TableName tableName = getTableName();
  // Checks whether the table exists
  if (!MetaTableAccessor.tableExists(env.getMasterServices().getConnection(), tableName)) {
    throw new TableNotFoundException(tableName);
  }

  // Check whether table is disabled.
  env.getMasterServices().checkTableModifiable(tableName);

  // Check that we have at least 1 CF
  if (modifiedTableDescriptor.getColumnFamilyCount() == 0) {
    throw new DoNotRetryIOException("Table " + getTableName().toString() +
      " should have at least one column family.");
  }

  if (!getTableName().isSystemTable()) {
    // Table already exist. Check and update the region quota for this table namespace.
    final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
    SnapshotManifest manifest = SnapshotManifest.open(
      env.getMasterConfiguration(),
      mfs.getFileSystem(),
      SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshot, mfs.getRootDir()),
      snapshot);
    int snapshotRegionCount = manifest.getRegionManifestsMap().size();
    int tableRegionCount =
        ProcedureSyncWait.getMasterQuotaManager(env).getRegionCountOfTable(tableName);

    if (snapshotRegionCount > 0 && tableRegionCount != snapshotRegionCount) {
      ProcedureSyncWait.getMasterQuotaManager(env).checkAndUpdateNamespaceRegionQuota(
        tableName, snapshotRegionCount);
    }
  }
}
 
Example 28  Project: HBase-ToHDFS   File: ExportHBaseTableToAvro.java
@Override
public void setup(Context context) throws TableNotFoundException, IOException {
  fs = FileSystem.get(context.getConfiguration());
  String schemaFileLocation = context.getConfiguration().get(SCHEMA_FILE_LOCATION_CONF);
  columns = generateColumnsFromSchemaFile(fs, schemaFileLocation);

  Schema.Parser parser = new Schema.Parser();
  schema = parser.parse(context.getConfiguration().get("avro.schema.output.key"));
  rowKeyColumn = context.getConfiguration().get(ROW_KEY_COLUMN_CONF, "");
  
}
 
Example 29  Project: hbase   File: TruncateTableProcedure.java
private boolean prepareTruncate(final MasterProcedureEnv env) throws IOException {
  try {
    env.getMasterServices().checkTableModifiable(getTableName());
  } catch (TableNotFoundException|TableNotDisabledException e) {
    setFailure("master-truncate-table", e);
    return false;
  }
  return true;
}
 
Example 30  Project: hbase   File: EnableTableProcedure.java
/**
 * Action before any real action of enabling table. Set the exception in the procedure instead
 * of throwing it.  This approach is to deal with backward compatible with 1.0.
 * @param env MasterProcedureEnv
 * @return whether the table passes the necessary checks
 * @throws IOException
 */
private boolean prepareEnable(final MasterProcedureEnv env) throws IOException {
  boolean canTableBeEnabled = true;

  // Check whether table exists
  if (!MetaTableAccessor.tableExists(env.getMasterServices().getConnection(), tableName)) {
    setFailure("master-enable-table", new TableNotFoundException(tableName));
    canTableBeEnabled = false;
  } else {
    // There could be multiple client requests trying to disable or enable
    // the table at the same time. Ensure only the first request is honored
    // After that, no other requests can be accepted until the table reaches
    // DISABLED or ENABLED.
    //
    // Note: in 1.0 release, we called TableStateManager.setTableStateIfInStates() to set
    // the state to ENABLING from DISABLED. The implementation was done before table lock
    // was implemented. With table lock, there is no need to set the state here (it will
    // set the state later on). A quick state check should be enough for us to move forward.
    TableStateManager tsm = env.getMasterServices().getTableStateManager();
    TableState ts = tsm.getTableState(tableName);
    if(!ts.isDisabled()){
      LOG.info("Not DISABLED tableState={}; skipping enable; {}", ts.getState(), this);
      setFailure("master-enable-table", new TableNotDisabledException(ts.toString()));
      canTableBeEnabled = false;
    }
  }

  // We are done the check. Future actions in this procedure could be done asynchronously.
  releaseSyncLatch();

  return canTableBeEnabled;
}
 