org.apache.hadoop.hbase.TableName#equals() Source Code Examples

Listed below are example usages of org.apache.hadoop.hbase.TableName#equals(), collected from open-source projects. Each example notes the project and file it comes from, so you can look up the full source on GitHub.
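
A quick orientation before the examples: TableName#equals compares the fully-qualified name, i.e. namespace plus qualifier, and a name created without an explicit namespace lands in the "default" namespace. The sketch below is illustrative only; the class name and the table names ("users", "analytics", "orders") are made up.

import org.apache.hadoop.hbase.TableName;

public class TableNameEqualsDemo {
  public static void main(String[] args) {
    TableName users = TableName.valueOf("users");                   // implicit "default" namespace
    TableName sameUsers = TableName.valueOf("default", "users");    // same fully-qualified name
    TableName otherUsers = TableName.valueOf("analytics", "users"); // same qualifier, different namespace

    System.out.println(users.equals(sameUsers));  // true
    System.out.println(users.equals(otherUsers)); // false

    // Many of the snippets below use this comparison to special-case the catalog table.
    System.out.println(users.equals(TableName.META_TABLE_NAME)); // false ("hbase:meta")
  }
}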

Example 1  Project: phoenix   File: IndexLoadBalancer.java
/**
 * Populates table's region locations into co-location info from master.
 * @param table
 */
public void populateRegionLocations(TableName table) {
    synchronized (this.colocationInfo) {
        if (!isTableColocated(table)) {
            throw new IllegalArgumentException("Specified table " + table
                    + " should be in one of the tables to co-locate.");
        }
        RegionStates regionStates = this.master.getAssignmentManager().getRegionStates();
        List<HRegionInfo> onlineRegions = regionStates.getRegionsOfTable(table);
        for (HRegionInfo hri : onlineRegions) {
            regionOnline(hri, regionStates.getRegionServerOfRegion(hri));
        }
        Map<String, RegionState> regionsInTransition = regionStates.getRegionsInTransition();
        for (RegionState regionState : regionsInTransition.values()) {
            if (table.equals(regionState.getRegion().getTable())
                    && regionState.getServerName() != null) {
                regionOnline(regionState.getRegion(), regionState.getServerName());
            }
        }
    }
}
 
Example 2  Project: hbase   File: TableStateManager.java
private void updateMetaState(TableName tableName, TableState.State newState) throws IOException {
  if (tableName.equals(TableName.META_TABLE_NAME)) {
    if (TableState.State.DISABLING.equals(newState) ||
        TableState.State.DISABLED.equals(newState)) {
      throw new IllegalArgumentIOException("Cannot disable meta table; " + newState);
    }
    // Otherwise, just return; no need to set ENABLED on meta -- it is always ENABLED.
    return;
  }
  boolean succ = false;
  try {
    MetaTableAccessor.updateTableState(master.getConnection(), tableName, newState);
    tableName2State.put(tableName, newState);
    succ = true;
  } finally {
    if (!succ) {
      this.tableName2State.remove(tableName);
    }
  }
  metaStateUpdated(tableName, newState);
}
 
Example 3  Project: hbase   File: ReaderBase.java
@Override
public Entry next(Entry reuse) throws IOException {
  Entry e = reuse;
  if (e == null) {
    e = new Entry();
  }

  boolean hasEntry = false;
  try {
    hasEntry = readNext(e);
  } catch (IllegalArgumentException iae) {
    TableName tableName = e.getKey().getTableName();
    if (tableName != null && tableName.equals(TableName.OLD_ROOT_TABLE_NAME)) {
      // It is old ROOT table edit, ignore it
      LOG.info("Got an old ROOT edit, ignoring ");
      return next(e);
    }
    else throw iae;
  }
  edit++;
  if (compressionContext != null && emptyCompressionContext) {
    emptyCompressionContext = false;
  }
  return hasEntry ? e : null;
}
 
Example 4  Project: hbase   File: SnapshotTestingUtils.java
/**
 * Make sure that there is only one snapshot returned from the master and its
 * name and table match the passed in parameters.
 */
public static List<SnapshotDescription> assertExistsMatchingSnapshot(
    Admin admin, String snapshotName, TableName tableName)
    throws IOException {
  // list the snapshot
  List<SnapshotDescription> snapshots = admin.listSnapshots();

  List<SnapshotDescription> returnedSnapshots = new ArrayList<>();
  for (SnapshotDescription sd : snapshots) {
    if (snapshotName.equals(sd.getName()) && tableName.equals(sd.getTableName())) {
      returnedSnapshots.add(sd);
    }
  }

  Assert.assertTrue("No matching snapshots found.", returnedSnapshots.size() > 0);
  return returnedSnapshots;
}
 
/**
 * Fetch table state for given table from META table
 * @param conn connection to use
 * @param tableName table to fetch state for
 */
public static TableState getTableState(Connection conn, TableName tableName)
  throws IOException {
  if (tableName.equals(TableName.META_TABLE_NAME)) {
    return new TableState(tableName, TableState.State.ENABLED);
  }
  Table metaHTable = conn.getTable(TableName.META_TABLE_NAME);
  Get get = new Get(tableName.getName()).addColumn(TABLE_FAMILY, TABLE_STATE_QUALIFIER);
  Result result = metaHTable.get(get);
  return getTableState(result);
}
 
Example 6  Project: phoenix   File: IndexLoadBalancer.java
/**
 * Add tables whose regions to co-locate.
 * @param userTable
 * @param indexTable
 */
public void addTablesToColocate(TableName userTable, TableName indexTable) {
    if (userTable.equals(indexTable)) {
        throw new IllegalArgumentException("Tables to colocate should not be same.");
    } else if (isTableColocated(userTable)) {
        throw new IllegalArgumentException("User table already colocated with table "
                + getMappedTableToColocate(userTable));
    } else if (isTableColocated(indexTable)) {
        throw new IllegalArgumentException("Index table is already colocated with table "
                + getMappedTableToColocate(indexTable));
    }
    userTableVsIndexTable.put(userTable, indexTable);
    indexTableVsUserTable.put(indexTable, userTable);
}
 
Example 7  Project: hbase   File: IncrementalTableBackupClient.java
protected static int getIndex(TableName tbl, List<TableName> sTableList) {
  if (sTableList == null) {
    return 0;
  }

  for (int i = 0; i < sTableList.size(); i++) {
    if (tbl.equals(sTableList.get(i))) {
      return i;
    }
  }
  return -1;
}
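
Because TableName implements value-based equality, the linear scan above behaves like List#indexOf for a non-null list (the null-list special case aside). A minimal sketch, with a made-up class name and table names:

import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.hbase.TableName;

public class TableNameIndexOfDemo {
  public static void main(String[] args) {
    // indexOf relies on equals, so it matches a freshly created TableName
    // that was never added to the list itself.
    List<TableName> tables = Arrays.asList(TableName.valueOf("orders"), TableName.valueOf("users"));
    System.out.println(tables.indexOf(TableName.valueOf("users")));   // 1
    System.out.println(tables.indexOf(TableName.valueOf("missing"))); // -1
  }
}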
 
Example 8  Project: hbase   File: VisibilityController.java
@Override
public void preScannerOpen(ObserverContext<RegionCoprocessorEnvironment> e, Scan scan)
    throws IOException {
  if (!initialized) {
    throw new VisibilityControllerNotReadyException("VisibilityController not yet initialized!");
  }
  // Nothing to do if authorization is not enabled
  if (!authorizationEnabled) {
    return;
  }
  Region region = e.getEnvironment().getRegion();
  Authorizations authorizations = null;
  try {
    authorizations = scan.getAuthorizations();
  } catch (DeserializationException de) {
    throw new IOException(de);
  }
  if (authorizations == null) {
    // No Authorizations present for this scan/Get!
    // In case of system tables other than "labels", just scan without visibility check and
    // filtering. Checking visibility labels for META and NAMESPACE table is not needed.
    TableName table = region.getRegionInfo().getTable();
    if (table.isSystemTable() && !table.equals(LABELS_TABLE_NAME)) {
      return;
    }
  }

  Filter visibilityLabelFilter = VisibilityUtils.createVisibilityLabelFilter(region,
      authorizations);
  if (visibilityLabelFilter != null) {
    Filter filter = scan.getFilter();
    if (filter != null) {
      scan.setFilter(new FilterList(filter, visibilityLabelFilter));
    } else {
      scan.setFilter(visibilityLabelFilter);
    }
  }
}
 
Example 9  Project: hbase   File: VisibilityController.java
@Override
public void preGetOp(ObserverContext<RegionCoprocessorEnvironment> e, Get get,
    List<Cell> results) throws IOException {
  if (!initialized) {
    throw new VisibilityControllerNotReadyException("VisibilityController not yet initialized");
  }
  // Nothing useful to do if authorization is not enabled
  if (!authorizationEnabled) {
    return;
  }
  Region region = e.getEnvironment().getRegion();
  Authorizations authorizations = null;
  try {
    authorizations = get.getAuthorizations();
  } catch (DeserializationException de) {
    throw new IOException(de);
  }
  if (authorizations == null) {
    // No Authorizations present for this scan/Get!
    // In case of system tables other than "labels", just scan without visibility check and
    // filtering. Checking visibility labels for META and NAMESPACE table is not needed.
    TableName table = region.getRegionInfo().getTable();
    if (table.isSystemTable() && !table.equals(LABELS_TABLE_NAME)) {
      return;
    }
  }
  Filter visibilityLabelFilter = VisibilityUtils.createVisibilityLabelFilter(e.getEnvironment()
      .getRegion(), authorizations);
  if (visibilityLabelFilter != null) {
    Filter filter = get.getFilter();
    if (filter != null) {
      get.setFilter(new FilterList(filter, visibilityLabelFilter));
    } else {
      get.setFilter(visibilityLabelFilter);
    }
  }
}
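
The guard in Examples 8 and 9 skips label filtering only for system tables other than the labels table. A rough sketch of how those TableName checks evaluate, assuming the usual constants (LABELS_TABLE_NAME resolves to "hbase:labels", and system tables live in the "hbase" namespace); the class name and user table are made up:

import org.apache.hadoop.hbase.TableName;

public class SystemTableCheckDemo {
  public static void main(String[] args) {
    TableName labels = TableName.valueOf("hbase", "labels"); // stands in for VisibilityConstants.LABELS_TABLE_NAME
    TableName meta = TableName.META_TABLE_NAME;
    TableName user = TableName.valueOf("orders");

    // Early return (no label filtering) only for system tables that are not the labels table itself.
    System.out.println(meta.isSystemTable() && !meta.equals(labels));     // true  -> skip filtering
    System.out.println(labels.isSystemTable() && !labels.equals(labels)); // false -> apply filtering
    System.out.println(user.isSystemTable() && !user.equals(labels));     // false -> apply filtering
  }
}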
 
Example 10  Project: hbase   File: TableStateManager.java
public void setDeletedTable(TableName tableName) throws IOException {
  if (tableName.equals(TableName.META_TABLE_NAME)) {
    // Can't delete the hbase:meta table.
    return;
  }
  ReadWriteLock lock = tnLock.getLock(tableName);
  lock.writeLock().lock();
  try {
    MetaTableAccessor.deleteTableState(master.getConnection(), tableName);
    metaStateDeleted(tableName);
  } finally {
    tableName2State.remove(tableName);
    lock.writeLock().unlock();
  }
}
 
Example 11  Project: hbase   File: AsyncRegionLocator.java
void clearCache(TableName tableName) {
  LOG.debug("Clear meta cache for {}", tableName);
  if (tableName.equals(META_TABLE_NAME)) {
    metaRegionLocator.clearCache();
  } else {
    nonMetaRegionLocator.clearCache(tableName);
  }
}
 
Example 12  Project: hbase   File: TestSyncReplicationWALProvider.java
@Override
public Optional<Pair<String, String>> getPeerIdAndRemoteWALDir(TableName table) {
  if (table != null && table.equals(TABLE)) {
    return Optional.of(Pair.newPair(PEER_ID, REMOTE_WAL_DIR));
  } else {
    return Optional.empty();
  }
}
 
Example 13  Project: hbase   File: RawAsyncHBaseAdmin.java
@Override
public CompletableFuture<List<RegionInfo>> getRegions(TableName tableName) {
  if (tableName.equals(META_TABLE_NAME)) {
    return connection.registry.getMetaRegionLocations()
      .thenApply(locs -> Stream.of(locs.getRegionLocations()).map(HRegionLocation::getRegion)
        .collect(Collectors.toList()));
  } else {
    return ClientMetaTableAccessor.getTableHRegionLocations(metaTable, tableName)
      .thenApply(
        locs -> locs.stream().map(HRegionLocation::getRegion).collect(Collectors.toList()));
  }
}
 
Example 14  Project: hbase   File: TestQuotaStatusRPCs.java
private int getTableSize(TableName tn, Map<RegionInfo,Long> regionSizes) {
  int tableSize = 0;
  for (Entry<RegionInfo,Long> entry : regionSizes.entrySet()) {
    RegionInfo regionInfo = entry.getKey();
    long regionSize = entry.getValue();
    if (tn.equals(regionInfo.getTable())) {
      tableSize += regionSize;
    }
  }
  return tableSize;
}
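
The same per-table aggregation can be expressed with the Stream API. A behaviorally equivalent sketch (the class name is made up; it sums into a long instead of the int used by the test helper above):

import java.util.Map;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;

public class TableSizeDemo {
  // TableName#equals selects the regions that belong to the requested table.
  static long getTableSize(TableName tn, Map<RegionInfo, Long> regionSizes) {
    return regionSizes.entrySet().stream()
        .filter(entry -> tn.equals(entry.getKey().getTable()))
        .mapToLong(Map.Entry::getValue)
        .sum();
  }
}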
 
Example 15  Project: hbase   File: TestRegionSizeUse.java
/**
 * Computes the number of regions for the given table that have a positive size.
 *
 * @param tn The TableName in question
 * @param regions A collection of region sizes
 * @return The number of regions for the given table.
 */
private int numRegionsForTable(TableName tn, Map<RegionInfo,Long> regions) {
  int sum = 0;
  for (Entry<RegionInfo,Long> entry : regions.entrySet()) {
    if (tn.equals(entry.getKey().getTable()) && 0 < entry.getValue()) {
      sum++;
    }
  }
  return sum;
}
 
private int getRegionReportsForTable(Map<RegionInfo,Long> reports, TableName tn) {
  int numReports = 0;
  for (Entry<RegionInfo,Long> entry : reports.entrySet()) {
    if (tn.equals(entry.getKey().getTable())) {
      numReports++;
    }
  }
  return numReports;
}
 
Example 17  Project: hbase   File: ProtobufUtil.java
/**
 * Convert HBaseProto.RegionInfo to a RegionInfo
 *
 * @param proto the RegionInfo to convert
 * @return the converted RegionInfo
 */
public static org.apache.hadoop.hbase.client.RegionInfo toRegionInfo(final HBaseProtos.RegionInfo proto) {
  if (proto == null) {
    return null;
  }
  TableName tableName = ProtobufUtil.toTableName(proto.getTableName());
  long regionId = proto.getRegionId();
  int defaultReplicaId = org.apache.hadoop.hbase.client.RegionInfo.DEFAULT_REPLICA_ID;
  int replicaId = proto.hasReplicaId() ? proto.getReplicaId() : defaultReplicaId;
  if (tableName.equals(TableName.META_TABLE_NAME) && replicaId == defaultReplicaId) {
    return RegionInfoBuilder.FIRST_META_REGIONINFO;
  }
  byte[] startKey = null;
  byte[] endKey = null;
  if (proto.hasStartKey()) {
    startKey = proto.getStartKey().toByteArray();
  }
  if (proto.hasEndKey()) {
    endKey = proto.getEndKey().toByteArray();
  }
  boolean split = false;
  if (proto.hasSplit()) {
    split = proto.getSplit();
  }
  RegionInfoBuilder rib = RegionInfoBuilder.newBuilder(tableName)
      .setStartKey(startKey)
      .setEndKey(endKey)
      .setRegionId(regionId)
      .setReplicaId(replicaId)
      .setSplit(split);
  if (proto.hasOffline()) {
    rib.setOffline(proto.getOffline());
  }
  return rib.build();
}
 
Example 18  Project: hbase   File: BackupManager.java
/**
 * Creates a backup info based on input backup request.
 * @param backupId backup id
 * @param type type
 * @param tableList table list
 * @param targetRootDir root dir
 * @param workers number of parallel workers
 * @param bandwidth bandwidth per worker in MB per sec
 * @return BackupInfo
 * @throws BackupException exception
 */
public BackupInfo createBackupInfo(String backupId, BackupType type, List<TableName> tableList,
    String targetRootDir, int workers, long bandwidth) throws BackupException {
  if (targetRootDir == null) {
    throw new BackupException("Wrong backup request parameter: target backup root directory");
  }

  if (type == BackupType.FULL && (tableList == null || tableList.isEmpty())) {
    // If the table list is null for a full backup, back up all tables: fill the table list
    // with all user tables from meta. If no table is available, throw the request exception.
    List<TableDescriptor> htds = null;
    try (Admin admin = conn.getAdmin()) {
      htds = admin.listTableDescriptors();
    } catch (Exception e) {
      throw new BackupException(e);
    }

    if (htds == null) {
      throw new BackupException("No table exists for full backup of all tables.");
    } else {
      tableList = new ArrayList<>();
      for (TableDescriptor hTableDescriptor : htds) {
        TableName tn = hTableDescriptor.getTableName();
        if (tn.equals(BackupSystemTable.getTableName(conf))) {
          // skip backup system table
          continue;
        }
        tableList.add(hTableDescriptor.getTableName());
      }

      LOG.info("Full backup all the tables available in the cluster: {}", tableList);
    }
  }

  // there are one or more tables in the table list
  backupInfo = new BackupInfo(backupId, type, tableList.toArray(new TableName[tableList.size()]),
    targetRootDir);
  backupInfo.setBandwidth(bandwidth);
  backupInfo.setWorkers(workers);
  return backupInfo;
}
 
Example 19  Project: hbase   File: HMaster.java
private static boolean isCatalogTable(final TableName tableName) {
  return tableName.equals(TableName.META_TABLE_NAME);
}
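
TableName.META_TABLE_NAME is the fully-qualified "hbase:meta" catalog table, so the check above is equivalent to comparing against TableName.valueOf("hbase", "meta"). A quick sketch (the class name and the "orders" table are made up):

import org.apache.hadoop.hbase.TableName;

public class CatalogTableCheckDemo {
  public static void main(String[] args) {
    System.out.println(TableName.valueOf("hbase", "meta").equals(TableName.META_TABLE_NAME)); // true
    System.out.println(TableName.valueOf("orders").equals(TableName.META_TABLE_NAME));        // false
  }
}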
 
Example 20  Project: hbase   File: HBaseFsck.java
/**
 * Scan HDFS for all regions, recording their information into
 * regionInfoMap
 */
public void loadHdfsRegionDirs() throws IOException, InterruptedException {
  Path rootDir = CommonFSUtils.getRootDir(getConf());
  FileSystem fs = rootDir.getFileSystem(getConf());

  // list all tables from HDFS
  List<FileStatus> tableDirs = Lists.newArrayList();

  boolean foundVersionFile = fs.exists(new Path(rootDir, HConstants.VERSION_FILE_NAME));

  List<Path> paths = FSUtils.getTableDirs(fs, rootDir);
  for (Path path : paths) {
    TableName tableName = CommonFSUtils.getTableName(path);
    if ((!checkMetaOnly && isTableIncluded(tableName)) ||
        tableName.equals(TableName.META_TABLE_NAME)) {
      tableDirs.add(fs.getFileStatus(path));
    }
  }

  // verify that version file exists
  if (!foundVersionFile) {
    errors.reportError(ERROR_CODE.NO_VERSION_FILE,
        "Version file does not exist in root dir " + rootDir);
    if (shouldFixVersionFile()) {
      LOG.info("Trying to create a new " + HConstants.VERSION_FILE_NAME
          + " file.");
      setShouldRerun();
      FSUtils.setVersion(fs, rootDir, getConf().getInt(
          HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000), getConf().getInt(
          HConstants.VERSION_FILE_WRITE_ATTEMPTS,
          HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS));
    }
  }

  // Avoid multithreading at table-level because already multithreaded internally at
  // region-level.  Additionally multithreading at table-level can lead to deadlock
  // if there are many tables in the cluster.  Since there are a limited # of threads
  // in the executor's thread pool and if we multithread at the table-level by putting
  // WorkItemHdfsDir callables into the executor, then we will have some threads in the
  // executor tied up solely in waiting for the tables' region-level calls to complete.
  // If there are enough tables then there will be no actual threads in the pool left
  // for the region-level callables to be serviced.
  for (FileStatus tableDir : tableDirs) {
    LOG.debug("Loading region dirs from " +tableDir.getPath());
    WorkItemHdfsDir item = new WorkItemHdfsDir(fs, errors, tableDir);
    try {
      item.call();
    } catch (ExecutionException e) {
      LOG.warn("Could not completely load table dir " +
          tableDir.getPath(), e.getCause());
    }
  }
  errors.print("");
}