Source code examples for the org.apache.hadoop.hbase.ClusterStatus class

The examples below show how the org.apache.hadoop.hbase.ClusterStatus API is used in real projects; you can also follow the links through to GitHub to read the full source code.
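
Before the project examples, here is a minimal, self-contained sketch of the typical call sequence: open a Connection, obtain an Admin, and read the ClusterStatus. It assumes an HBase 1.x client on the classpath and a cluster reachable through the hbase-site.xml configuration; the class name ClusterStatusDemo is purely illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ClusterStatusDemo {
    public static void main(String[] args) throws Exception {
        // Loads hbase-default.xml / hbase-site.xml from the classpath.
        Configuration conf = HBaseConfiguration.create();
        // Connection and Admin are Closeable; try-with-resources releases both.
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            ClusterStatus status = admin.getClusterStatus();
            System.out.println("Master: " + status.getMaster());
            System.out.println("Live region servers: " + status.getServersSize());
            System.out.println("Dead region servers: " + status.getDeadServers());
            for (ServerName server : status.getServers()) {
                System.out.println("  " + server.getHostAndPort());
            }
        }
    }
}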

Example 1  Project: nifi  File: HBase_1_1_2_ClientService.java
/**
 * As of Apache NiFi 1.5.0, due to changes made to
 * {@link SecurityUtil#loginKerberos(Configuration, String, String)}, which is used by this
 * class to authenticate a principal with Kerberos, HBase controller services no longer
 * attempt relogins explicitly.  For more information, please read the documentation for
 * {@link SecurityUtil#loginKerberos(Configuration, String, String)}.
 * <p/>
 * In previous versions of NiFi, a {@link org.apache.nifi.hadoop.KerberosTicketRenewer} was started
 * when the HBase controller service was enabled.  The use of a separate thread to explicitly relogin could cause
 * race conditions with the implicit relogin attempts made by hadoop/HBase code on a thread that references the same
 * {@link UserGroupInformation} instance.  One of these threads could leave the
 * {@link javax.security.auth.Subject} in {@link UserGroupInformation} to be cleared or in an unexpected state
 * while the other thread is attempting to use the {@link javax.security.auth.Subject}, resulting in failed
 * authentication attempts that would leave the HBase controller service in an unrecoverable state.
 *
 * @see SecurityUtil#loginKerberos(Configuration, String, String)
 */
@OnEnabled
public void onEnabled(final ConfigurationContext context) throws InitializationException, IOException, InterruptedException {
    this.connection = createConnection(context);

    // connection check
    if (this.connection != null) {
        final Admin admin = this.connection.getAdmin();
        if (admin != null) {
            admin.listTableNames();

            final ClusterStatus clusterStatus = admin.getClusterStatus();
            if (clusterStatus != null) {
                final ServerName master = clusterStatus.getMaster();
                if (master != null) {
                    masterAddress = master.getHostAndPort();
                } else {
                    masterAddress = null;
                }
            }
        }
    }
}
 
Example 2  Project: nifi  File: HBase_2_ClientService.java
/**
 * As of Apache NiFi 1.5.0, due to changes made to
 * {@link SecurityUtil#loginKerberos(Configuration, String, String)}, which is used by this
 * class to authenticate a principal with Kerberos, HBase controller services no longer
 * attempt relogins explicitly.  For more information, please read the documentation for
 * {@link SecurityUtil#loginKerberos(Configuration, String, String)}.
 * <p/>
 * In previous versions of NiFi, a {@link org.apache.nifi.hadoop.KerberosTicketRenewer} was started
 * when the HBase controller service was enabled.  The use of a separate thread to explicitly relogin could cause
 * race conditions with the implicit relogin attempts made by hadoop/HBase code on a thread that references the same
 * {@link UserGroupInformation} instance.  One of these threads could leave the
 * {@link javax.security.auth.Subject} in {@link UserGroupInformation} to be cleared or in an unexpected state
 * while the other thread is attempting to use the {@link javax.security.auth.Subject}, resulting in failed
 * authentication attempts that would leave the HBase controller service in an unrecoverable state.
 *
 * @see SecurityUtil#loginKerberos(Configuration, String, String)
 */
@OnEnabled
public void onEnabled(final ConfigurationContext context) throws InitializationException, IOException, InterruptedException {
    this.connection = createConnection(context);

    // connection check
    if (this.connection != null) {
        final Admin admin = this.connection.getAdmin();
        if (admin != null) {
            admin.listTableNames();

            final ClusterStatus clusterStatus = admin.getClusterStatus();
            if (clusterStatus != null) {
                final ServerName master = clusterStatus.getMaster();
                if (master != null) {
                    masterAddress = master.getHostAndPort();
                } else {
                    masterAddress = null;
                }
            }
        }
    }
}
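
Note that Examples 1 and 2 are essentially identical: the NiFi service for HBase 2 still calls Admin.getClusterStatus(), which the HBase 2.x client keeps for compatibility but marks deprecated in favor of Admin.getClusterMetrics(). Below is a minimal sketch of the same master lookup against the 2.x ClusterMetrics API; it is illustrative only and not taken from the NiFi sources, and lookupMasterAddress is a hypothetical helper.

// Hypothetical helper showing the HBase 2.x replacement for getClusterStatus().
// "connection" is assumed to be an open org.apache.hadoop.hbase.client.Connection.
static String lookupMasterAddress(Connection connection) throws IOException {
    try (Admin admin = connection.getAdmin()) {
        ClusterMetrics metrics = admin.getClusterMetrics();
        ServerName master = metrics.getMasterName();
        // Mirror the NiFi examples: return "host:port" of the active master, or null.
        return (master != null) ? master.getHostname() + ":" + master.getPort() : null;
    }
}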
 
Example 3  Project: pxf  File: HBaseLookupTable.java
/**
 * Constructs a connector to an HBase lookup table. Requires calling
 * {@link #close()} to close the {@link HBaseAdmin} instance.
 *
 * @param conf HBase configuration
 * @throws IOException when initializing HBaseAdmin fails
 */
public HBaseLookupTable(Configuration conf) throws Exception {
    hbaseConfiguration = conf;
    connection = ConnectionFactory.createConnection(hbaseConfiguration);
    admin = connection.getAdmin();
    ClusterStatus cs = admin.getClusterStatus();
    LOG.debug("HBase cluster has " + cs.getServersSize()
            + " region servers " + "(" + cs.getDeadServers() + " dead)");
}
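
In the HBase 1.x client used here, ClusterStatus.getServersSize() and getDeadServers() return plain int counts of live and dead region servers; the actual ServerName collections are available through getServers() and getDeadServerNames().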
 
Example 4  Project: hbase-tools  File: RegionLoadAdapter.java
public RegionLoadAdapter(HBaseAdmin admin, Map<byte[], HRegionInfo> regionMap, Args args) throws IOException {
    long timestamp = System.currentTimeMillis();

    ClusterStatus clusterStatus = admin.getClusterStatus();
    Collection<ServerName> serverNames = clusterStatus.getServers();
    for (ServerName serverName : serverNames) {
        HServerLoad serverLoad = clusterStatus.getLoad(serverName);
        for (Map.Entry<byte[], HServerLoad.RegionLoad> entry : serverLoad.getRegionsLoad().entrySet()) {
            if (regionMap.get(entry.getKey()) != null)
                regionLoadMap.put(regionMap.get(entry.getKey()), new RegionLoadDelegator(entry.getValue()));
        }
    }

    Util.printVerboseMessage(args, "RegionLoadAdapter", timestamp);
}
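
This adapter is written against the pre-1.0 HBaseAdmin/HServerLoad API. On an HBase 1.x client the same per-region lookup is normally expressed with ServerLoad and RegionLoad, as the later examples in this list do. A minimal sketch under that assumption; findRegionLoad is a hypothetical helper.

// Hypothetical helper: find the RegionLoad for one region by scanning every
// live server's ServerLoad (HBase 1.x client API, same pattern as Examples 5-9).
static RegionLoad findRegionLoad(Admin admin, byte[] regionName) throws IOException {
    ClusterStatus clusterStatus = admin.getClusterStatus();
    for (ServerName serverName : clusterStatus.getServers()) {
        ServerLoad serverLoad = clusterStatus.getLoad(serverName);
        for (Map.Entry<byte[], RegionLoad> entry : serverLoad.getRegionsLoad().entrySet()) {
            if (Bytes.equals(entry.getKey(), regionName)) {
                return entry.getValue();   // the region is hosted on serverName
            }
        }
    }
    return null; // region not found among the live servers
}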
 
Example 5  Project: tajo  File: RegionSizeCalculator.java
private void init(RegionLocator regionLocator, Admin admin)
    throws IOException {
  if (!enabled(admin.getConfiguration())) {
    LOG.info("Region size calculation disabled.");
    return;
  }

  LOG.info("Calculating region sizes for table \"" + regionLocator.getName() + "\".");

  //get regions for table
  List<HRegionLocation> tableRegionInfos = regionLocator.getAllRegionLocations();
  Set<byte[]> tableRegions = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
  for (HRegionLocation regionInfo : tableRegionInfos) {
    tableRegions.add(regionInfo.getRegionInfo().getRegionName());
  }

  ClusterStatus clusterStatus = admin.getClusterStatus();
  Collection<ServerName> servers = clusterStatus.getServers();
  final long megaByte = 1024L * 1024L;

  //iterate all cluster regions, filter regions from our table and compute their size
  for (ServerName serverName: servers) {
    ServerLoad serverLoad = clusterStatus.getLoad(serverName);

    for (RegionLoad regionLoad: serverLoad.getRegionsLoad().values()) {
      byte[] regionId = regionLoad.getName();

      if (tableRegions.contains(regionId)) {

        long regionSizeBytes = (regionLoad.getStorefileSizeMB() + regionLoad.getMemStoreSizeMB()) * megaByte;
        sizeMap.put(regionId, regionSizeBytes);

        if (LOG.isDebugEnabled()) {
          LOG.debug("Region " + regionLoad.getNameAsString() + " has size " + regionSizeBytes);
        }
      }
    }
  }
  LOG.debug("Region sizes calculated");
}
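
Note that Tajo sizes a region as store files plus MemStore (getStorefileSizeMB() + getMemStoreSizeMB()), while the Kylin calculators in the following examples count the store file size alone.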
 
Example 6  File: HBaseRegionSizeCalculator.java

/**
 * Computes size of each region for table and given column families.
 * */
public HBaseRegionSizeCalculator(String tableName, Connection hbaseConnection) throws IOException {

    Table table = null;
    Admin admin = null;
    try {
        table = hbaseConnection.getTable(TableName.valueOf(tableName));
        admin = hbaseConnection.getAdmin();

        if (!enabled(table.getConfiguration())) {
            logger.info("Region size calculation disabled.");
            return;
        }

        logger.info("Calculating region sizes for table \"" + table.getName() + "\".");

        // Get regions for table.
        RegionLocator regionLocator = hbaseConnection.getRegionLocator(table.getName());
        List<HRegionLocation> regionLocationList = regionLocator.getAllRegionLocations();
        Set<byte[]> tableRegions = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);

        for (HRegionLocation hRegionLocation : regionLocationList) {
            tableRegions.add(hRegionLocation.getRegionInfo().getRegionName());
        }

        ClusterStatus clusterStatus = admin.getClusterStatus();
        Collection<ServerName> servers = clusterStatus.getServers();
        final long megaByte = 1024L * 1024L;

        // Iterate all cluster regions, filter regions from our table and
        // compute their size.
        for (ServerName serverName : servers) {
            ServerLoad serverLoad = clusterStatus.getLoad(serverName);

            for (RegionLoad regionLoad : serverLoad.getRegionsLoad().values()) {
                byte[] regionId = regionLoad.getName();

                if (tableRegions.contains(regionId)) {

                    long regionSizeBytes = regionLoad.getStorefileSizeMB() * megaByte;
                    sizeMap.put(regionId, regionSizeBytes);
                    countMap.put(regionId, new Pair<>(regionLoad.getStores(), regionLoad.getStorefiles()));

                    if (regionSizeBytes == 0L) {
                        logger.info("Region " + regionLoad.getNameAsString() + " has size " + regionSizeBytes);
                    }
                }
            }
        }
    } finally {
        IOUtils.closeQuietly(admin);
    }

}
 
Example 7  Project: phoenix  File: IndexLoadBalancer.java
@Override
public void setClusterStatus(ClusterStatus st) {
    this.clusterStatus = st;
}
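
setClusterStatus(ClusterStatus) is a callback on the org.apache.hadoop.hbase.master.LoadBalancer interface: the active master pushes the current cluster state to pluggable balancers through it, and Phoenix's IndexLoadBalancer simply caches the value for its later placement decisions.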
 
Example 8  Project: kylin  File: HBaseRegionSizeCalculator.java
/**
 * Computes size of each region for table and given column families.
 * */
public HBaseRegionSizeCalculator(String tableName, Connection hbaseConnection) throws IOException {

    Table table = null;
    Admin admin = null;
    try {
        table = hbaseConnection.getTable(TableName.valueOf(tableName));
        admin = hbaseConnection.getAdmin();

        if (!enabled(table.getConfiguration())) {
            logger.info("Region size calculation disabled.");
            return;
        }

        logger.info("Calculating region sizes for table \"" + table.getName() + "\".");

        // Get regions for table.
        RegionLocator regionLocator = hbaseConnection.getRegionLocator(table.getName());
        List<HRegionLocation> regionLocationList = regionLocator.getAllRegionLocations();
        Set<byte[]> tableRegions = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);

        for (HRegionLocation hRegionLocation : regionLocationList) {
            tableRegions.add(hRegionLocation.getRegionInfo().getRegionName());
        }

        ClusterStatus clusterStatus = admin.getClusterStatus();
        Collection<ServerName> servers = clusterStatus.getServers();
        final long megaByte = 1024L * 1024L;

        // Iterate all cluster regions, filter regions from our table and
        // compute their size.
        for (ServerName serverName : servers) {
            ServerLoad serverLoad = clusterStatus.getLoad(serverName);

            for (RegionLoad regionLoad : serverLoad.getRegionsLoad().values()) {
                byte[] regionId = regionLoad.getName();

                if (tableRegions.contains(regionId)) {

                    long regionSizeBytes = regionLoad.getStorefileSizeMB() * megaByte;
                    sizeMap.put(regionId, regionSizeBytes);
                    countMap.put(regionId, new Pair<>(regionLoad.getStores(), regionLoad.getStorefiles()));

                    if (regionSizeBytes == 0L) {
                        logger.info("Region " + regionLoad.getNameAsString() + " has size " + regionSizeBytes);
                    }
                }
            }
        }
    } finally {
        IOUtils.closeQuietly(admin);
    }

}
 
Example 9  Project: Kylin  File: HBaseRegionSizeCalculator.java
/** Constructor for unit testing */
HBaseRegionSizeCalculator(HTable table, HBaseAdmin hBaseAdmin) throws IOException {

    try {
        if (!enabled(table.getConfiguration())) {
            logger.info("Region size calculation disabled.");
            return;
        }

        logger.info("Calculating region sizes for table \"" + new String(table.getTableName()) + "\".");

        // Get regions for table.
        Set<HRegionInfo> tableRegionInfos = table.getRegionLocations().keySet();
        Set<byte[]> tableRegions = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);

        for (HRegionInfo regionInfo : tableRegionInfos) {
            tableRegions.add(regionInfo.getRegionName());
        }

        ClusterStatus clusterStatus = hBaseAdmin.getClusterStatus();
        Collection<ServerName> servers = clusterStatus.getServers();
        final long megaByte = 1024L * 1024L;

        // Iterate all cluster regions, filter regions from our table and
        // compute their size.
        for (ServerName serverName : servers) {
            ServerLoad serverLoad = clusterStatus.getLoad(serverName);

            for (RegionLoad regionLoad : serverLoad.getRegionsLoad().values()) {
                byte[] regionId = regionLoad.getName();

                if (tableRegions.contains(regionId)) {

                    long regionSizeBytes = regionLoad.getStorefileSizeMB() * megaByte;
                    sizeMap.put(regionId, regionSizeBytes);

                    // logger.info("Region " + regionLoad.getNameAsString()
                    // + " has size " + regionSizeBytes);
                }
            }
        }
    } finally {
        hBaseAdmin.close();
    }

}
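
This older Kylin variant enumerates the table's regions with HTable.getRegionLocations(), which later HBase releases deprecate; the newer calculator in Example 8 obtains the same information through a RegionLocator instead.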
 
Example 10  Project: spliceengine  File: HServer.java
public HServer(ServerName serverName, ClusterStatus clusterStatus) {
    this.serverName = serverName;
    this.clusterStatus = clusterStatus;
}
 