Code examples for the class org.apache.hadoop.hbase.MiniHBaseCluster

The examples below show how the org.apache.hadoop.hbase.MiniHBaseCluster API is used in real projects; you can also follow each project's link to view the full source on GitHub.
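Before the project examples, here is a minimal sketch of the lifecycle they all assume: an HBaseTestingUtility boots an in-process cluster, hands back the MiniHBaseCluster handle, and tears everything down afterwards. The class name MiniHBaseClusterSketch is ours for illustration only; the calls themselves are the standard HBaseTestingUtility and MiniHBaseCluster APIs that appear throughout the examples below.

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.master.HMaster;

public class MiniHBaseClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    // start an in-process cluster with one master and one regionserver
    util.startMiniCluster(1);
    try {
      MiniHBaseCluster cluster = util.getHBaseCluster();
      // block until an active master has finished initialization
      cluster.waitForActiveAndReadyMaster();
      HMaster master = cluster.getMaster();
      System.out.println("Active master: " + master.getServerName());
    } finally {
      // always tear the cluster down, even if the body above throws
      util.shutdownMiniCluster();
    }
  }
}

Most of the test methods below obtain an already-running cluster the same way, via getHBaseCluster() on a shared testing utility, rather than starting one per test.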

Example 1 Project: phoenix   File: IndexLoadBalancerIT.java
@Test(timeout = 180000)
public void testRandomAssignmentDuringIndexTableEnable() throws Exception {
    MiniHBaseCluster cluster = UTIL.getHBaseCluster();
    HMaster master = cluster.getMaster();
    master.getConfiguration().setBoolean("hbase.master.enabletable.roundrobin", false);
    TableName tableName = TableName.valueOf("testRandomAssignmentDuringIndexTableEnable");
    TableName indexTableName =
            TableName.valueOf("testRandomAssignmentDuringIndexTableEnable_index");
    createUserAndIndexTable(tableName, indexTableName);
    admin.disableTable(tableName);
    admin.disableTable(indexTableName);
    admin.enableTable(tableName);
    admin.enableTable(indexTableName);
    boolean isRegionColocated =
            checkForColocation(master, tableName.getNameAsString(), indexTableName
                    .getNameAsString());
    assertTrue("User regions and index regions should colocate.", isRegionColocated);

}
 
Example 2 Project: phoenix   File: TestWALRecoveryCaching.java
/**
 * @param cluster mini cluster to inspect
 * @param indexTable name of the index table
 * @param primaryTable name of the primary table
 * @return the first server hosting regions of both tables, or null if the
 *         two tables share no region server
 * @throws Exception if the servers hosting either table cannot be looked up
 */
private ServerName getSharedServer(MiniHBaseCluster cluster, byte[] indexTable,
    byte[] primaryTable) throws Exception {
  Set<ServerName> indexServers = getServersForTable(cluster, indexTable);
  Set<ServerName> primaryServers = getServersForTable(cluster, primaryTable);

  Set<ServerName> joinSet = new HashSet<ServerName>(indexServers);
  joinSet.addAll(primaryServers);
  // if there is already an overlap, then find it and return it
  if (joinSet.size() < indexServers.size() + primaryServers.size()) {
    // find the first overlapping server
    for (ServerName server : joinSet) {
      if (indexServers.contains(server) && primaryServers.contains(server)) {
        return server;
      }
    }
    throw new RuntimeException(
        "Couldn't find a matching server on which both the primary and index table live, "
            + "even though they have overlapping server sets");
  }
  return null;
}
 
Example 3 Project: hbase   File: TestClassLoading.java
@Test
// HBASE-3516: Test CP Class loading from local file system
public void testClassLoadingFromLocalFS() throws Exception {
  File jarFile = buildCoprocessorJar(cpName3);

  // create a table that references the jar
  TableDescriptorBuilder tdb = TableDescriptorBuilder.newBuilder(TableName.valueOf(cpName3));
  tdb.setColumnFamily(ColumnFamilyDescriptorBuilder
    .newBuilder(Bytes.toBytes("test")).build());
  tdb.setValue("COPROCESSOR$1", getLocalPath(jarFile) + "|" + cpName3 + "|" +
    Coprocessor.PRIORITY_USER);
  TableDescriptor tableDescriptor = tdb.build();
  Admin admin = TEST_UTIL.getAdmin();
  admin.createTable(tableDescriptor);
  waitForTable(tableDescriptor.getTableName());

  // verify that the coprocessor was loaded
  boolean found = false;
  MiniHBaseCluster hbase = TEST_UTIL.getHBaseCluster();
  for (HRegion region: hbase.getRegionServer(0).getOnlineRegionsLocalContext()) {
    if (region.getRegionInfo().getRegionNameAsString().startsWith(cpName3)) {
      found = (region.getCoprocessorHost().findCoprocessor(cpName3) != null);
    }
  }
  assertTrue("Class " + cpName3 + " was missing on a region", found);
}
 
Example 4 Project: hbase   File: TestSeparateClientZKCluster.java
@Test
public void testMasterSwitch() throws Exception {
  // get an admin instance and issue some request first
  Connection conn = TEST_UTIL.getConnection();
  try (Admin admin = conn.getAdmin()) {
    LOG.debug("Tables: " + admin.listTableDescriptors());
    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
    // switch active master
    HMaster master = cluster.getMaster();
    master.stopMaster();
    LOG.info("Stopped master {}", master.getServerName());
    while (!master.isShutDown()) {
      Thread.sleep(200);
    }
    LOG.info("Shutdown master {}", master.getServerName());
    while (cluster.getMaster() == null || !cluster.getMaster().isInitialized()) {
      LOG.info("Get master {}", cluster.getMaster() == null? "null":
        cluster.getMaster().getServerName());
      Thread.sleep(200);
    }
    LOG.info("Got master {}", cluster.getMaster().getServerName());
    // confirm client access still works
    Assert.assertTrue(admin.balance(false));
  }
}
 
Example 5 Project: hbase   File: TestRSGroupsOfflineMode.java
@BeforeClass
public static void setUp() throws Exception {
  TEST_UTIL = new HBaseTestingUtility();
  RSGroupUtil.enableRSGroup(TEST_UTIL.getConfiguration());
  TEST_UTIL.getConfiguration().set(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, "1");
  StartMiniClusterOption option =
    StartMiniClusterOption.builder().numMasters(2).numRegionServers(3).numDataNodes(3).build();
  TEST_UTIL.startMiniCluster(option);
  cluster = TEST_UTIL.getHBaseCluster();
  master = ((MiniHBaseCluster) cluster).getMaster();
  master.balanceSwitch(false);
  hbaseAdmin = TEST_UTIL.getAdmin();
  // wait till the balancer is in online mode
  TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      return master.isInitialized() &&
        ((RSGroupBasedLoadBalancer) master.getLoadBalancer()).isOnline() &&
        master.getServerManager().getOnlineServersList().size() >= 3;
    }
  });
}
 
Example 6 Project: hbase   File: TestRSGroupMajorCompactionTTL.java
@Before
@Override
public void setUp() throws Exception {
  utility = new HBaseTestingUtility();
  Configuration conf = utility.getConfiguration();
  RSGroupUtil.enableRSGroup(conf);
  conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, NUM_SLAVES_BASE);
  conf.setInt("hbase.hfile.compaction.discharger.interval", 10);
  utility.startMiniCluster(NUM_SLAVES_BASE);
  MiniHBaseCluster cluster = utility.getHBaseCluster();
  final HMaster master = cluster.getMaster();

  //wait for balancer to come online
  utility.waitFor(60000, new Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() {
      return master.isInitialized() &&
          ((RSGroupBasedLoadBalancer) master.getLoadBalancer()).isOnline();
    }
  });
  admin = utility.getAdmin();
}
 
Example 7 Project: hbase   File: TestMasterObserver.java
@Test
public void testStarted() throws Exception {
  MiniHBaseCluster cluster = UTIL.getHBaseCluster();

  HMaster master = cluster.getMaster();
  assertTrue("Master should be active", master.isActiveMaster());
  MasterCoprocessorHost host = master.getMasterCoprocessorHost();
  assertNotNull("CoprocessorHost should not be null", host);
  CPMasterObserver cp = host.findCoprocessor(CPMasterObserver.class);
  assertNotNull("CPMasterObserver coprocessor not found or not installed!", cp);

  // check basic lifecycle
  assertTrue("MasterObserver should have been started", cp.wasStarted());
  assertTrue("preMasterInitialization() hook should have been called",
      cp.wasMasterInitializationCalled());
  assertTrue("postStartMaster() hook should have been called",
      cp.wasStartMasterCalled());
}
 
Example 8 Project: hbase   File: TestMasterObserver.java
@Test
public void testNamespaceOperations() throws Exception {
  MiniHBaseCluster cluster = UTIL.getHBaseCluster();
  String testNamespace = "observed_ns";
  HMaster master = cluster.getMaster();
  MasterCoprocessorHost host = master.getMasterCoprocessorHost();
  CPMasterObserver cp = host.findCoprocessor(CPMasterObserver.class);

  // create a table
  Admin admin = UTIL.getAdmin();

  admin.listNamespaces();
  assertTrue("preListNamespaces should have been called", cp.preListNamespacesCalled);
  assertTrue("postListNamespaces should have been called", cp.postListNamespacesCalled);

  admin.createNamespace(NamespaceDescriptor.create(testNamespace).build());
  assertTrue("Test namespace should be created", cp.wasCreateNamespaceCalled());

  assertNotNull(admin.getNamespaceDescriptor(testNamespace));
  assertTrue("Test namespace descriptor should have been called",
      cp.wasGetNamespaceDescriptorCalled());
  // This test used to do a bunch w/ bypass but bypass of these table and namespace stuff has
  // been removed so the testing code was removed.
}
 
Example 9 Project: hbase   File: TestMasterObserver.java
@Test
public void testTableDescriptorsEnumeration() throws Exception {
  MiniHBaseCluster cluster = UTIL.getHBaseCluster();

  HMaster master = cluster.getMaster();
  MasterCoprocessorHost host = master.getMasterCoprocessorHost();
  CPMasterObserver cp = host.findCoprocessor(CPMasterObserver.class);
  cp.resetStates();

  GetTableDescriptorsRequest req =
      RequestConverter.buildGetTableDescriptorsRequest((List<TableName>)null);
  master.getMasterRpcServices().getTableDescriptors(null, req);

  assertTrue("Coprocessor should be called on table descriptors request",
    cp.wasGetTableDescriptorsCalled());
}
 
Example 10
@Test
public void testExceptionDuringInitialization() throws Exception {
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2);  // Let's fail fast.
  conf.setBoolean(CoprocessorHost.ABORT_ON_ERROR_KEY, true);
  conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, "");
  TEST_UTIL.startMiniCluster(2);
  try {
    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
    // Trigger one regionserver to fail as if it came up with a coprocessor
    // that fails during initialization
    final HRegionServer regionServer = cluster.getRegionServer(0);
    conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
      FailedInitializationObserver.class.getName());
    regionServer.getRegionServerCoprocessorHost().loadSystemCoprocessors(conf,
      CoprocessorHost.REGION_COPROCESSOR_CONF_KEY);
    TEST_UTIL.waitFor(10000, 1000, new Predicate<Exception>() {
      @Override
      public boolean evaluate() throws Exception {
        return regionServer.isAborted();
      }
    });
  } finally {
    TEST_UTIL.shutdownMiniCluster();
  }
}
 
Example 11 Project: hbase   File: TestRegionMover.java
@Test
public void testWithAck() throws Exception {
  MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
  HRegionServer regionServer = cluster.getRegionServer(0);
  String rsName = regionServer.getServerName().getAddress().toString();
  int numRegions = regionServer.getNumberOfOnlineRegions();
  RegionMoverBuilder rmBuilder =
    new RegionMoverBuilder(rsName, TEST_UTIL.getConfiguration()).ack(true).maxthreads(8);
  try (RegionMover rm = rmBuilder.build()) {
    LOG.info("Unloading " + regionServer.getServerName());
    rm.unload();
    assertEquals(0, regionServer.getNumberOfOnlineRegions());
    LOG.info("Successfully Unloaded\nNow Loading");
    rm.load();
    assertEquals(numRegions, regionServer.getNumberOfOnlineRegions());
    // Repeat the same load. It should be very fast because all regions are already moved.
    rm.load();
  }
}
 
Example 12 Project: hbase   File: TestRegionMover.java
@Test
public void testRegionServerPort() {
  MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
  HRegionServer regionServer = cluster.getRegionServer(0);
  String rsName = regionServer.getServerName().getHostname();

  final int PORT = 16021;
  Configuration conf = TEST_UTIL.getConfiguration();
  String originalPort = conf.get(HConstants.REGIONSERVER_PORT);
  conf.set(HConstants.REGIONSERVER_PORT, Integer.toString(PORT));
  RegionMoverBuilder rmBuilder = new RegionMoverBuilder(rsName, conf);
  assertEquals(PORT, rmBuilder.port);
  if (originalPort != null) {
    conf.set(HConstants.REGIONSERVER_PORT, originalPort);
  }
}
 
Example 13 Project: presto   File: TestingPhoenixServer.java
private TestingPhoenixServer()
{
    // keep references to prevent GC from resetting the log levels
    apacheLogger = java.util.logging.Logger.getLogger("org.apache");
    apacheLogger.setLevel(Level.SEVERE);
    zookeeperLogger = java.util.logging.Logger.getLogger(ZooKeeperServer.class.getName());
    zookeeperLogger.setLevel(Level.OFF);
    securityLogger = java.util.logging.Logger.getLogger("SecurityLogger.org.apache");
    securityLogger.setLevel(Level.SEVERE);
    // to squelch the SecurityLogger, instantiate the logger with the config
    // above before it is overridden again by the HBase test framework
    org.apache.commons.logging.LogFactory.getLog("SecurityLogger.org.apache.hadoop.hbase.server");
    this.conf.set("hbase.security.logger", "ERROR");
    this.conf.setInt(MASTER_INFO_PORT, -1);
    this.conf.setInt(REGIONSERVER_INFO_PORT, -1);
    this.conf.setInt(HBASE_CLIENT_RETRIES_NUMBER, 1);
    this.conf.setBoolean("phoenix.schema.isNamespaceMappingEnabled", true);
    this.conf.set("hbase.regionserver.wal.codec", "org.apache.hadoop.hbase.regionserver.wal.IndexedWALEditCodec");
    this.hbaseTestingUtility = new HBaseTestingUtility(conf);

    try {
        MiniZooKeeperCluster zkCluster = this.hbaseTestingUtility.startMiniZKCluster();
        port = zkCluster.getClientPort();

        MiniHBaseCluster hbaseCluster = hbaseTestingUtility.startMiniHBaseCluster(1, 4);
        hbaseCluster.waitForActiveAndReadyMaster();
        LOG.info("Phoenix server ready: %s", getJdbcUrl());
    }
    catch (Exception e) {
        throw new RuntimeException("Can't start phoenix server.", e);
    }
}
 
Example 14 Project: Kylin   File: MiniClusterTest.java
public static void main(String[] args) throws Exception {

        File miniclusterFolder = new File(AbstractKylinTestCase.MINICLUSTER_TEST_DATA);
        System.out.println("----" + miniclusterFolder.getAbsolutePath());

        //save the dfs data to minicluster folder
        System.setProperty("test.build.data", miniclusterFolder.getAbsolutePath());

        MiniHBaseCluster hbCluster = testUtil.startMiniCluster(1);
        testUtil.startMiniMapReduceCluster();
        System.out.println("Minicluster started.");

        Configuration conf = hbCluster.getConf();
        String host = conf.get(HConstants.ZOOKEEPER_QUORUM);
        String port = conf.get(HConstants.ZOOKEEPER_CLIENT_PORT);
        String parent = conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT);

        // reduce rpc retry
        conf.set(HConstants.HBASE_CLIENT_PAUSE, "3000");
        conf.set(HConstants.HBASE_CLIENT_RETRIES_NUMBER, "5");
        conf.set(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, "60000");

        String connectionUrl = "hbase:" + host + ":" + port + ":" + parent;

        System.out.println("hbase connection url:" + connectionUrl);

        testUtil.getDFSCluster().getFileSystem();
        testUtil.shutdownMiniMapReduceCluster();
        testUtil.shutdownMiniCluster();
    }
 
Example 15 Project: phoenix   File: IndexLoadBalancerIT.java
@Test(timeout = 180000)
public void testRoundRobinAssignmentDuringIndexTableCreation() throws Exception {
    MiniHBaseCluster cluster = UTIL.getHBaseCluster();
    HMaster master = cluster.getMaster();
    TableName tableName = TableName.valueOf("testRoundRobinAssignmentDuringIndexTableCreation");
    TableName indexTableName =
            TableName.valueOf("testRoundRobinAssignmentDuringIndexTableCreation_index");
    createUserAndIndexTable(tableName, indexTableName);
    boolean isRegionColocated =
            checkForColocation(master, tableName.getNameAsString(), indexTableName
                    .getNameAsString());
    assertTrue("User regions and index regions should colocate.", isRegionColocated);
}
 
Example 16
private void start() throws Exception {
    int regionServerPort = REGION_SERVER_PORT + memberNumber;
    int regionServerInfoPort = REGION_SERVER_WEB_PORT + memberNumber;
    int derbyPort = SQLConfiguration.DEFAULT_NETWORK_BIND_PORT + memberNumber;

    Configuration config = SpliceTestPlatformConfig.create(
            hbaseTargetDirectory,
            0,
            0,
            0, //regionServerPort,
            0, //regionServerInfoPort,
            derbyPort,
            false,
            null,
            secure
    );

    String keytab = hbaseTargetDirectory+"/splice.keytab";
    UserGroupInformation ugi;
    if (secure) {
        ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI("hbase/[email protected]", keytab);
        UserGroupInformation.setLoginUser(ugi);
    } else {
        ugi = UserGroupInformation.getCurrentUser();
    }

    ugi.doAs(new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
            MiniHBaseCluster miniHBaseCluster = new MiniHBaseCluster(config, 0, 1);
            miniHBaseCluster.startRegionServer();
            return null;
        }

    });
}
 
Example 17 Project: phoenix   File: IndexLoadBalancerIT.java
@Test(timeout = 180000)
public void testBalanceCluster() throws Exception {
    MiniHBaseCluster cluster = UTIL.getHBaseCluster();
    HMaster master = cluster.getMaster();
    master.getConfiguration().setBoolean("hbase.master.enabletable.roundrobin", false);
    master.getConfiguration().setBoolean("hbase.master.startup.retainassign", false);
    master.getConfiguration().setBoolean("hbase.master.loadbalance.bytable", false);

    TableName tableName = TableName.valueOf("testBalanceCluster");
    TableName indexTableName = TableName.valueOf("testBalanceCluster_index");
    createUserAndIndexTable(tableName, indexTableName);
    HTableDescriptor htd1 = new HTableDescriptor(TableName.valueOf("testBalanceCluster1"));
    htd1.addFamily(new HColumnDescriptor("fam1"));
    char c = 'A';
    byte[][] split1 = new byte[12][];
    for (int i = 0; i < 12; i++) {
        byte[] b = { (byte) c };
        split1[i] = b;
        c++;
    }
    admin.setBalancerRunning(false, false);
    admin.createTable(htd1, split1);
    admin.disableTable(tableName);
    admin.enableTable(tableName);
    admin.setBalancerRunning(true, false);
    admin.balancer();
    boolean isRegionsColocated =
            checkForColocation(master, tableName.getNameAsString(), indexTableName
                    .getNameAsString());
    assertTrue("User regions and index regions should colocate.", isRegionsColocated);
}
 
Example 18 Project: phoenix   File: IndexLoadBalancerIT.java
@Test(timeout = 180000)
public void testBalanceByTable() throws Exception {
    ZooKeeperWatcher zkw = UTIL.getZooKeeperWatcher(UTIL);
    MiniHBaseCluster cluster = UTIL.getHBaseCluster();
    HMaster master = cluster.getMaster();
    master.getConfiguration().setBoolean("hbase.master.loadbalance.bytable", true);
    TableName tableName = TableName.valueOf("testBalanceByTable");
    TableName indexTableName = TableName.valueOf("testBalanceByTable_index");
    createUserAndIndexTable(tableName, indexTableName);
    HTableDescriptor htd1 = new HTableDescriptor(TableName.valueOf("testBalanceByTable1"));
    htd1.addFamily(new HColumnDescriptor("fam1"));
    char c = 'A';
    byte[][] split1 = new byte[12][];
    for (int i = 0; i < 12; i++) {
        byte[] b = { (byte) c };
        split1[i] = b;
        c++;
    }
    admin.disableTable(tableName);
    admin.enableTable(tableName);
    admin.setBalancerRunning(true, false);
    boolean isRegionColocated =
            checkForColocation(master, tableName.getNameAsString(), indexTableName
                    .getNameAsString());
    assertTrue("User regions and index regions should colocate.", isRegionColocated);
    admin.balancer();
    Thread.sleep(10000);
    ZKAssign.blockUntilNoRIT(zkw);
    while (master.getAssignmentManager().getRegionStates().isRegionsInTransition()) {
        Threads.sleep(1000);
    }
    isRegionColocated =
            checkForColocation(master, tableName.getNameAsString(), indexTableName
                    .getNameAsString());
    assertTrue("User regions and index regions should colocate.", isRegionColocated);
}
 
Example 19 Project: phoenix   File: TestWALRecoveryCaching.java
/**
 * @param cluster mini cluster to inspect
 * @param server server whose regions should be collected
 * @param table name of the table to match
 * @return the regions of the given table hosted on the given server, or an
 *         empty list if the server is not in the cluster
 */
private List<HRegion> getRegionsFromServerForTable(MiniHBaseCluster cluster, ServerName server,
    byte[] table) {
  List<HRegion> online = Collections.emptyList();
  for (RegionServerThread rst : cluster.getRegionServerThreads()) {
    // if its the server we are going to kill, get the regions we want to reassign
    if (rst.getRegionServer().getServerName().equals(server)) {
      online = rst.getRegionServer().getOnlineRegions(org.apache.hadoop.hbase.TableName.valueOf(table));
      break;
    }
  }
  return online;
}
 
Example 20 Project: hbase   File: Action.java
protected void resumeRs(ServerName server) throws IOException {
  getLogger().info("Resuming regionserver {}", server);
  cluster.resumeRegionServer(server);
  if(!(cluster instanceof MiniHBaseCluster)){
    cluster.waitForRegionServerToStart(server.getHostname(), server.getPort(), startRsTimeout);
  }
  getLogger().info("Resuming regionserver {}. Reported num of rs:{}", server,
    cluster.getClusterMetrics().getLiveServerMetrics().size());
}
 
Example 21 Project: phoenix   File: TestWALRecoveryCaching.java
/**
 * @param cluster mini cluster to inspect
 * @param server server whose regions should be collected
 * @param table name of the table to match
 * @return the regions of the given table hosted on the given server, or an
 *         empty list if the server is not in the cluster
 */
private List<HRegion> getRegionsFromServerForTable(MiniHBaseCluster cluster, ServerName server,
    byte[] table) {
  List<HRegion> online = Collections.emptyList();
  for (RegionServerThread rst : cluster.getRegionServerThreads()) {
    // if its the server we are going to kill, get the regions we want to reassign
    if (rst.getRegionServer().getServerName().equals(server)) {
        online = rst.getRegionServer().getRegions(org.apache.hadoop.hbase.TableName.valueOf(table));
        break;
    }
  }
  return online;
}
 
Example 22 Project: phoenix   File: TestWALRecoveryCaching.java
/**
 * @param cluster mini cluster to inspect
 * @param server server whose regions should be collected
 * @param table name of the table to match
 * @return the regions of the given table hosted on the given server, or an
 *         empty list if the server is not in the cluster
 */
private List<HRegion> getRegionsFromServerForTable(MiniHBaseCluster cluster, ServerName server,
    byte[] table) {
  List<HRegion> online = Collections.emptyList();
  for (RegionServerThread rst : cluster.getRegionServerThreads()) {
    // if its the server we are going to kill, get the regions we want to reassign
    if (rst.getRegionServer().getServerName().equals(server)) {
      online = rst.getRegionServer().getOnlineRegions(table);
      break;
    }
  }
  return online;
}
 
Example 23 Project: hbase   File: TestMasterReplication.java
private void rollWALAndWait(final HBaseTestingUtility utility, final TableName table,
    final byte[] row) throws IOException {
  final Admin admin = utility.getAdmin();
  final MiniHBaseCluster cluster = utility.getMiniHBaseCluster();

  // find the region that corresponds to the given row.
  HRegion region = null;
  for (HRegion candidate : cluster.getRegions(table)) {
    if (HRegion.rowIsInRange(candidate.getRegionInfo(), row)) {
      region = candidate;
      break;
    }
  }
  assertNotNull("Couldn't find the region for row '" + Arrays.toString(row) + "'", region);

  final CountDownLatch latch = new CountDownLatch(1);

  // listen for successful log rolls
  final WALActionsListener listener = new WALActionsListener() {
        @Override
        public void postLogRoll(final Path oldPath, final Path newPath) throws IOException {
          latch.countDown();
        }
      };
  region.getWAL().registerWALActionsListener(listener);

  // request a roll
  admin.rollWALWriter(cluster.getServerHoldingRegion(region.getTableDescriptor().getTableName(),
    region.getRegionInfo().getRegionName()));

  // wait
  try {
    latch.await();
  } catch (InterruptedException exception) {
    LOG.warn("Interrupted while waiting for the wal of '" + region + "' to roll. If later " +
        "replication tests fail, it's probably because we should still be waiting.");
    Thread.currentThread().interrupt();
  }
  region.getWAL().unregisterWALActionsListener(listener);
}
 
Example 24 Project: phoenix   File: TestWALRecoveryCaching.java
private Set<ServerName> getServersForTable(MiniHBaseCluster cluster, byte[] table)
    throws Exception {
  Set<ServerName> indexServers = new HashSet<ServerName>();
  for (Region region : cluster.getRegions(table)) {
    indexServers.add(cluster.getServerHoldingRegion(null, region.getRegionInfo().getRegionName()));
  }
  return indexServers;
}
 
Example 25
@Test
public void testCachedConnections() throws Exception {
  final String tableName = generateUniqueName();
  final String index1Name = generateUniqueName();
  final Connection conn = DriverManager.getConnection(getUrl());

  final HBaseAdmin admin = getUtility().getHBaseAdmin();
  final MiniHBaseCluster cluster = getUtility().getHBaseCluster();
  final HRegionServer regionServer = cluster.getRegionServer(0);
  Configuration conf = admin.getConfiguration();
  final int noOfOrgs = 20;
  final AtomicBoolean flag = new AtomicBoolean();
  flag.set(false);
  // create table and indices
  String createTableSql = "CREATE TABLE " + tableName
      + "(org_id VARCHAR NOT NULL PRIMARY KEY, v1 INTEGER, v2 INTEGER, v3 INTEGER) VERSIONS=1 SPLIT ON ('"
      + ORG_PREFIX + "-" + noOfOrgs / 2 + "')";
  conn.createStatement().execute(createTableSql);
  conn.createStatement().execute("CREATE INDEX " + index1Name + " ON " + tableName + "(v1)");
  List<HRegionInfo> regions = admin.getTableRegions(TableName.valueOf(tableName));
  final HRegionInfo regionInfo = regions.get(0);

  writeToTable(tableName, conn, noOfOrgs);
  int beforeRegionCloseCount = getActiveConnections(regionServer, conf);
  int regionsCount = admin.getOnlineRegions(regionServer.getServerName()).size();
  admin.unassign(regionInfo.getEncodedNameAsBytes(), true);
  while(!(admin.getOnlineRegions(regionServer.getServerName()).size() < regionsCount));
  int afterRegionCloseCount = getActiveConnections(regionServer, conf);
  assertTrue("Cached connections not closed when region closes: ",
      afterRegionCloseCount == beforeRegionCloseCount && afterRegionCloseCount > 0);

}
 
Example 26
@Test
public void testChangingNumberOfPeerRegionServers() throws IOException, InterruptedException {
  LOG.info("testSimplePutDelete");
  MiniHBaseCluster peerCluster = UTIL2.getMiniHBaseCluster();
  // This test wants two RS's up. We only run one generally so add one.
  peerCluster.startRegionServer();
  Waiter.waitFor(peerCluster.getConfiguration(), 30000, new Waiter.Predicate<Exception>() {
    @Override public boolean evaluate() throws Exception {
      return peerCluster.getLiveRegionServerThreads().size() > 1;
    }
  });
  int numRS = peerCluster.getRegionServerThreads().size();

  doPutTest(Bytes.toBytes(1));

  int rsToStop = peerCluster.getServerWithMeta() == 0 ? 1 : 0;
  peerCluster.stopRegionServer(rsToStop);
  peerCluster.waitOnRegionServer(rsToStop);

  // Sanity check
  assertEquals(numRS - 1, peerCluster.getRegionServerThreads().size());

  doPutTest(Bytes.toBytes(2));

  peerCluster.startRegionServer();

  // Sanity check
  assertEquals(numRS, peerCluster.getRegionServerThreads().size());

  doPutTest(Bytes.toBytes(3));
}
 
Example 27 Project: hbase   File: SecureTestUtil.java
private static List<AccessController> getAccessControllers(MiniHBaseCluster cluster) {
  List<AccessController> result = Lists.newArrayList();
  for (RegionServerThread t: cluster.getLiveRegionServerThreads()) {
    for (HRegion region: t.getRegionServer().getOnlineRegionsLocalContext()) {
      Coprocessor cp = region.getCoprocessorHost().findCoprocessor(AccessController.class);
      if (cp != null) {
        result.add((AccessController)cp);
      }
    }
  }
  return result;
}
 
Example 28 Project: hbase   File: SecureTestUtil.java
private static Map<AccessController,Long> getAuthManagerMTimes(MiniHBaseCluster cluster) {
  Map<AccessController,Long> result = Maps.newHashMap();
  for (AccessController ac: getAccessControllers(cluster)) {
    result.put(ac, ac.getAuthManager().getMTime());
  }
  return result;
}
 