org.apache.hadoop.hbase.zookeeper.ZKUtil#getData() source code examples

Listed below are example usages of org.apache.hadoop.hbase.zookeeper.ZKUtil#getData(); follow each project's link to GitHub to view the full source.
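
Before the project examples, a minimal sketch of the call itself may help. ZKUtil.getData(ZKWatcher, String) reads the raw bytes stored at a znode; as every example below shows, it declares the checked KeeperException and InterruptedException, and the result may be null when the znode is missing. The wrapper class and method names here are illustrative, not part of ZKUtil.

import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
import org.apache.zookeeper.KeeperException;

public final class ZKUtilGetDataSketch {
  // Illustrative wrapper: read the raw bytes stored at a znode.
  static byte[] readZNode(ZKWatcher zkw, String znode)
      throws KeeperException, InterruptedException {
    // Callers must handle both checked exceptions and be prepared for a
    // null (missing znode) or empty (znode with no data) result.
    return ZKUtil.getData(zkw, znode);
  }
}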

Example 1   Project: hbase   File: ZKReplicationQueueStorage.java
@Override
public long getWALPosition(ServerName serverName, String queueId, String fileName)
    throws ReplicationException {
  byte[] bytes;
  try {
    bytes = ZKUtil.getData(zookeeper, getFileNode(serverName, queueId, fileName));
  } catch (KeeperException | InterruptedException e) {
    throw new ReplicationException("Failed to get log position (serverName=" + serverName +
      ", queueId=" + queueId + ", fileName=" + fileName + ")", e);
  }
  try {
    return ZKUtil.parseWALPositionFrom(bytes);
  } catch (DeserializationException de) {
    LOG.warn("Failed parse log position (serverName={}, queueId={}, fileName={})",
        serverName, queueId, fileName);
  }
  // If we cannot parse the position, start at the beginning of the WAL file again.
  return 0;
}
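
The fallback in Example 1 (start at position 0 when the stored bytes cannot be parsed) generalizes to a small read-or-default helper. A minimal sketch under the same exception-handling assumptions; getDataOrDefault is a hypothetical name, not a ZKUtil method.

// Hypothetical helper mirroring Example 1's fallback behavior.
static byte[] getDataOrDefault(ZKWatcher zkw, String znode, byte[] defaultValue)
    throws KeeperException, InterruptedException {
  byte[] data = ZKUtil.getData(zkw, znode);
  // A missing znode yields null; fall back the way Example 1 falls back to 0.
  return data != null ? data : defaultValue;
}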
 
Example 2   Project: hbase   File: ZKReplicationPeerStorage.java
private SyncReplicationState getSyncReplicationState(String peerId, String path)
    throws ReplicationException {
  try {
    byte[] data = ZKUtil.getData(zookeeper, path);
    if (data == null || data.length == 0) {
      if (ZKUtil.checkExists(zookeeper, getPeerNode(peerId)) != -1) {
        // This should be a peer from a previous version; set the sync replication state for it.
        ZKUtil.createSetData(zookeeper, path, NONE_STATE_ZNODE_BYTES);
        return SyncReplicationState.NONE;
      } else {
        throw new ReplicationException(
          "Replication peer sync state shouldn't be empty, peerId=" + peerId);
      }
    }
    return SyncReplicationState.parseFrom(data);
  } catch (KeeperException | InterruptedException | IOException e) {
    throw new ReplicationException(
      "Error getting sync replication state of path " + path + " for peer with id=" + peerId, e);
  }
}
 
Example 3   Project: hbase   File: TestMetaWithReplicasBasic.java
@Test
public void testZookeeperNodesForReplicas() throws Exception {
  // Checks that all the znodes exist when meta replicas are enabled
  ZKWatcher zkw = TEST_UTIL.getZooKeeperWatcher();
  Configuration conf = TEST_UTIL.getConfiguration();
  String baseZNode =
    conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT, HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT);
  String primaryMetaZnode =
    ZNodePaths.joinZNode(baseZNode, conf.get("zookeeper.znode.metaserver", "meta-region-server"));
  // check that the data in the znode is parseable (this would also mean the znode exists)
  byte[] data = ZKUtil.getData(zkw, primaryMetaZnode);
  ProtobufUtil.toServerName(data);
  for (int i = 1; i < 3; i++) {
    String secZnode = ZNodePaths.joinZNode(baseZNode,
      conf.get("zookeeper.znode.metaserver", "meta-region-server") + "-" + i);
    String str = zkw.getZNodePaths().getZNodeForReplica(i);
    assertEquals(secZnode, str);
    // check that the data in the znode is parseable (this would also mean the znode exists)
    data = ZKUtil.getData(zkw, secZnode);
    ProtobufUtil.toServerName(data);
  }
}
 
Example 4   Project: hbase   File: TestSplitLogManager.java
@Test
public void testTaskResigned() throws Exception {
  LOG.info("TestTaskResigned - resubmit task node once in RESIGNED state");
  assertEquals(0, tot_mgr_resubmit.sum());
  slm = new SplitLogManager(master, conf);
  assertEquals(0, tot_mgr_resubmit.sum());
  TaskBatch batch = new TaskBatch();
  String tasknode = submitTaskAndWait(batch, "foo/1");
  assertEquals(0, tot_mgr_resubmit.sum());
  final ServerName worker1 = ServerName.valueOf("worker1,1,1");
  assertEquals(0, tot_mgr_resubmit.sum());
  SplitLogTask slt = new SplitLogTask.Resigned(worker1);
  assertEquals(0, tot_mgr_resubmit.sum());
  ZKUtil.setData(zkw, tasknode, slt.toByteArray());
  ZKUtil.checkExists(zkw, tasknode);
  // Could be small race here.
  if (tot_mgr_resubmit.sum() == 0) {
    waitForCounter(tot_mgr_resubmit, 0, 1, to/2);
  }
  assertEquals(1, tot_mgr_resubmit.sum());

  byte[] taskstate = ZKUtil.getData(zkw, tasknode);
  slt = SplitLogTask.parseFrom(taskstate);
  assertTrue(slt.isUnassigned(master.getServerName()));
}
 
Example 5   Project: hbase   File: TestSplitLogWorker.java
@Test
public void testAcquireTaskAtStartup() throws Exception {
  LOG.info("testAcquireTaskAtStartup");
  SplitLogCounters.resetCounters();
  final String TATAS = "tatas";
  final ServerName RS = ServerName.valueOf("rs,1,1");
  RegionServerServices mockedRS = getRegionServer(RS);
  zkw.getRecoverableZooKeeper().create(ZKSplitLog.getEncodedNodeName(zkw, TATAS),
    new SplitLogTask.Unassigned(ServerName.valueOf("mgr,1,1")).toByteArray(),
      Ids.OPEN_ACL_UNSAFE,
      CreateMode.PERSISTENT);

  SplitLogWorker slw =
      new SplitLogWorker(ds, TEST_UTIL.getConfiguration(), mockedRS, neverEndingTask);
  slw.start();
  try {
    waitForCounter(SplitLogCounters.tot_wkr_task_acquired, 0, 1, WAIT_TIME);
    byte [] bytes = ZKUtil.getData(zkw, ZKSplitLog.getEncodedNodeName(zkw, TATAS));
    SplitLogTask slt = SplitLogTask.parseFrom(bytes);
    assertTrue(slt.isOwned(RS));
  } finally {
    stopSplitLogWorker(slw);
  }
}
 
Example 6   Project: hbase   File: IntegrationTestMetaReplicas.java
@BeforeClass
public static void setUp() throws Exception {
  // Set up the integration test util
  if (util == null) {
    util = new IntegrationTestingUtility();
  }
  util.getConfiguration().setInt(HConstants.META_REPLICAS_NUM, 3);
  util.getConfiguration().setInt(
      StorefileRefresherChore.REGIONSERVER_STOREFILE_REFRESH_PERIOD, 1000);
  // Make sure there are three servers.
  util.initializeCluster(3);
  ZKWatcher zkw = util.getZooKeeperWatcher();
  Configuration conf = util.getConfiguration();
  String baseZNode = conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT,
      HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT);
  String primaryMetaZnode = ZNodePaths.joinZNode(baseZNode,
      conf.get("zookeeper.znode.metaserver", "meta-region-server"));
  // check that the data in the znode is parseable (this would also mean the znode exists)
  byte[] data = ZKUtil.getData(zkw, primaryMetaZnode);
  ProtobufUtil.toServerName(data);
  waitUntilZnodeAvailable(1);
  waitUntilZnodeAvailable(2);
}
 
Example 7   Project: hbase   File: ZKReplicationPeerStorage.java
@Override
public void transitPeerSyncReplicationState(String peerId) throws ReplicationException {
  String newStateNode = getNewSyncReplicationStateNode(peerId);
  try {
    byte[] data = ZKUtil.getData(zookeeper, newStateNode);
    ZKUtil.multiOrSequential(zookeeper,
      Arrays.asList(ZKUtilOp.setData(newStateNode, NONE_STATE_ZNODE_BYTES),
        ZKUtilOp.setData(getSyncReplicationStateNode(peerId), data)),
      false);
  } catch (KeeperException | InterruptedException e) {
    throw new ReplicationException(
      "Error transiting sync replication state for peer with id=" + peerId, e);
  }
}
 
Example 8   Project: hbase   File: RpcThrottleStorage.java
public boolean isRpcThrottleEnabled() throws IOException {
  try {
    byte[] upData = ZKUtil.getData(zookeeper, rpcThrottleZNode);
    return upData == null || Bytes.toBoolean(upData);
  } catch (KeeperException | InterruptedException e) {
    throw new IOException("Failed to get rpc throttle", e);
  }
}
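
Example 8 treats the znode as a boolean flag, with a missing node meaning "enabled". For completeness, here is a sketch of the matching write side, assuming the same zookeeper and rpcThrottleZNode fields; the real class exposes a similar setter, but treat this body as illustrative.

// Illustrative write side for the flag read by isRpcThrottleEnabled.
public void switchRpcThrottle(boolean enable) throws IOException {
  try {
    // createSetData creates the znode if it is missing, then stores the flag bytes.
    ZKUtil.createSetData(zookeeper, rpcThrottleZNode, Bytes.toBytes(enable));
  } catch (KeeperException e) {
    throw new IOException("Failed to store rpc throttle flag", e);
  }
}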
 
Example 9   Project: hbase   File: RegionServerTracker.java
private Pair<ServerName, RegionServerInfo> getServerInfo(String name)
    throws KeeperException, IOException {
  ServerName serverName = ServerName.parseServerName(name);
  String nodePath = ZNodePaths.joinZNode(watcher.getZNodePaths().rsZNode, name);
  byte[] data;
  try {
    data = ZKUtil.getData(watcher, nodePath);
  } catch (InterruptedException e) {
    throw (InterruptedIOException) new InterruptedIOException().initCause(e);
  }
  if (data == null) {
    // we should receive a children changed event later and then we will expire it, so we still
    // need to add it to the region server set.
    LOG.warn("Server node {} does not exist, already dead?", name);
    return Pair.newPair(serverName, null);
  }
  if (data.length == 0 || !ProtobufUtil.isPBMagicPrefix(data)) {
    // This should not happen unless we have bugs or someone has messed ZK up.
    LOG.warn("Invalid data for region server node {} on zookeeper, data length = {}", name,
      data.length);
    return Pair.newPair(serverName, null);
  }
  RegionServerInfo.Builder builder = RegionServerInfo.newBuilder();
  int magicLen = ProtobufUtil.lengthOfPBMagic();
  ProtobufUtil.mergeFrom(builder, data, magicLen, data.length - magicLen);
  return Pair.newPair(serverName, builder.build());
}
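
Example 9 (and Examples 10 and 18 below) follow the same convention: protobuf payloads stored in ZK carry a short "PB magic" prefix that must be checked and stripped before parsing. A minimal sketch of that idiom, with a hypothetical helper name:

// Hypothetical helper for the PB-magic-prefix idiom used in Examples 9, 10 and 18.
static byte[] stripPBMagic(byte[] data) throws DeserializationException {
  if (data == null || !ProtobufUtil.isPBMagicPrefix(data)) {
    throw new DeserializationException("Missing or invalid PB magic prefix");
  }
  int magicLen = ProtobufUtil.lengthOfPBMagic();
  // Everything after the prefix is the serialized protobuf message.
  return Arrays.copyOfRange(data, magicLen, data.length);
}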
 
Example 10   Project: hbase   File: RSGroupInfoManagerImpl.java
private List<RSGroupInfo> retrieveGroupListFromZookeeper() throws IOException {
  String groupBasePath = ZNodePaths.joinZNode(watcher.getZNodePaths().baseZNode, RS_GROUP_ZNODE);
  List<RSGroupInfo> RSGroupInfoList = Lists.newArrayList();
  // Overwrite any info stored by table; this takes precedence.
  try {
    if (ZKUtil.checkExists(watcher, groupBasePath) != -1) {
      List<String> children = ZKUtil.listChildrenAndWatchForNewChildren(watcher, groupBasePath);
      if (children == null) {
        return RSGroupInfoList;
      }
      for (String znode : children) {
        byte[] data = ZKUtil.getData(watcher, ZNodePaths.joinZNode(groupBasePath, znode));
        if (data != null && data.length > 0) {
          ProtobufUtil.expectPBMagicPrefix(data);
          ByteArrayInputStream bis =
            new ByteArrayInputStream(data, ProtobufUtil.lengthOfPBMagic(), data.length);
          RSGroupInfoList
            .add(ProtobufUtil.toGroupInfo(RSGroupProtos.RSGroupInfo.parseFrom(bis)));
        }
      }
      LOG.debug("Read ZK GroupInfo count:" + RSGroupInfoList.size());
    }
  } catch (KeeperException | DeserializationException | InterruptedException e) {
    throw new IOException("Failed to read rsGroupZNode", e);
  }
  return RSGroupInfoList;
}
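
Example 10's loop is a recurring shape: list a parent znode's children, then call getData on each child. Reduced to a generic sketch (the helper name and the java.util.function.Function decode hook are hypothetical):

// Hypothetical generic scan over a parent znode's children.
static <T> List<T> readAllChildren(ZKWatcher zkw, String baseZNode,
    Function<byte[], T> decode) throws KeeperException, InterruptedException {
  List<T> results = new ArrayList<>();
  List<String> children = ZKUtil.listChildrenNoWatch(zkw, baseZNode);
  if (children == null) { // parent znode does not exist
    return results;
  }
  for (String child : children) {
    byte[] data = ZKUtil.getData(zkw, ZNodePaths.joinZNode(baseZNode, child));
    if (data != null && data.length > 0) {
      results.add(decode.apply(data));
    }
  }
  return results;
}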
 
Example 11   Project: hbase   File: TestMetaReplicasAddressChange.java
@Test
public void testMetaAddressChange() throws Exception {
  // checks that even when the meta's location changes, the various
  // caches update themselves. Uses the master operations to test
  // this
  Configuration conf = TEST_UTIL.getConfiguration();
  ZKWatcher zkw = TEST_UTIL.getZooKeeperWatcher();
  String baseZNode =
    conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT, HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT);
  String primaryMetaZnode =
    ZNodePaths.joinZNode(baseZNode, conf.get("zookeeper.znode.metaserver", "meta-region-server"));
  // check that the data in the znode is parseable (this would also mean the znode exists)
  byte[] data = ZKUtil.getData(zkw, primaryMetaZnode);
  ServerName currentServer = ProtobufUtil.toServerName(data);
  Collection<ServerName> liveServers = TEST_UTIL.getAdmin()
    .getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)).getLiveServerMetrics().keySet();
  ServerName moveToServer =
    liveServers.stream().filter(s -> !currentServer.equals(s)).findAny().get();
  final TableName tableName = name.getTableName();
  TEST_UTIL.createTable(tableName, "f");
  assertTrue(TEST_UTIL.getAdmin().tableExists(tableName));
  TEST_UTIL.getAdmin().move(RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedNameAsBytes(),
    moveToServer);
  assertNotEquals(currentServer, moveToServer);
  LOG.debug("CurrentServer={}, moveToServer={}", currentServer, moveToServer);
  TEST_UTIL.waitFor(60000, () -> {
    byte[] bytes = ZKUtil.getData(zkw, primaryMetaZnode);
    ServerName actualServer = ProtobufUtil.toServerName(bytes);
    return moveToServer.equals(actualServer);
  });
  TEST_UTIL.getAdmin().disableTable(tableName);
  assertTrue(TEST_UTIL.getAdmin().isTableDisabled(tableName));
}
 
Example 12   Project: hbase   File: TestSplitLogManager.java
/**
 * Test whether the splitlog correctly creates a task in zookeeper
 */
@Test
public void testTaskCreation() throws Exception {

  LOG.info("TestTaskCreation - test the creation of a task in zk");
  slm = new SplitLogManager(master, conf);
  TaskBatch batch = new TaskBatch();

  String tasknode = submitTaskAndWait(batch, "foo/1");

  byte[] data = ZKUtil.getData(zkw, tasknode);
  SplitLogTask slt = SplitLogTask.parseFrom(data);
  LOG.info("Task node created " + slt.toString());
  assertTrue(slt.isUnassigned(master.getServerName()));
}
 
Example 13   Project: hbase   File: TestSplitLogManager.java
@Test
public void testRescanCleanup() throws Exception {
  LOG.info("TestRescanCleanup - ensure RESCAN nodes are cleaned up");

  slm = new SplitLogManager(master, conf);
  TaskBatch batch = new TaskBatch();

  String tasknode = submitTaskAndWait(batch, "foo/1");
  int version = ZKUtil.checkExists(zkw, tasknode);
  final ServerName worker1 = ServerName.valueOf("worker1,1,1");
  SplitLogTask slt = new SplitLogTask.Owned(worker1);
  ZKUtil.setData(zkw, tasknode, slt.toByteArray());
  waitForCounter(tot_mgr_heartbeat, 0, 1, to/2);
  waitForCounter(new Expr() {
    @Override
    public long eval() {
      return (tot_mgr_resubmit.sum() + tot_mgr_resubmit_failed.sum());
    }
  }, 0, 1, 5*60000); // wait long enough
  Assert.assertEquals("Could not run test. Lost ZK connection?",
    0, tot_mgr_resubmit_failed.sum());
  int version1 = ZKUtil.checkExists(zkw, tasknode);
  assertTrue(version1 > version);
  byte[] taskstate = ZKUtil.getData(zkw, tasknode);
  slt = SplitLogTask.parseFrom(taskstate);
  assertTrue(slt.isUnassigned(master.getServerName()));

  waitForCounter(tot_mgr_rescan_deleted, 0, 1, to/2);
}
 
Example 14   Project: hbase   File: TestSplitLogManager.java
@Test
public void testDeadWorker() throws Exception {
  LOG.info("testDeadWorker");

  conf.setLong("hbase.splitlog.max.resubmit", 0);
  slm = new SplitLogManager(master, conf);
  TaskBatch batch = new TaskBatch();

  String tasknode = submitTaskAndWait(batch, "foo/1");
  int version = ZKUtil.checkExists(zkw, tasknode);
  final ServerName worker1 = ServerName.valueOf("worker1,1,1");
  SplitLogTask slt = new SplitLogTask.Owned(worker1);
  ZKUtil.setData(zkw, tasknode, slt.toByteArray());
  if (tot_mgr_heartbeat.sum() == 0) {
    waitForCounter(tot_mgr_heartbeat, 0, 1, to/2);
  }
  slm.handleDeadWorker(worker1);
  if (tot_mgr_resubmit.sum() == 0) {
    waitForCounter(tot_mgr_resubmit, 0, 1, to+to/2);
  }
  if (tot_mgr_resubmit_dead_server_task.sum() == 0) {
    waitForCounter(tot_mgr_resubmit_dead_server_task, 0, 1, to + to/2);
  }

  int version1 = ZKUtil.checkExists(zkw, tasknode);
  assertTrue(version1 > version);
  byte[] taskstate = ZKUtil.getData(zkw, tasknode);
  slt = SplitLogTask.parseFrom(taskstate);
  assertTrue(slt.isUnassigned(master.getServerName()));
}
 
Example 15   Project: hbase   File: TestSplitLogWorker.java
@Test
public void testRaceForTask() throws Exception {
  LOG.info("testRaceForTask");
  SplitLogCounters.resetCounters();
  final String TRFT = "trft";
  final ServerName SVR1 = ServerName.valueOf("svr1,1,1");
  final ServerName SVR2 = ServerName.valueOf("svr2,1,1");
  zkw.getRecoverableZooKeeper().create(ZKSplitLog.getEncodedNodeName(zkw, TRFT),
    new SplitLogTask.Unassigned(MANAGER).toByteArray(),
      Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
  RegionServerServices mockedRS1 = getRegionServer(SVR1);
  RegionServerServices mockedRS2 = getRegionServer(SVR2);
  SplitLogWorker slw1 =
      new SplitLogWorker(ds, TEST_UTIL.getConfiguration(), mockedRS1, neverEndingTask);
  SplitLogWorker slw2 =
      new SplitLogWorker(ds, TEST_UTIL.getConfiguration(), mockedRS2, neverEndingTask);
  slw1.start();
  slw2.start();
  try {
    waitForCounter(SplitLogCounters.tot_wkr_task_acquired, 0, 1, WAIT_TIME);
    // Assert that either the tot_wkr_failed_to_grab_task_owned counter was bumped,
    // or, if it was not, that we fell through to the next counter in line and it was set.
    assertTrue(waitForCounterBoolean(SplitLogCounters.tot_wkr_failed_to_grab_task_owned, 0, 1,
        WAIT_TIME, false) ||
      SplitLogCounters.tot_wkr_failed_to_grab_task_lost_race.sum() == 1);
    byte [] bytes = ZKUtil.getData(zkw, ZKSplitLog.getEncodedNodeName(zkw, TRFT));
    SplitLogTask slt = SplitLogTask.parseFrom(bytes);
    assertTrue(slt.isOwned(SVR1) || slt.isOwned(SVR2));
  } finally {
    stopSplitLogWorker(slw1);
    stopSplitLogWorker(slw2);
  }
}
 
Example 16   Project: hbase   File: TestSplitLogWorker.java
@Test
public void testPreemptTask() throws Exception {
  LOG.info("testPreemptTask");
  SplitLogCounters.resetCounters();
  final ServerName SRV = ServerName.valueOf("tpt_svr,1,1");
  final String PATH = ZKSplitLog.getEncodedNodeName(zkw, "tpt_task");
  RegionServerServices mockedRS = getRegionServer(SRV);
  SplitLogWorker slw =
      new SplitLogWorker(ds, TEST_UTIL.getConfiguration(), mockedRS, neverEndingTask);
  slw.start();
  try {
    Thread.yield(); // let the worker start
    Thread.sleep(1000);
    waitForCounter(SplitLogCounters.tot_wkr_task_grabing, 0, 1, WAIT_TIME);

    // this time create a task node after starting the splitLogWorker
    zkw.getRecoverableZooKeeper().create(PATH,
      new SplitLogTask.Unassigned(MANAGER).toByteArray(),
      Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);

    waitForCounter(SplitLogCounters.tot_wkr_task_acquired, 0, 1, WAIT_TIME);
    assertEquals(1, slw.getTaskReadySeq());
    byte [] bytes = ZKUtil.getData(zkw, PATH);
    SplitLogTask slt = SplitLogTask.parseFrom(bytes);
    assertTrue(slt.isOwned(SRV));
    slt = new SplitLogTask.Owned(MANAGER);
    ZKUtil.setData(zkw, PATH, slt.toByteArray());
    waitForCounter(SplitLogCounters.tot_wkr_preempt_task, 0, 1, WAIT_TIME);
  } finally {
    stopSplitLogWorker(slw);
  }
}
 
Example 17   Project: hbase   File: TestSplitLogWorker.java
@Test
public void testAcquireMultiTasks() throws Exception {
  LOG.info("testAcquireMultiTasks");
  SplitLogCounters.resetCounters();
  final String TATAS = "tatas";
  final ServerName RS = ServerName.valueOf("rs,1,1");
  final int maxTasks = 3;
  Configuration testConf = HBaseConfiguration.create(TEST_UTIL.getConfiguration());
  testConf.setInt(HBASE_SPLIT_WAL_MAX_SPLITTER, maxTasks);
  RegionServerServices mockedRS = getRegionServer(RS);
  for (int i = 0; i < maxTasks; i++) {
    zkw.getRecoverableZooKeeper().create(ZKSplitLog.getEncodedNodeName(zkw, TATAS + i),
      new SplitLogTask.Unassigned(ServerName.valueOf("mgr,1,1")).toByteArray(),
        Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
  }

  SplitLogWorker slw = new SplitLogWorker(ds, testConf, mockedRS, neverEndingTask);
  slw.start();
  try {
    waitForCounter(SplitLogCounters.tot_wkr_task_acquired, 0, maxTasks, WAIT_TIME);
    for (int i = 0; i < maxTasks; i++) {
      byte[] bytes = ZKUtil.getData(zkw, ZKSplitLog.getEncodedNodeName(zkw, TATAS + i));
      SplitLogTask slt = SplitLogTask.parseFrom(bytes);
      assertTrue(slt.isOwned(RS));
    }
  } finally {
    stopSplitLogWorker(slw);
  }
}
 
Example 18   Project: hbase   File: ZKProcedureCoordinator.java
/**
 * Start monitoring znodes in ZK: a subclass hook to begin watching the znodes it cares about.
 * @return true if succeed, false if encountered initialization errors.
 */
@Override
public final boolean start(final ProcedureCoordinator coordinator) {
  if (this.coordinator != null) {
    throw new IllegalStateException(
      "ZKProcedureCoordinator already started and already has listener installed");
  }
  this.coordinator = coordinator;

  try {
    this.zkProc = new ZKProcedureUtil(watcher, procedureType) {
      @Override
      public void nodeCreated(String path) {
        if (!isInProcedurePath(path)) return;
        LOG.debug("Node created: " + path);
        logZKTree(this.baseZNode);
        if (isAcquiredPathNode(path)) {
          // node wasn't present when we created the watch so zk event triggers acquire
          coordinator.memberAcquiredBarrier(ZKUtil.getNodeName(ZKUtil.getParent(path)),
            ZKUtil.getNodeName(path));
        } else if (isReachedPathNode(path)) {
          // node was absent when we created the watch so zk event triggers the finished barrier.

          // TODO: nothing enforces the order in which the acquire and reached znodes show up.
          String procName = ZKUtil.getNodeName(ZKUtil.getParent(path));
          String member = ZKUtil.getNodeName(path);
          // get the data from the procedure member
          try {
            byte[] dataFromMember = ZKUtil.getData(watcher, path);
            // ProtobufUtil.isPBMagicPrefix will check null
            if (dataFromMember != null && dataFromMember.length > 0) {
              if (!ProtobufUtil.isPBMagicPrefix(dataFromMember)) {
                ForeignException ee = new ForeignException(coordName,
                  "Failed to get data from finished node or data is illegally formatted:"
                      + path);
                coordinator.abortProcedure(procName, ee);
              } else {
                dataFromMember = Arrays.copyOfRange(dataFromMember, ProtobufUtil.lengthOfPBMagic(),
                  dataFromMember.length);
                LOG.debug("Finished data from procedure '{}' member '{}': {}", procName, member,
                    new String(dataFromMember, StandardCharsets.UTF_8));
                coordinator.memberFinishedBarrier(procName, member, dataFromMember);
              }
            } else {
              coordinator.memberFinishedBarrier(procName, member, dataFromMember);
            }
          } catch (KeeperException e) {
            ForeignException ee = new ForeignException(coordName, e);
            coordinator.abortProcedure(procName, ee);
          } catch (InterruptedException e) {
            ForeignException ee = new ForeignException(coordName, e);
            coordinator.abortProcedure(procName, ee);
          }
        } else if (isAbortPathNode(path)) {
          abort(path);
        } else {
          LOG.debug("Ignoring created notification for node:" + path);
        }
      }
    };
    zkProc.clearChildZNodes();
  } catch (KeeperException e) {
    LOG.error("Unable to start the ZK-based Procedure Coordinator rpcs.", e);
    return false;
  }

  LOG.debug("Starting controller for procedure member=" + coordName);
  return true;
}
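
Example 18 reads znode data from inside a nodeCreated callback. When later changes matter as well, ZKUtil also provides a watch-setting variant, getDataAndWatch; a minimal sketch, assuming its usual contract (it declares KeeperException and returns null for a missing node):

// Sketch: read a znode and leave a watch registered on the ZKWatcher.
static byte[] readAndWatch(ZKWatcher zkw, String znode) throws KeeperException {
  // Like getData, but also registers zkw so its listeners (e.g. nodeDataChanged,
  // nodeDeleted) are notified of subsequent changes to this znode.
  return ZKUtil.getDataAndWatch(zkw, znode);
}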
 
Example 19   Project: hbase   File: TestSplitLogWorker.java
@Test
public void testMultipleTasks() throws Exception {
  LOG.info("testMultipleTasks");
  SplitLogCounters.resetCounters();
  final ServerName SRV = ServerName.valueOf("tmt_svr,1,1");
  final String PATH1 = ZKSplitLog.getEncodedNodeName(zkw, "tmt_task");
  RegionServerServices mockedRS = getRegionServer(SRV);
  SplitLogWorker slw =
      new SplitLogWorker(ds, TEST_UTIL.getConfiguration(), mockedRS, neverEndingTask);
  slw.start();
  try {
    Thread.yield(); // let the worker start
    Thread.sleep(100);
    waitForCounter(SplitLogCounters.tot_wkr_task_grabing, 0, 1, WAIT_TIME);

    SplitLogTask unassignedManager =
      new SplitLogTask.Unassigned(MANAGER);
    zkw.getRecoverableZooKeeper().create(PATH1, unassignedManager.toByteArray(),
      Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);

    waitForCounter(SplitLogCounters.tot_wkr_task_acquired, 0, 1, WAIT_TIME);
    // now the worker is busy doing the above task

    // create another task
    final String PATH2 = ZKSplitLog.getEncodedNodeName(zkw, "tmt_task_2");
    zkw.getRecoverableZooKeeper().create(PATH2, unassignedManager.toByteArray(),
      Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);

    // preempt the first task, have it owned by another worker
    final ServerName anotherWorker = ServerName.valueOf("another-worker,1,1");
    SplitLogTask slt = new SplitLogTask.Owned(anotherWorker);
    ZKUtil.setData(zkw, PATH1, slt.toByteArray());
    waitForCounter(SplitLogCounters.tot_wkr_preempt_task, 0, 1, WAIT_TIME);

    waitForCounter(SplitLogCounters.tot_wkr_task_acquired, 1, 2, WAIT_TIME);
    assertEquals(2, slw.getTaskReadySeq());
    byte [] bytes = ZKUtil.getData(zkw, PATH2);
    slt = SplitLogTask.parseFrom(bytes);
    assertTrue(slt.isOwned(SRV));
  } finally {
    stopSplitLogWorker(slw);
  }
}
 
Example 20   Project: hbase   File: TestSplitLogWorker.java
@Test
public void testRescan() throws Exception {
  LOG.info("testRescan");
  SplitLogCounters.resetCounters();
  final ServerName SRV = ServerName.valueOf("svr,1,1");
  RegionServerServices mockedRS = getRegionServer(SRV);
  slw = new SplitLogWorker(ds, TEST_UTIL.getConfiguration(), mockedRS, neverEndingTask);
  slw.start();
  Thread.yield(); // let the worker start
  Thread.sleep(100);

  String task = ZKSplitLog.getEncodedNodeName(zkw, "task");
  SplitLogTask slt = new SplitLogTask.Unassigned(MANAGER);
  zkw.getRecoverableZooKeeper().create(task, slt.toByteArray(), Ids.OPEN_ACL_UNSAFE,
    CreateMode.PERSISTENT);

  waitForCounter(SplitLogCounters.tot_wkr_task_acquired, 0, 1, WAIT_TIME);
  // now the worker is busy doing the above task

  // preempt the task, have it owned by another worker
  ZKUtil.setData(zkw, task, slt.toByteArray());
  waitForCounter(SplitLogCounters.tot_wkr_preempt_task, 0, 1, WAIT_TIME);

  // create a RESCAN node
  String rescan = ZKSplitLog.getEncodedNodeName(zkw, "RESCAN");
  rescan = zkw.getRecoverableZooKeeper().create(rescan, slt.toByteArray(), Ids.OPEN_ACL_UNSAFE,
    CreateMode.PERSISTENT_SEQUENTIAL);

  waitForCounter(SplitLogCounters.tot_wkr_task_acquired, 1, 2, WAIT_TIME);
  // RESCAN node might not have been processed if the worker became busy
  // with the above task. preempt the task again so that now the RESCAN
  // node is processed
  ZKUtil.setData(zkw, task, slt.toByteArray());
  waitForCounter(SplitLogCounters.tot_wkr_preempt_task, 1, 2, WAIT_TIME);
  waitForCounter(SplitLogCounters.tot_wkr_task_acquired_rescan, 0, 1, WAIT_TIME);

  List<String> nodes = ZKUtil.listChildrenNoWatch(zkw, zkw.getZNodePaths().splitLogZNode);
  LOG.debug(Objects.toString(nodes));
  int num = 0;
  for (String node : nodes) {
    num++;
    if (node.startsWith("RESCAN")) {
      String name = ZKSplitLog.getEncodedNodeName(zkw, node);
      String fn = ZKSplitLog.getFileName(name);
      byte [] data = ZKUtil.getData(zkw,
              ZNodePaths.joinZNode(zkw.getZNodePaths().splitLogZNode, fn));
      slt = SplitLogTask.parseFrom(data);
      assertTrue(slt.toString(), slt.isDone(SRV));
    }
  }
  assertEquals(2, num);
}