Source code examples for the org.apache.hadoop.hbase.zookeeper.ZKWatcher class

The following examples show how to use the org.apache.hadoop.hbase.zookeeper.ZKWatcher API in practice; the full source of each snippet can be found in the corresponding project on GitHub.
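
Before the project examples, here is a minimal standalone sketch (not taken from any project below) that creates a ZKWatcher, probes the base znode, and closes the session. It assumes an hbase-site.xml on the classpath whose hbase.zookeeper.quorum points at a reachable ZooKeeper ensemble.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;

public class ZKWatcherQuickStart {
  public static void main(String[] args) throws Exception {
    // Assumption: hbase.zookeeper.quorum in hbase-site.xml points at a live ensemble.
    Configuration conf = HBaseConfiguration.create();
    // The identifier is only used in log messages; a null Abortable is acceptable
    // for short-lived, read-only usage, as several of the tests below also do.
    try (ZKWatcher zkw = new ZKWatcher(conf, "zkwatcher-quickstart", null)) {
      String baseZNode = zkw.getZNodePaths().baseZNode;
      // checkExists returns the znode version, or -1 if the znode does not exist.
      int version = ZKUtil.checkExists(zkw, baseZNode);
      System.out.println(baseZNode + (version == -1 ? " is missing" : " exists"));
    } // try-with-resources closes the underlying ZooKeeper session
  }
}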

Example 1 (project: hbase, file: TestMetaTableLocator.java)
@Before
public void before() throws IOException {
  this.abortable = new Abortable() {
    @Override
    public void abort(String why, Throwable e) {
      LOG.info(why, e);
    }

    @Override
    public boolean isAborted() {
      return false;
    }
  };
  this.watcher =
    new ZKWatcher(UTIL.getConfiguration(), this.getClass().getSimpleName(), this.abortable, true);
}
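
The watcher opened in before() holds a ZooKeeper session, so the test normally closes it afterwards. A minimal sketch of the matching teardown (the @After method below is illustrative and not part of the snippet above):

@After
public void after() throws IOException {
  // Release the ZooKeeper session opened in before(); without this each test
  // method would leak a connection to the test cluster.
  if (this.watcher != null) {
    this.watcher.close();
  }
}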
 
Example 2 (project: hbase, file: TestSplitLogWorker.java)
@Before
public void setup() throws Exception {
  TEST_UTIL.startMiniZKCluster();
  Configuration conf = TEST_UTIL.getConfiguration();
  zkw = new ZKWatcher(TEST_UTIL.getConfiguration(),
      "split-log-worker-tests", null);
  ds = new DummyServer(zkw, conf);
  ZKUtil.deleteChildrenRecursively(zkw, zkw.getZNodePaths().baseZNode);
  ZKUtil.createAndFailSilent(zkw, zkw.getZNodePaths().baseZNode);
  assertThat(ZKUtil.checkExists(zkw, zkw.getZNodePaths().baseZNode), not(is(-1)));
  LOG.debug(zkw.getZNodePaths().baseZNode + " created");
  ZKUtil.createAndFailSilent(zkw, zkw.getZNodePaths().splitLogZNode);
  assertThat(ZKUtil.checkExists(zkw, zkw.getZNodePaths().splitLogZNode), not(is(-1)));

  LOG.debug(zkw.getZNodePaths().splitLogZNode + " created");
  ZKUtil.createAndFailSilent(zkw, zkw.getZNodePaths().rsZNode);
  assertThat(ZKUtil.checkExists(zkw, zkw.getZNodePaths().rsZNode), not(is(-1)));

  SplitLogCounters.resetCounters();
  executorService = new ExecutorService("TestSplitLogWorker");
  executorService.startExecutorService(ExecutorType.RS_LOG_REPLAY_OPS, 10);
}
 
Example 3 (project: hbase, file: IntegrationTestZKAndFSPermissions.java)
private void testZNodeACLs() throws IOException, KeeperException, InterruptedException {

    ZKWatcher watcher = new ZKWatcher(conf, "IntegrationTestZnodeACLs", null);
    RecoverableZooKeeper zk = ZKUtil.connect(this.conf, watcher);

    String baseZNode = watcher.getZNodePaths().baseZNode;

    LOG.info("");
    LOG.info("***********************************************************************************");
    LOG.info("Checking ZK permissions, root znode: " + baseZNode);
    LOG.info("***********************************************************************************");
    LOG.info("");

    checkZnodePermsRecursive(watcher, zk, baseZNode);

    LOG.info("Checking ZK permissions: SUCCESS");
  }
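
checkZnodePermsRecursive is not shown above. A hedged sketch of the recursive walk it implies, using only ZKUtil.listChildrenNoWatch and ZNodePaths.joinZNode; the method name is hypothetical and the ACL assertions that the real test performs on each znode are elided.

private void walkZnodesRecursive(ZKWatcher watcher, String znode) throws KeeperException {
  // Visit the znode itself; the real test checks its ACLs at this point.
  LOG.info("Visiting znode: " + znode);
  List<String> children = ZKUtil.listChildrenNoWatch(watcher, znode);
  if (children == null) {
    return; // the znode disappeared between listing its parent and visiting it
  }
  for (String child : children) {
    walkZnodesRecursive(watcher, ZNodePaths.joinZNode(znode, child));
  }
}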
 
Example 4 (project: hbase, file: TestMetaWithReplicasBasic.java)
@Test
public void testZookeeperNodesForReplicas() throws Exception {
  // Checks all the znodes exist when meta's replicas are enabled
  ZKWatcher zkw = TEST_UTIL.getZooKeeperWatcher();
  Configuration conf = TEST_UTIL.getConfiguration();
  String baseZNode =
    conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT, HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT);
  String primaryMetaZnode =
    ZNodePaths.joinZNode(baseZNode, conf.get("zookeeper.znode.metaserver", "meta-region-server"));
  // check that the data in the znode is parseable (this would also mean the znode exists)
  byte[] data = ZKUtil.getData(zkw, primaryMetaZnode);
  ProtobufUtil.toServerName(data);
  for (int i = 1; i < 3; i++) {
    String secZnode = ZNodePaths.joinZNode(baseZNode,
      conf.get("zookeeper.znode.metaserver", "meta-region-server") + "-" + i);
    String str = zkw.getZNodePaths().getZNodeForReplica(i);
    assertTrue(str.equals(secZnode));
    // check that the data in the znode is parseable (this would also mean the znode exists)
    data = ZKUtil.getData(zkw, secZnode);
    ProtobufUtil.toServerName(data);
  }
}
 
Example 5 (project: hbase, file: TestAdmin4.java)
@Test
public void testDecommissionAndStopRegionServers() throws Exception {
  List<ServerName> decommissionedRegionServers = ADMIN.listDecommissionedRegionServers();
  assertTrue(decommissionedRegionServers.isEmpty());

  ArrayList<ServerName> clusterRegionServers =
    new ArrayList<>(ADMIN.getRegionServers(true));

  List<ServerName> serversToDecommission = new ArrayList<ServerName>();
  serversToDecommission.add(clusterRegionServers.get(0));

  // Decommission
  ADMIN.decommissionRegionServers(serversToDecommission, true);
  assertEquals(1, ADMIN.listDecommissionedRegionServers().size());

  // Stop decommissioned region server and verify it is removed from draining znode
  ServerName serverName = serversToDecommission.get(0);
  ADMIN.stopRegionServer(serverName.getHostname()+":"+serverName.getPort());
  assertNotEquals("RS not removed from decommissioned list", -1,
    TEST_UTIL.waitFor(10000, () -> ADMIN.listDecommissionedRegionServers().isEmpty()));
  ZKWatcher zkw = TEST_UTIL.getZooKeeperWatcher();
  assertEquals(-1, ZKUtil.checkExists(zkw,
    ZNodePaths.joinZNode(zkw.getZNodePaths().drainingZNode, serverName.getServerName())));
}
 
Example 6 (project: hbase, file: IntegrationTestMetaReplicas.java)
@BeforeClass
public static void setUp() throws Exception {
  // Set up the integration test util
  if (util == null) {
    util = new IntegrationTestingUtility();
  }
  util.getConfiguration().setInt(HConstants.META_REPLICAS_NUM, 3);
  util.getConfiguration().setInt(
      StorefileRefresherChore.REGIONSERVER_STOREFILE_REFRESH_PERIOD, 1000);
  // Make sure there are three servers.
  util.initializeCluster(3);
  ZKWatcher zkw = util.getZooKeeperWatcher();
  Configuration conf = util.getConfiguration();
  String baseZNode = conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT,
      HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT);
  String primaryMetaZnode = ZNodePaths.joinZNode(baseZNode,
      conf.get("zookeeper.znode.metaserver", "meta-region-server"));
  // check that the data in the znode is parseable (this would also mean the znode exists)
  byte[] data = ZKUtil.getData(zkw, primaryMetaZnode);
  ProtobufUtil.toServerName(data);
  waitUntilZnodeAvailable(1);
  waitUntilZnodeAvailable(2);
}
 
Example 7 (project: hbase, file: TestZKPermissionWatcher.java)
@BeforeClass
public static void beforeClass() throws Exception {
  // setup configuration
  Configuration conf = UTIL.getConfiguration();
  SecureTestUtil.enableSecurity(conf);

  // start minicluster
  UTIL.startMiniCluster();
  AUTH_A = new AuthManager(conf);
  AUTH_B = new AuthManager(conf);
  WATCHER_A = new ZKPermissionWatcher(
      new ZKWatcher(conf, "TestZKPermissionsWatcher_1", ABORTABLE), AUTH_A, conf);
  WATCHER_B = new ZKPermissionWatcher(
      new ZKWatcher(conf, "TestZKPermissionsWatcher_2", ABORTABLE), AUTH_B, conf);
  WATCHER_A.start();
  WATCHER_B.start();
}
 
Example 8 (project: hbase, file: RegionServerFlushTableProcedureManager.java)
/**
 * Initialize this region server flush procedure manager
 * Uses a zookeeper based member controller.
 * @param rss region server
 * @throws KeeperException if the zookeeper cannot be reached
 */
@Override
public void initialize(RegionServerServices rss) throws KeeperException {
  this.rss = rss;
  ZKWatcher zkw = rss.getZooKeeper();
  this.memberRpcs = new ZKProcedureMemberRpcs(zkw,
    MasterFlushTableProcedureManager.FLUSH_TABLE_PROCEDURE_SIGNATURE);

  Configuration conf = rss.getConfiguration();
  long keepAlive = conf.getLong(FLUSH_TIMEOUT_MILLIS_KEY, FLUSH_TIMEOUT_MILLIS_DEFAULT);
  int opThreads = conf.getInt(FLUSH_REQUEST_THREADS_KEY, FLUSH_REQUEST_THREADS_DEFAULT);

  // create the actual flush table procedure member
  ThreadPoolExecutor pool = ProcedureMember.defaultPool(rss.getServerName().toString(),
    opThreads, keepAlive);
  this.member = new ProcedureMember(memberRpcs, pool, new FlushTableSubprocedureBuilder());
}
 
Example 9 (project: hbase, file: ZKProcedureUtil.java)
/**
 * Top-level watcher/controller for procedures across the cluster.
 * <p>
 * On instantiation, this ensures the procedure znodes exist.  This however requires the passed in
 *  watcher has been started.
 * @param watcher watcher for the cluster ZK. Owned by <tt>this</tt> and closed via
 *          {@link #close()}
 * @param procDescription name of the znode describing the procedure to run
 * @throws KeeperException when the procedure znodes cannot be created
 */
public ZKProcedureUtil(ZKWatcher watcher, String procDescription)
    throws KeeperException {
  super(watcher);
  // make sure we are listening for events
  watcher.registerListener(this);
  // setup paths for the zknodes used in procedures
  this.baseZNode = ZNodePaths.joinZNode(watcher.getZNodePaths().baseZNode, procDescription);
  acquiredZnode = ZNodePaths.joinZNode(baseZNode, ACQUIRED_BARRIER_ZNODE_DEFAULT);
  reachedZnode = ZNodePaths.joinZNode(baseZNode, REACHED_BARRIER_ZNODE_DEFAULT);
  abortZnode = ZNodePaths.joinZNode(baseZNode, ABORT_ZNODE_DEFAULT);

  // first make sure all the ZK nodes exist
  // make sure all the parents exist (sometimes not the case in tests)
  ZKUtil.createWithParents(watcher, acquiredZnode);
  // regular create because all the parents exist
  ZKUtil.createAndFailSilent(watcher, reachedZnode);
  ZKUtil.createAndFailSilent(watcher, abortZnode);
}
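
ZKProcedureUtil registers itself as a listener on the watcher via registerListener. A standalone sketch of that pattern with a hypothetical listener that only logs node events (real listeners, like the class above, react to specific paths); imports are omitted as in the other snippets.

class LoggingZKListener extends ZKListener {
  private static final Logger LOG = LoggerFactory.getLogger(LoggingZKListener.class);

  LoggingZKListener(ZKWatcher watcher) {
    super(watcher);
  }

  @Override
  public void nodeCreated(String path) {
    LOG.info("znode created: {}", path);
  }

  @Override
  public void nodeDeleted(String path) {
    LOG.info("znode deleted: {}", path);
  }

  @Override
  public void nodeDataChanged(String path) {
    LOG.info("znode data changed: {}", path);
  }

  @Override
  public void nodeChildrenChanged(String path) {
    LOG.info("children changed under: {}", path);
  }
}
// Wiring it up: watcher.registerListener(new LoggingZKListener(watcher));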
 
Example 10 (project: hbase, file: RegionServerSnapshotManager.java)
/**
 * Create a default snapshot handler - uses a zookeeper based member controller.
 * @param rss region server running the handler
 * @throws KeeperException if the zookeeper cluster cannot be reached
 */
@Override
public void initialize(RegionServerServices rss) throws KeeperException {
  this.rss = rss;
  ZKWatcher zkw = rss.getZooKeeper();
  this.memberRpcs = new ZKProcedureMemberRpcs(zkw,
      SnapshotManager.ONLINE_SNAPSHOT_CONTROLLER_DESCRIPTION);

  // read in the snapshot request configuration properties
  Configuration conf = rss.getConfiguration();
  long keepAlive = conf.getLong(SNAPSHOT_TIMEOUT_MILLIS_KEY, SNAPSHOT_TIMEOUT_MILLIS_DEFAULT);
  int opThreads = conf.getInt(SNAPSHOT_REQUEST_THREADS_KEY, SNAPSHOT_REQUEST_THREADS_DEFAULT);

  // create the actual snapshot procedure member
  ThreadPoolExecutor pool = ProcedureMember.defaultPool(rss.getServerName().toString(),
    opThreads, keepAlive);
  this.member = new ProcedureMember(memberRpcs, pool, new SnapshotSubprocedureBuilder());
}
 
Example 11 (project: hbase, file: ZKReplicationQueueStorage.java)
public ZKReplicationQueueStorage(ZKWatcher zookeeper, Configuration conf) {
  super(zookeeper, conf);

  String queuesZNodeName = conf.get("zookeeper.znode.replication.rs", "rs");
  String hfileRefsZNodeName = conf.get(ZOOKEEPER_ZNODE_REPLICATION_HFILE_REFS_KEY,
    ZOOKEEPER_ZNODE_REPLICATION_HFILE_REFS_DEFAULT);
  this.queuesZNode = ZNodePaths.joinZNode(replicationZNode, queuesZNodeName);
  this.hfileRefsZNode = ZNodePaths.joinZNode(replicationZNode, hfileRefsZNodeName);
  this.regionsZNode = ZNodePaths.joinZNode(replicationZNode, conf
      .get(ZOOKEEPER_ZNODE_REPLICATION_REGIONS_KEY, ZOOKEEPER_ZNODE_REPLICATION_REGIONS_DEFAULT));
}
 
Example 12 (project: pentaho-hadoop-shims, file: HadoopShim.java)
@Override
public Class[] getHbaseDependencyClasses() {
  return new Class[] {
    HConstants.class, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.class,
    org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.class, Put.class,
    RpcServer.class, CompatibilityFactory.class, JobUtil.class, TableMapper.class, FastLongHistogram.class,
    Snapshot.class, ZooKeeper.class, Channel.class, Message.class, UnsafeByteOperations.class, Lists.class,
    Tracer.class, MetricRegistry.class, ArrayUtils.class, ObjectMapper.class, Versioned.class,
    JsonView.class, ZKWatcher.class, CacheLoader.class
  };
}
 
Example 13 (project: hbase, file: TestHMasterRPCException.java)
@Before
public void setUp() throws Exception {
  Configuration conf = testUtil.getConfiguration();
  conf.set(HConstants.MASTER_PORT, "0");
  conf.setInt(HConstants.ZK_SESSION_TIMEOUT, 2000);
  testUtil.startMiniZKCluster();

  ZKWatcher watcher = testUtil.getZooKeeperWatcher();
  ZKUtil.createWithParents(watcher, watcher.getZNodePaths().masterAddressZNode,
          Bytes.toBytes("fake:123"));
  master = new HMaster(conf);
  rpcClient = RpcClientFactory.createClient(conf, HConstants.CLUSTER_ID_DEFAULT);
}
 
Example 14 (project: hbase, file: TestRegionServerHostname.java)
@Test
public void testRegionServerHostnameReportedToMaster() throws Exception {
  TEST_UTIL.getConfiguration()
    .setBoolean(HRegionServer.RS_HOSTNAME_DISABLE_MASTER_REVERSEDNS_KEY, true);
  StartMiniClusterOption option = StartMiniClusterOption.builder()
      .numMasters(NUM_MASTERS).numRegionServers(NUM_RS).numDataNodes(NUM_RS).build();
  TEST_UTIL.startMiniCluster(option);
  boolean tablesOnMaster = LoadBalancer.isTablesOnMaster(TEST_UTIL.getConfiguration());
  int expectedRS = NUM_RS + (tablesOnMaster? 1: 0);
  try (ZKWatcher zkw = TEST_UTIL.getZooKeeperWatcher()) {
    List<String> servers = ZKUtil.listChildrenNoWatch(zkw, zkw.getZNodePaths().rsZNode);
    assertEquals(expectedRS, servers.size());
  }
}
 
Example 15 (project: hbase, file: ReplicationTrackerZKImpl.java)
public ReplicationTrackerZKImpl(ZKWatcher zookeeper, Abortable abortable, Stoppable stopper) {
  this.zookeeper = zookeeper;
  this.abortable = abortable;
  this.stopper = stopper;
  this.zookeeper.registerListener(new OtherRegionServerWatcher(this.zookeeper));
  // watch the changes
  refreshOtherRegionServersList(true);
}
 
Example 16 (project: hbase, file: TestReplicationStateZKImpl.java)
private static String initPeerClusterState(String baseZKNode)
    throws IOException, KeeperException {
  // Add a dummy region server and set up the cluster id
  Configuration testConf = new Configuration(conf);
  testConf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, baseZKNode);
  ZKWatcher zkw1 = new ZKWatcher(testConf, "test1", null);
  String fakeRs = ZNodePaths.joinZNode(zkw1.getZNodePaths().rsZNode,
          "hostname1.example.org:1234");
  ZKUtil.createWithParents(zkw1, fakeRs);
  ZKClusterId.setClusterId(zkw1, new ClusterId());
  return ZKConfig.getZooKeeperClusterKey(testConf);
}
 
Example 17 (project: hbase, file: TestMetaReplicasAddressChange.java)
@Test
public void testMetaAddressChange() throws Exception {
  // checks that even when the meta's location changes, the various
  // caches update themselves. Uses the master operations to test
  // this
  Configuration conf = TEST_UTIL.getConfiguration();
  ZKWatcher zkw = TEST_UTIL.getZooKeeperWatcher();
  String baseZNode =
    conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT, HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT);
  String primaryMetaZnode =
    ZNodePaths.joinZNode(baseZNode, conf.get("zookeeper.znode.metaserver", "meta-region-server"));
  // check that the data in the znode is parseable (this would also mean the znode exists)
  byte[] data = ZKUtil.getData(zkw, primaryMetaZnode);
  ServerName currentServer = ProtobufUtil.toServerName(data);
  Collection<ServerName> liveServers = TEST_UTIL.getAdmin()
    .getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)).getLiveServerMetrics().keySet();
  ServerName moveToServer =
    liveServers.stream().filter(s -> !currentServer.equals(s)).findAny().get();
  final TableName tableName = name.getTableName();
  TEST_UTIL.createTable(tableName, "f");
  assertTrue(TEST_UTIL.getAdmin().tableExists(tableName));
  TEST_UTIL.getAdmin().move(RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedNameAsBytes(),
    moveToServer);
  assertNotEquals(currentServer, moveToServer);
  LOG.debug("CurrentServer={}, moveToServer={}", currentServer, moveToServer);
  TEST_UTIL.waitFor(60000, () -> {
    byte[] bytes = ZKUtil.getData(zkw, primaryMetaZnode);
    ServerName actualServer = ProtobufUtil.toServerName(bytes);
    return moveToServer.equals(actualServer);
  });
  TEST_UTIL.getAdmin().disableTable(tableName);
  assertTrue(TEST_UTIL.getAdmin().isTableDisabled(tableName));
}
 
Example 18 (project: hbase, file: ReplicationLogCleaner.java)
@Override
public void setConf(Configuration config) {
  // Make my own Configuration.  Then I'll have my own connection to zk that
  // I can close myself when comes time.
  Configuration conf = new Configuration(config);
  try {
    setConf(conf, new ZKWatcher(conf, "replicationLogCleaner", null));
  } catch (IOException e) {
    LOG.error("Error while configuring " + this.getClass().getName(), e);
  }
}
 
Example 19 (project: hbase, file: TestActiveMasterManager.java)
@Test public void testRestartMaster() throws IOException, KeeperException {
  ZKWatcher zk = new ZKWatcher(TEST_UTIL.getConfiguration(),
    "testActiveMasterManagerFromZK", null, true);
  try {
    ZKUtil.deleteNode(zk, zk.getZNodePaths().masterAddressZNode);
    ZKUtil.deleteNode(zk, zk.getZNodePaths().clusterStateZNode);
  } catch(KeeperException.NoNodeException nne) {}

  // Create the master node with a dummy address
  ServerName master = ServerName.valueOf("localhost", 1, System.currentTimeMillis());
  // Should not have a master yet
  DummyMaster dummyMaster = new DummyMaster(zk,master);
  ClusterStatusTracker clusterStatusTracker =
    dummyMaster.getClusterStatusTracker();
  ActiveMasterManager activeMasterManager =
    dummyMaster.getActiveMasterManager();
  assertFalse(activeMasterManager.clusterHasActiveMaster.get());
  assertFalse(activeMasterManager.getActiveMasterServerName().isPresent());

  // First test becoming the active master uninterrupted
  MonitoredTask status = Mockito.mock(MonitoredTask.class);
  clusterStatusTracker.setClusterUp();

  activeMasterManager.blockUntilBecomingActiveMaster(100, status);
  assertTrue(activeMasterManager.clusterHasActiveMaster.get());
  assertMaster(zk, master);
  assertMaster(zk, activeMasterManager.getActiveMasterServerName().get());

  // Now pretend master restart
  DummyMaster secondDummyMaster = new DummyMaster(zk,master);
  ActiveMasterManager secondActiveMasterManager =
    secondDummyMaster.getActiveMasterManager();
  assertFalse(secondActiveMasterManager.clusterHasActiveMaster.get());
  activeMasterManager.blockUntilBecomingActiveMaster(100, status);
  assertTrue(activeMasterManager.clusterHasActiveMaster.get());
  assertMaster(zk, master);
  assertMaster(zk, activeMasterManager.getActiveMasterServerName().get());
  assertMaster(zk, secondActiveMasterManager.getActiveMasterServerName().get());
}
 
Example 20 (project: hbase, file: DumpReplicationQueues.java)
public String dumpQueues(ZKWatcher zkw, Set<String> peerIds,
    boolean hdfs) throws Exception {
  ReplicationQueueStorage queueStorage;
  ReplicationTracker replicationTracker;
  StringBuilder sb = new StringBuilder();

  queueStorage = ReplicationStorageFactory.getReplicationQueueStorage(zkw, getConf());
  replicationTracker = ReplicationFactory.getReplicationTracker(zkw, new WarnOnlyAbortable(),
    new WarnOnlyStoppable());
  Set<String> liveRegionServers = new HashSet<>(replicationTracker.getListOfRegionServers());

  // Loops each peer on each RS and dumps the queues
  List<ServerName> regionservers = queueStorage.getListOfReplicators();
  if (regionservers == null || regionservers.isEmpty()) {
    return sb.toString();
  }
  for (ServerName regionserver : regionservers) {
    List<String> queueIds = queueStorage.getAllQueues(regionserver);
    if (!liveRegionServers.contains(regionserver.getServerName())) {
      deadRegionServers.add(regionserver.getServerName());
    }
    for (String queueId : queueIds) {
      ReplicationQueueInfo queueInfo = new ReplicationQueueInfo(queueId);
      List<String> wals = queueStorage.getWALsInQueue(regionserver, queueId);
      Collections.sort(wals);
      if (!peerIds.contains(queueInfo.getPeerId())) {
        deletedQueues.add(regionserver + "/" + queueId);
        sb.append(formatQueue(regionserver, queueStorage, queueInfo, queueId, wals, true, hdfs));
      } else {
        sb.append(formatQueue(regionserver, queueStorage, queueInfo, queueId, wals, false, hdfs));
      }
    }
  }
  return sb.toString();
}
 
Example 21 (project: hbase, file: ZKVisibilityLabelWatcher.java)
public ZKVisibilityLabelWatcher(ZKWatcher watcher, VisibilityLabelsCache labelsCache,
                                Configuration conf) {
  super(watcher);
  this.labelsCache = labelsCache;
  String labelZnodeParent = conf.get(VISIBILITY_LABEL_ZK_PATH, DEFAULT_VISIBILITY_LABEL_NODE);
  String userAuthsZnodeParent = conf.get(VISIBILITY_USER_AUTHS_ZK_PATH,
      DEFAULT_VISIBILITY_USER_AUTHS_NODE);
  this.labelZnode = ZNodePaths.joinZNode(watcher.getZNodePaths().baseZNode, labelZnodeParent);
  this.userAuthsZnode = ZNodePaths.joinZNode(watcher.getZNodePaths().baseZNode,
          userAuthsZnodeParent);
}
 
Example 22 (project: hbase, file: VisibilityLabelsCache.java)
private VisibilityLabelsCache(ZKWatcher watcher, Configuration conf) throws IOException {
  zkVisibilityWatcher = new ZKVisibilityLabelWatcher(watcher, this, conf);
  try {
    zkVisibilityWatcher.start();
  } catch (KeeperException ke) {
    LOG.error("ZooKeeper initialization failed", ke);
    throw new IOException(ke);
  }
}
 
Example 23 (project: hbase, file: ZKSecretWatcher.java)
public ZKSecretWatcher(Configuration conf,
    ZKWatcher watcher,
    AuthenticationTokenSecretManager secretManager) {
  super(watcher);
  this.secretManager = secretManager;
  String keyZNodeParent = conf.get("zookeeper.znode.tokenauth.parent", DEFAULT_ROOT_NODE);
  this.baseKeyZNode = ZNodePaths.joinZNode(watcher.getZNodePaths().baseZNode, keyZNodeParent);
  this.keysParentZNode = ZNodePaths.joinZNode(baseKeyZNode, DEFAULT_KEYS_PARENT);
}
 
Example 24 (project: hbase, file: AuthenticationTokenSecretManager.java)
/**
 * Create a new secret manager instance for generating keys.
 * @param conf Configuration to use
 * @param zk Connection to zookeeper for handling leader elections
 * @param keyUpdateInterval Time (in milliseconds) between rolling a new master key for token signing
 * @param tokenMaxLifetime Maximum age (in milliseconds) before a token expires and is no longer valid
 */
/* TODO: Restrict access to this constructor to make rogues instances more difficult.
 * For the moment this class is instantiated from
 * org.apache.hadoop.hbase.ipc.SecureServer so public access is needed.
 */
public AuthenticationTokenSecretManager(Configuration conf,
                                        ZKWatcher zk, String serverName,
                                        long keyUpdateInterval, long tokenMaxLifetime) {
  this.zkWatcher = new ZKSecretWatcher(conf, zk, this);
  this.keyUpdateInterval = keyUpdateInterval;
  this.tokenMaxLifetime = tokenMaxLifetime;
  this.leaderElector = new LeaderElector(zk, serverName);
  this.name = NAME_PREFIX+serverName;
  this.clusterId = new ZKClusterId(zk, zk);
}
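
A hedged sketch of wiring this constructor up; the helper method, server name handling, and interval values below are illustrative assumptions, not code or defaults from HBase.

// Hypothetical helper, for illustration only.
public AuthenticationTokenSecretManager createSecretManager(Configuration conf, String serverName)
    throws IOException {
  ZKWatcher zk = new ZKWatcher(conf, "token-secret-manager-demo", null);
  AuthenticationTokenSecretManager mgr = new AuthenticationTokenSecretManager(
      conf, zk, serverName,
      24L * 60 * 60 * 1000,      // keyUpdateInterval: roll a new master key daily
      7L * 24 * 60 * 60 * 1000); // tokenMaxLifetime: tokens expire after a week
  mgr.start(); // begins leader election and key management
  return mgr;
}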
 
Example 25 (project: hbase, file: MockServer.java)
/**
 * @param htu Testing utility to use
 * @param zkw If true, create a zkw.
 * @throws ZooKeeperConnectionException
 * @throws IOException
 */
public MockServer(final HBaseTestingUtility htu, final boolean zkw)
throws ZooKeeperConnectionException, IOException {
  this.htu = htu;
  this.zk = zkw?
    new ZKWatcher(htu.getConfiguration(), NAME.toString(), this, true):
    null;
}
 
Example 26 (project: hbase, file: ZKPermissionWatcher.java)
public ZKPermissionWatcher(ZKWatcher watcher,
    AuthManager authManager, Configuration conf) {
  super(watcher);
  this.authManager = authManager;
  String aclZnodeParent = conf.get("zookeeper.znode.acl.parent", ACL_NODE);
  this.aclZNode = ZNodePaths.joinZNode(watcher.getZNodePaths().baseZNode, aclZnodeParent);
  executor = Executors.newSingleThreadExecutor(
      Threads.newDaemonThreadFactory("zk-permission-watcher"));
}
 
Example 27 (project: hbase, file: TestLogsCleaner.java)
/**
 * When zk is working both files should be returned
 * @throws Exception from ZK watcher
 */
@Test
public void testZooKeeperNormal() throws Exception {
  ReplicationLogCleaner cleaner = new ReplicationLogCleaner();

  // Subtract 1000 from current time so modtime is for sure older
  // than 'now'.
  long modTime = System.currentTimeMillis() - 1000;
  List<FileStatus> dummyFiles = Arrays.asList(
      new FileStatus(100, false, 3, 100, modTime, new Path("log1")),
      new FileStatus(100, false, 3, 100, modTime, new Path("log2"))
  );

  ZKWatcher zkw = new ZKWatcher(conf, "testZooKeeperAbort-normal", null);
  try {
    cleaner.setConf(conf, zkw);
    cleaner.preClean();
    Iterable<FileStatus> filesToDelete = cleaner.getDeletableFiles(dummyFiles);
    Iterator<FileStatus> iter = filesToDelete.iterator();
    assertTrue(iter.hasNext());
    assertEquals(new Path("log1"), iter.next().getPath());
    assertTrue(iter.hasNext());
    assertEquals(new Path("log2"), iter.next().getPath());
    assertFalse(iter.hasNext());
  } finally {
    zkw.close();
  }
}
 
Example 28 (project: hbase, file: TestZKProcedureControllers.java)
/**
 * Verify that the prepare, commit and abort nodes for the operation are removed from zookeeper
 */
private void verifyZooKeeperClean(String operationName, ZKWatcher watcher,
    ZKProcedureUtil controller) throws Exception {
  String prepare = ZKProcedureUtil.getAcquireBarrierNode(controller, operationName);
  String commit = ZKProcedureUtil.getReachedBarrierNode(controller, operationName);
  String abort = ZKProcedureUtil.getAbortNode(controller, operationName);
  assertEquals("Didn't delete prepare node", -1, ZKUtil.checkExists(watcher, prepare));
  assertEquals("Didn't delete commit node", -1, ZKUtil.checkExists(watcher, commit));
  assertEquals("Didn't delete abort node", -1, ZKUtil.checkExists(watcher, abort));
}
 
Example 29 (project: hbase, file: RegionServerTracker.java)
public RegionServerTracker(ZKWatcher watcher, MasterServices server,
    ServerManager serverManager) {
  super(watcher);
  this.server = server;
  this.serverManager = serverManager;
  this.executor = Executors.newSingleThreadExecutor(
    new ThreadFactoryBuilder().setDaemon(true).setNameFormat("RegionServerTracker-%d").build());
}
 
Example 30 (project: hbase, file: AssignmentManager.java)
public void start() throws IOException, KeeperException {
  if (!running.compareAndSet(false, true)) {
    return;
  }

  LOG.trace("Starting assignment manager");

  // Start the Assignment Thread
  startAssignmentThread();

  // load meta region state
  ZKWatcher zkw = master.getZooKeeper();
  // it could be null in some tests
  if (zkw != null) {
    // here we are still in the early steps of active master startup. There is only one thread(us)
    // can access AssignmentManager and create region node, so here we do not need to lock the
    // region node.
    RegionState regionState = MetaTableLocator.getMetaRegionState(zkw);
    RegionStateNode regionNode =
      regionStates.getOrCreateRegionStateNode(RegionInfoBuilder.FIRST_META_REGIONINFO);
    regionNode.setRegionLocation(regionState.getServerName());
    regionNode.setState(regionState.getState());
    if (regionNode.getProcedure() != null) {
      regionNode.getProcedure().stateLoaded(this, regionNode);
    }
    if (regionState.getServerName() != null) {
      regionStates.addRegionToServer(regionNode);
    }
    setMetaAssigned(regionState.getRegion(), regionState.getState() == State.OPEN);
  }
}
 