From 1a9b7a9fbd0bcaa0750d66435f9949ef40abae6f Mon Sep 17 00:00:00 2001 From: Apekshit Sharma Date: Tue, 31 Oct 2017 12:13:49 -0700 Subject: [PATCH] HBASE-19114 Split out o.a.h.h.zookeeper from hbase-server and hbase-client --- conf/log4j.properties | 2 +- dev-support/findHangingTests.py | 1 - .../src/main/resources/log4j.properties | 2 +- .../src/main/resources/log4j.properties | 2 +- hbase-assembly/pom.xml | 4 + .../src/main/assembly/hadoop-two-compat.xml | 1 + hbase-client/pom.xml | 6 +- .../org/apache/hadoop/hbase/MetaTableAccessor.java | 2 +- .../hadoop/hbase/client/ClusterConnection.java | 2 +- .../hbase/client/ConnectionImplementation.java | 18 +- .../org/apache/hadoop/hbase/client/HBaseAdmin.java | 24 +- .../hadoop/hbase/client/RawAsyncHBaseAdmin.java | 4 - .../org/apache/hadoop/hbase/client/RegionInfo.java | 2 +- .../hadoop/hbase/client/ZKAsyncRegistry.java | 8 +- .../hbase/client/ZooKeeperKeepAliveConnection.java | 9 +- .../hadoop/hbase/client/ZooKeeperRegistry.java | 4 +- .../hadoop/hbase/ipc/CoprocessorRpcUtils.java | 4 +- .../hbase/ipc/RemoteWithExtrasException.java | 3 +- .../hbase/security/access/AccessControlClient.java | 2 +- .../hadoop/hbase/shaded/protobuf/ProtobufUtil.java | 10 +- .../hbase/zookeeper/MasterAddressTracker.java | 40 +- .../hadoop/hbase/zookeeper/MetaTableLocator.java | 61 +- .../apache/hadoop/hbase/zookeeper/ZKClusterId.java | 18 +- .../org/apache/hadoop/hbase/zookeeper/ZKUtil.java | 1880 +------------------ hbase-common/pom.xml | 4 + .../java/org/apache/hadoop/hbase/Abortable.java | 0 .../java/org/apache/hadoop/hbase/HConstants.java | 2 + .../hadoop/hbase/protobuf/ProtobufHelpers.java | 43 + .../hadoop/hbase/protobuf/ProtobufMagic.java | 54 +- .../regionserver/SecureBulkLoadEndpointClient.java | 1 - hbase-examples/pom.xml | 4 + .../example/TestZooKeeperScanPolicyObserver.java | 2 +- hbase-it/pom.xml | 5 + .../hadoop/hbase/DistributedHBaseCluster.java | 1 + .../hadoop/hbase/IntegrationTestMetaReplicas.java | 14 +- 
.../test/IntegrationTestZKAndFSPermissions.java | 20 +- .../org/apache/hadoop/hbase/mapreduce/Import.java | 8 +- .../mapreduce/replication/VerifyReplication.java | 6 +- hbase-replication/pom.xml | 4 + .../hbase/replication/ReplicationFactory.java | 8 +- .../hbase/replication/ReplicationPeerZKImpl.java | 22 +- .../hbase/replication/ReplicationPeersZKImpl.java | 47 +- .../replication/ReplicationQueuesArguments.java | 11 +- .../ReplicationQueuesClientArguments.java | 4 +- .../replication/ReplicationQueuesClientZKImpl.java | 30 +- .../hbase/replication/ReplicationQueuesZKImpl.java | 127 +- .../hbase/replication/ReplicationStateZKBase.java | 26 +- .../replication/ReplicationTrackerZKImpl.java | 19 +- .../TableBasedReplicationQueuesImpl.java | 5 +- .../hbase/rsgroup/RSGroupInfoManagerImpl.java | 41 +- .../hbase/rsgroup/VerifyingRSGroupAdminClient.java | 14 +- hbase-server/pom.xml | 10 + .../main/java/org/apache/hadoop/hbase/Server.java | 4 +- .../apache/hadoop/hbase/ZKNamespaceManager.java | 50 +- .../java/org/apache/hadoop/hbase/ZNodeClearer.java | 25 +- .../hbase/backup/example/HFileArchiveManager.java | 28 +- .../backup/example/TableHFileArchiveTracker.java | 32 +- .../hbase/backup/example/ZKTableArchiveClient.java | 8 +- .../ZKSplitLogManagerCoordination.java | 87 +- .../coordination/ZkCoordinatedStateManager.java | 4 +- .../coordination/ZkSplitLogWorkerCoordination.java | 86 +- .../hadoop/hbase/master/ActiveMasterManager.java | 27 +- .../DrainingServerTracker.java | 19 +- .../org/apache/hadoop/hbase/master/HMaster.java | 27 +- .../hadoop/hbase/master/HMasterCommandLine.java | 6 +- .../hadoop/hbase/master/MasterMetaBootstrap.java | 8 +- .../hbase/master/MetricsMasterWrapperImpl.java | 9 +- .../{zookeeper => master}/RegionServerTracker.java | 25 +- .../apache/hadoop/hbase/master/ServerManager.java | 13 +- .../{zookeeper => master}/SplitOrMergeTracker.java | 35 +- .../master/cleaner/ReplicationZKNodeCleaner.java | 21 +- .../master/replication/ReplicationManager.java | 
6 +- .../hbase/procedure/ZKProcedureCoordinator.java | 40 +- .../hbase/procedure/ZKProcedureMemberRpcs.java | 43 +- .../hadoop/hbase/procedure/ZKProcedureUtil.java | 47 +- .../RegionServerFlushTableProcedureManager.java | 5 +- .../hadoop/hbase/regionserver/HRegionServer.java | 50 +- .../MetricsRegionServerWrapperImpl.java | 4 +- .../RecoveringRegionWatcher.java | 13 +- .../snapshot/RegionServerSnapshotManager.java | 5 +- .../replication/HBaseReplicationEndpoint.java | 19 +- .../master/ReplicationHFileCleaner.java | 11 +- .../replication/master/ReplicationLogCleaner.java | 9 +- .../hbase/replication/master/TableCFsUpdater.java | 18 +- .../regionserver/DumpReplicationQueues.java | 6 +- .../regionserver/ReplicationSyncUp.java | 13 +- .../hbase/security/access/AccessControlLists.java | 3 +- .../hbase/security/access/AccessController.java | 4 +- .../hbase/security/access/TableAuthManager.java | 8 +- .../hbase/security/access/ZKPermissionWatcher.java | 63 +- .../token/AuthenticationTokenSecretManager.java | 18 +- .../hadoop/hbase/security/token/TokenUtil.java | 4 +- .../hbase/security/token/ZKSecretWatcher.java | 62 +- .../DefaultVisibilityLabelServiceImpl.java | 4 +- .../security/visibility/VisibilityLabelsCache.java | 8 +- .../hbase/security/visibility/VisibilityUtils.java | 4 +- .../visibility/ZKVisibilityLabelWatcher.java | 32 +- .../org/apache/hadoop/hbase/util/HBaseFsck.java | 20 +- .../apache/hadoop/hbase/util/HBaseFsckRepair.java | 2 +- .../org/apache/hadoop/hbase/util/RegionMover.java | 4 +- .../apache/hadoop/hbase/util/ZKDataMigrator.java | 19 +- .../hadoop/hbase/util/hbck/ReplicationChecker.java | 4 +- .../org/apache/hadoop/hbase/wal/WALSplitter.java | 6 +- .../src/main/resources/hbase-webapps/master/zk.jsp | 7 +- .../apache/hadoop/hbase/HBaseTestingUtility.java | 25 +- .../hadoop/hbase/MockRegionServerServices.java | 11 +- .../hbase/TestMetaTableAccessorNoCluster.java | 5 +- .../apache/hadoop/hbase/TestMetaTableLocator.java | 6 +- 
.../org/apache/hadoop/hbase/TestMultiVersions.java | 7 +- .../org/apache/hadoop/hbase/TestZooKeeper.java | 90 +- .../example/TestZooKeeperTableArchiveClient.java | 8 +- .../hbase/client/HConnectionTestingUtility.java | 3 +- .../org/apache/hadoop/hbase/client/TestAdmin2.java | 9 +- .../hbase/client/TestHBaseAdminNoCluster.java | 6 +- .../hadoop/hbase/client/TestMetaWithReplicas.java | 32 +- .../client/replication/TestReplicationAdmin.java | 8 +- .../TestMasterCoprocessorExceptionWithAbort.java | 11 +- .../TestMasterCoprocessorExceptionWithRemove.java | 10 +- .../hadoop/hbase/filter/FilterTestingCluster.java | 2 +- .../hadoop/hbase/filter/TestFilterWrapper.java | 2 +- .../hbase/master/MockNoopMasterServices.java | 4 +- .../hadoop/hbase/master/MockRegionServer.java | 13 +- .../hbase/master/TestActiveMasterManager.java | 41 +- .../hbase/master/TestAssignmentListener.java | 14 +- .../hbase/master/TestClockSkewDetection.java | 6 - .../hbase/master/TestDistributedLogSplitting.java | 68 +- .../hbase/master/TestHMasterRPCException.java | 10 +- .../hadoop/hbase/master/TestMasterNoCluster.java | 10 +- .../hbase/master/TestMasterStatusServlet.java | 9 +- .../hadoop/hbase/master/TestMasterWalManager.java | 30 +- .../hbase/master/TestMetaShutdownHandler.java | 6 +- .../hadoop/hbase/master/TestSplitLogManager.java | 95 +- .../hadoop/hbase/master/TestTableStateManager.java | 17 +- .../hbase/master/cleaner/TestHFileCleaner.java | 7 +- .../hbase/master/cleaner/TestHFileLinkCleaner.java | 6 +- .../hbase/master/cleaner/TestLogsCleaner.java | 20 +- .../cleaner/TestReplicationHFileCleaner.java | 20 +- .../cleaner/TestReplicationZKNodeCleaner.java | 7 +- .../hbase/procedure/SimpleRSProcedureManager.java | 5 +- .../hadoop/hbase/procedure/TestZKProcedure.java | 14 +- .../procedure/TestZKProcedureControllers.java | 37 +- .../TestCompactionInDeadRegionServer.java | 11 +- .../hbase/regionserver/TestHeapMemoryManager.java | 5 +- .../regionserver/TestMasterAddressTracker.java | 14 +- 
.../hbase/regionserver/TestRSStatusServlet.java | 4 +- .../regionserver/TestRegionServerHostname.java | 13 +- .../regionserver/TestRegionServerNoMaster.java | 4 +- .../hbase/regionserver/TestSplitLogWorker.java | 79 +- .../TestSplitTransactionOnCluster.java | 8 +- .../hadoop/hbase/regionserver/TestWALLockup.java | 4 +- .../hbase/replication/TestMasterReplication.java | 13 +- .../replication/TestMultiSlaveReplication.java | 17 +- .../replication/TestPerTableCFReplication.java | 8 +- .../hbase/replication/TestReplicationBase.java | 13 +- .../replication/TestReplicationStateHBaseImpl.java | 13 +- .../replication/TestReplicationStateZKImpl.java | 24 +- .../replication/TestReplicationTableBase.java | 4 +- .../replication/TestReplicationTrackerZKImpl.java | 48 +- .../hbase/replication/TestSerialReplication.java | 7 +- .../replication/master/TestTableCFsUpdater.java | 47 +- .../regionserver/TestGlobalThrottler.java | 6 +- .../regionserver/TestReplicationSourceManager.java | 25 +- .../security/access/TestAccessController2.java | 16 +- .../security/access/TestTablePermissions.java | 7 +- .../security/access/TestZKPermissionWatcher.java | 6 +- .../security/token/TestTokenAuthentication.java | 8 +- .../hbase/security/token/TestZKSecretWatcher.java | 23 +- .../token/TestZKSecretWatcherRefreshKeys.java | 22 +- ...tVisibilityLabelReplicationWithExpAsString.java | 6 +- .../TestVisibilityLabelsReplication.java | 10 +- .../org/apache/hadoop/hbase/util/MockServer.java | 12 +- .../hbase/util/ProcessBasedLocalHBaseCluster.java | 4 +- .../hadoop/hbase/util/TestHBaseFsckOneRS.java | 5 +- ...erableZooKeeper.java => TestRecoverableZK.java} | 8 +- .../hbase/zookeeper/TestZKLeaderManager.java | 18 +- ...KeeperMainServer.java => TestZKMainServer.java} | 14 +- .../apache/hadoop/hbase/zookeeper/TestZKMulti.java | 240 +-- .../hadoop/hbase/zookeeper/TestZooKeeperACL.java | 34 +- .../hbase/zookeeper/TestZooKeeperNodeTracker.java | 40 +- hbase-shell/src/main/ruby/hbase/admin.rb | 4 +- 
hbase-shell/src/main/ruby/hbase/table.rb | 2 +- hbase-zookeeper/pom.xml | 417 +++++ .../hbase/zookeeper/ClusterStatusTracker.java | 18 +- .../hadoop/hbase/zookeeper/DeletionListener.java | 6 +- .../hadoop/hbase/zookeeper/EmptyWatcher.java | 0 .../apache/hadoop/hbase/zookeeper/HQuorumPeer.java | 2 +- .../hadoop/hbase/zookeeper/InstancePending.java | 0 .../hbase/zookeeper/LoadBalancerTracker.java | 19 +- .../zookeeper/MasterMaintenanceModeTracker.java | 8 +- .../hbase/zookeeper/MiniZooKeeperCluster.java | 0 .../hadoop/hbase/zookeeper/PendingWatcher.java | 0 .../hadoop/hbase/zookeeper/RecoverableZK.java | 12 +- .../hbase/zookeeper/RegionNormalizerTracker.java | 19 +- .../apache/hadoop/hbase/zookeeper/ZKAclReset.java | 18 +- .../hadoop/hbase/zookeeper/ZKLeaderManager.java | 22 +- .../apache/hadoop/hbase/zookeeper/ZKListener.java | 12 +- .../hadoop/hbase/zookeeper/ZKMainServer.java | 4 +- .../apache/hadoop/hbase/zookeeper/ZKMetrics.java | 8 +- .../hadoop/hbase/zookeeper/ZKMetricsListener.java | 2 +- .../apache/hadoop/hbase/zookeeper/ZKNodePaths.java | 60 +- .../hadoop/hbase/zookeeper/ZKNodeTracker.java | 30 +- .../hadoop/hbase/zookeeper/ZKServerTool.java | 0 .../apache/hadoop/hbase/zookeeper/ZKSplitLog.java | 91 +- .../apache/hadoop/hbase/zookeeper/ZKWatcher.java | 95 +- .../zookeeper}/ZooKeeperConnectionException.java | 2 +- .../hadoop/hbase/zookeeper/ZooKeeperUtil.java | 1903 ++++++++++++++++++++ .../hbase/zookeeper/TestInstancePending.java | 0 .../hadoop/hbase/zookeeper/TestZKMetrics.java | 40 +- .../apache/hadoop/hbase/zookeeper/TestZKUtil.java | 25 +- .../hadoop/hbase/zookeeper/TestZKWatcher.java | 7 +- pom.xml | 13 + 211 files changed, 4334 insertions(+), 3735 deletions(-) rename {hbase-client => hbase-common}/src/main/java/org/apache/hadoop/hbase/Abortable.java (100%) create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufHelpers.java rename {hbase-client => 
hbase-common}/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufMagic.java (64%) rename hbase-server/src/main/java/org/apache/hadoop/hbase/{zookeeper => master}/DrainingServerTracker.java (86%) rename hbase-server/src/main/java/org/apache/hadoop/hbase/{zookeeper => master}/RegionServerTracker.java (87%) rename hbase-server/src/main/java/org/apache/hadoop/hbase/{zookeeper => master}/SplitOrMergeTracker.java (80%) rename hbase-server/src/main/java/org/apache/hadoop/hbase/{zookeeper => regionserver}/RecoveringRegionWatcher.java (86%) rename hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/{TestRecoverableZooKeeper.java => TestRecoverableZK.java} (93%) rename hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/{TestZooKeeperMainServer.java => TestZKMainServer.java} (91%) create mode 100644 hbase-zookeeper/pom.xml rename {hbase-server => hbase-zookeeper}/src/main/java/org/apache/hadoop/hbase/zookeeper/ClusterStatusTracker.java (82%) rename {hbase-server => hbase-zookeeper}/src/main/java/org/apache/hadoop/hbase/zookeeper/DeletionListener.java (93%) rename {hbase-client => hbase-zookeeper}/src/main/java/org/apache/hadoop/hbase/zookeeper/EmptyWatcher.java (100%) rename {hbase-client => hbase-zookeeper}/src/main/java/org/apache/hadoop/hbase/zookeeper/HQuorumPeer.java (98%) rename {hbase-client => hbase-zookeeper}/src/main/java/org/apache/hadoop/hbase/zookeeper/InstancePending.java (100%) rename {hbase-server => hbase-zookeeper}/src/main/java/org/apache/hadoop/hbase/zookeeper/LoadBalancerTracker.java (82%) rename {hbase-server => hbase-zookeeper}/src/main/java/org/apache/hadoop/hbase/zookeeper/MasterMaintenanceModeTracker.java (87%) rename {hbase-server => hbase-zookeeper}/src/main/java/org/apache/hadoop/hbase/zookeeper/MiniZooKeeperCluster.java (100%) rename {hbase-client => hbase-zookeeper}/src/main/java/org/apache/hadoop/hbase/zookeeper/PendingWatcher.java (100%) rename 
hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java => hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZK.java (98%) rename {hbase-server => hbase-zookeeper}/src/main/java/org/apache/hadoop/hbase/zookeeper/RegionNormalizerTracker.java (82%) rename hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZkAclReset.java => hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKAclReset.java (86%) rename {hbase-client => hbase-zookeeper}/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKLeaderManager.java (88%) rename hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperListener.java => hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKListener.java (89%) rename hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperMainServer.java => hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKMainServer.java (97%) rename hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetricsZooKeeper.java => hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKMetrics.java (90%) rename hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperMetricsListener.java => hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKMetricsListener.java (98%) rename hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java => hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKNodePaths.java (77%) rename hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperNodeTracker.java => hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKNodeTracker.java (88%) rename {hbase-server => hbase-zookeeper}/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKServerTool.java (100%) rename {hbase-server => hbase-zookeeper}/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKSplitLog.java (54%) rename 
hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java => hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKWatcher.java (86%) rename {hbase-client/src/main/java/org/apache/hadoop/hbase => hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper}/ZooKeeperConnectionException.java (97%) create mode 100644 hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperUtil.java rename {hbase-client => hbase-zookeeper}/src/test/java/org/apache/hadoop/hbase/zookeeper/TestInstancePending.java (100%) rename hbase-client/src/test/java/org/apache/hadoop/hbase/zookeeper/TestMetricsZooKeeper.java => hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKMetrics.java (70%) rename {hbase-client => hbase-zookeeper}/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKUtil.java (82%) rename hbase-client/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZooKeeperWatcher.java => hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKWatcher.java (89%) diff --git a/conf/log4j.properties b/conf/log4j.properties index 15545fff80..6367dbe7af 100644 --- a/conf/log4j.properties +++ b/conf/log4j.properties @@ -98,7 +98,7 @@ log4j.logger.org.apache.hadoop.hbase=INFO log4j.logger.org.apache.hadoop.hbase.META=INFO # Make these two classes INFO-level. Make them DEBUG to see more zk debug. log4j.logger.org.apache.hadoop.hbase.zookeeper.ZKUtil=INFO -log4j.logger.org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher=INFO +log4j.logger.org.apache.hadoop.hbase.zookeeper.ZKWatcher=INFO #log4j.logger.org.apache.hadoop.dfs=DEBUG # Set this class to log INFO only otherwise its OTT # Enable this to get detailed connection error/retry logging. 
diff --git a/dev-support/findHangingTests.py b/dev-support/findHangingTests.py index a8abdab4fb..e7bf906546 100755 --- a/dev-support/findHangingTests.py +++ b/dev-support/findHangingTests.py @@ -86,7 +86,6 @@ def get_bad_tests(console_url): for bad_string in BAD_RUN_STRINGS: if re.match(".*" + bad_string + ".*", line): print "Bad string found in build:\n > {}".format(line) - return print "Result > total tests: {:4} failed : {:4} timedout : {:4} hanging : {:4}".format( len(all_tests_set), len(failed_tests_set), len(timeout_tests_set), len(hanging_tests_set)) return [all_tests_set, failed_tests_set, timeout_tests_set, hanging_tests_set] diff --git a/hbase-archetypes/hbase-client-project/src/main/resources/log4j.properties b/hbase-archetypes/hbase-client-project/src/main/resources/log4j.properties index 11f2b75880..0b01e57e6e 100644 --- a/hbase-archetypes/hbase-client-project/src/main/resources/log4j.properties +++ b/hbase-archetypes/hbase-client-project/src/main/resources/log4j.properties @@ -95,7 +95,7 @@ log4j.logger.org.apache.zookeeper=INFO log4j.logger.org.apache.hadoop.hbase=INFO # Make these two classes INFO-level. Make them DEBUG to see more zk debug. log4j.logger.org.apache.hadoop.hbase.zookeeper.ZKUtil=INFO -log4j.logger.org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher=INFO +log4j.logger.org.apache.hadoop.hbase.zookeeper.ZKWatcher=INFO #log4j.logger.org.apache.hadoop.dfs=DEBUG # Set this class to log INFO only otherwise its OTT # Enable this to get detailed connection error/retry logging. 
diff --git a/hbase-archetypes/hbase-shaded-client-project/src/main/resources/log4j.properties b/hbase-archetypes/hbase-shaded-client-project/src/main/resources/log4j.properties index 11f2b75880..0b01e57e6e 100644 --- a/hbase-archetypes/hbase-shaded-client-project/src/main/resources/log4j.properties +++ b/hbase-archetypes/hbase-shaded-client-project/src/main/resources/log4j.properties @@ -95,7 +95,7 @@ log4j.logger.org.apache.zookeeper=INFO log4j.logger.org.apache.hadoop.hbase=INFO # Make these two classes INFO-level. Make them DEBUG to see more zk debug. log4j.logger.org.apache.hadoop.hbase.zookeeper.ZKUtil=INFO -log4j.logger.org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher=INFO +log4j.logger.org.apache.hadoop.hbase.zookeeper.ZKWatcher=INFO #log4j.logger.org.apache.hadoop.dfs=DEBUG # Set this class to log INFO only otherwise its OTT # Enable this to get detailed connection error/retry logging. diff --git a/hbase-assembly/pom.xml b/hbase-assembly/pom.xml index 18b63b5414..5d06e44d83 100644 --- a/hbase-assembly/pom.xml +++ b/hbase-assembly/pom.xml @@ -191,6 +191,10 @@ hbase-it test-jar + + org.apache.hbase + hbase-zookeeper + org.apache.hbase hbase-server diff --git a/hbase-assembly/src/main/assembly/hadoop-two-compat.xml b/hbase-assembly/src/main/assembly/hadoop-two-compat.xml index a66237bf0d..c2e1480572 100644 --- a/hbase-assembly/src/main/assembly/hadoop-two-compat.xml +++ b/hbase-assembly/src/main/assembly/hadoop-two-compat.xml @@ -51,6 +51,7 @@ org.apache.hbase:hbase-external-blockcache org.apache.hbase:hbase-backup org.apache.hbase:hbase-mapreduce + org.apache.hbase:hbase-zookeeper diff --git a/hbase-client/pom.xml b/hbase-client/pom.xml index a8e73c7cb5..789fc13f3c 100644 --- a/hbase-client/pom.xml +++ b/hbase-client/pom.xml @@ -102,6 +102,10 @@ org.apache.hbase hbase-common + + org.apache.hbase + hbase-zookeeper + org.apache.hbase hbase-hadoop-compat @@ -285,7 +289,7 @@ com.sun.jersey jersey-json - + javax.servlet servlet-api diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java index 4286afe4c0..9d78faa69b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java @@ -79,7 +79,7 @@ import edu.umd.cs.findbugs.annotations.Nullable; * Read/write operations on region and assignment information store in * hbase:meta. * - * Some of the methods of this class take ZooKeeperWatcher as a param. The only reason + * Some of the methods of this class take ZKWatcher as a param. The only reason * for this is because when used on client-side (like from HBaseAdmin), we want to use * short-living connection (opened before each operation, closed right after), while * when used on HM or HRS (like in AssignmentManager) we want permanent connection. diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java index a931b1dcb9..9c231ddadf 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java @@ -28,7 +28,7 @@ import org.apache.hadoop.hbase.MasterNotRunningException; import org.apache.hadoop.hbase.RegionLocations; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.ZooKeeperConnectionException; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperConnectionException; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.backoff.ClientBackoffPolicy; import org.apache.hadoop.hbase.ipc.RpcControllerFactory; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java index aa696121c3..8b3bf56744 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java @@ -56,7 +56,7 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotEnabledException; import org.apache.hadoop.hbase.TableNotFoundException; -import org.apache.hadoop.hbase.ZooKeeperConnectionException; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperConnectionException; import org.apache.hadoop.hbase.client.backoff.ClientBackoffPolicy; import org.apache.hadoop.hbase.client.backoff.ClientBackoffPolicyFactory; import org.apache.hadoop.hbase.exceptions.ClientExceptionsUtil; @@ -73,8 +73,8 @@ import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.ReflectionUtils; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperUtil; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.hadoop.ipc.RemoteException; import org.apache.yetus.audience.InterfaceAudience; import org.apache.zookeeper.KeeperException; @@ -535,11 +535,11 @@ class ConnectionImplementation implements ClusterConnection, Closeable { return this.conf; } - private void checkIfBaseNodeAvailable(ZooKeeperWatcher zkw) + private void checkIfBaseNodeAvailable(ZKWatcher zkw) throws MasterNotRunningException { String errorMsg; try { - if (ZKUtil.checkExists(zkw, zkw.znodePaths.baseZNode) == -1) { + if (ZooKeeperUtil.checkExists(zkw, zkw.znodePaths.baseZNode) == -1) { errorMsg = "The node " + zkw.znodePaths.baseZNode+" is not in ZooKeeper. " + "It should have been written by the master. 
" + "Check the value configured in 'zookeeper.znode.parent'. " @@ -1216,7 +1216,7 @@ class ConnectionImplementation implements ClusterConnection, Closeable { private AtomicInteger keepAliveZookeeperUserCount = new AtomicInteger(0); /** - * Retrieve a shared ZooKeeperWatcher. You must close it it once you've have finished with it. + * Retrieve a shared ZKWatcher. You must close it it once you've have finished with it. * @return The shared instance. Never returns null. */ ZooKeeperKeepAliveConnection getKeepAliveZooKeeperWatcher() @@ -1227,7 +1227,7 @@ class ConnectionImplementation implements ClusterConnection, Closeable { throw new IOException(toString() + " closed"); } // We don't check that our link to ZooKeeper is still valid - // But there is a retry mechanism in the ZooKeeperWatcher itself + // But there is a retry mechanism in the ZKWatcher itself keepAliveZookeeper = new ZooKeeperKeepAliveConnection(conf, this.toString(), this); } keepAliveZookeeperUserCount.addAndGet(1); @@ -1235,7 +1235,7 @@ class ConnectionImplementation implements ClusterConnection, Closeable { } } - void releaseZooKeeperWatcher(final ZooKeeperWatcher zkw) { + void releaseZooKeeperWatcher(final ZKWatcher zkw) { if (zkw == null){ return; } @@ -1246,7 +1246,7 @@ class ConnectionImplementation implements ClusterConnection, Closeable { if (keepAliveZookeeper != null) { LOG.info("Closing zookeeper sessionid=0x" + Long.toHexString( - keepAliveZookeeper.getRecoverableZooKeeper().getSessionId())); + keepAliveZookeeper.getRecoverableZK().getSessionId())); keepAliveZookeeper.internalClose(); keepAliveZookeeper = null; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java index e4bb675bb4..7c87b5b894 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java @@ -69,7 +69,6 @@ import 
org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotDisabledException; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.UnknownRegionException; -import org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper; import org.apache.hadoop.hbase.client.replication.TableCFs; import org.apache.hadoop.hbase.client.security.SecurityCapability; @@ -97,7 +96,8 @@ import org.apache.hadoop.hbase.util.ForeignExceptionUtil; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker; import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperConnectionException; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.util.StringUtils; import org.apache.yetus.audience.InterfaceAudience; @@ -398,8 +398,8 @@ public class HBaseAdmin implements Admin { @Override public List getRegions(final TableName tableName) throws IOException { - ZooKeeperWatcher zookeeper = - new ZooKeeperWatcher(conf, ZK_IDENTIFIER_PREFIX + connection.toString(), + ZKWatcher zookeeper = + new ZKWatcher(conf, ZK_IDENTIFIER_PREFIX + connection.toString(), new ThrowableAbortable()); try { if (TableName.META_TABLE_NAME.equals(tableName)) { @@ -1298,10 +1298,10 @@ public class HBaseAdmin implements Admin { break; case NORMAL: default: - ZooKeeperWatcher zookeeper = null; + ZKWatcher zookeeper = null; try { checkTableExists(tableName); - zookeeper = new ZooKeeperWatcher(conf, ZK_IDENTIFIER_PREFIX + connection.toString(), + zookeeper = new ZKWatcher(conf, ZK_IDENTIFIER_PREFIX + connection.toString(), new ThrowableAbortable()); List> pairs; if (TableName.META_TABLE_NAME.equals(tableName)) { @@ -1817,10 +1817,10 @@ public class HBaseAdmin implements Admin { @Override public 
void split(final TableName tableName, final byte [] splitPoint) throws IOException { - ZooKeeperWatcher zookeeper = null; + ZKWatcher zookeeper = null; try { checkTableExists(tableName); - zookeeper = new ZooKeeperWatcher(conf, ZK_IDENTIFIER_PREFIX + connection.toString(), + zookeeper = new ZKWatcher(conf, ZK_IDENTIFIER_PREFIX + connection.toString(), new ThrowableAbortable()); List> pairs; if (TableName.META_TABLE_NAME.equals(tableName)) { @@ -2321,7 +2321,7 @@ public class HBaseAdmin implements Admin { * @param conf system configuration * @throws MasterNotRunningException if the master is not running. * @throws ZooKeeperConnectionException if unable to connect to zookeeper. - * // TODO do not expose ZKConnectionException. + * // TODO do not expose ZooKeeperConnectionException. */ public static void available(final Configuration conf) throws MasterNotRunningException, ZooKeeperConnectionException, IOException { @@ -2341,7 +2341,7 @@ public class HBaseAdmin implements Admin { // This is NASTY. FIX!!!! Dependent on internal implementation! 
TODO zkw = ((ConnectionImplementation) connection) .getKeepAliveZooKeeperWatcher(); - zkw.getRecoverableZooKeeper().getZooKeeper().exists(zkw.znodePaths.baseZNode, false); + zkw.getRecoverableZK().getZooKeeper().exists(zkw.znodePaths.baseZNode, false); } catch (IOException e) { throw new ZooKeeperConnectionException("Can't connect to ZooKeeper", e); } catch (InterruptedException e) { @@ -3290,11 +3290,11 @@ public class HBaseAdmin implements Admin { break; case NORMAL: default: - ZooKeeperWatcher zookeeper = null; + ZKWatcher zookeeper = null; try { List> pairs; if (TableName.META_TABLE_NAME.equals(tableName)) { - zookeeper = new ZooKeeperWatcher(conf, ZK_IDENTIFIER_PREFIX + connection.toString(), + zookeeper = new ZKWatcher(conf, ZK_IDENTIFIER_PREFIX + connection.toString(), new ThrowableAbortable()); pairs = new MetaTableLocator().getMetaRegionsAndLocations(zookeeper); } else { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java index 0835a9b87e..10fc671cc1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java @@ -26,7 +26,6 @@ import java.util.Collection; import java.util.Collections; import java.util.EnumSet; import java.util.HashMap; -import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Optional; @@ -82,7 +81,6 @@ import org.apache.hadoop.hbase.snapshot.SnapshotCreationException; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.ForeignExceptionUtil; -import org.apache.hadoop.hbase.util.Pair; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback; @@ -156,8 +154,6 @@ import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedu import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProceduresRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProceduresResponse; -import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest; -import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesRequest; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java index 0eb4e42214..0955fffb85 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java @@ -79,7 +79,7 @@ public interface RegionInfo { int MD5_HEX_LENGTH = 32; @InterfaceAudience.Private - int DEFAULT_REPLICA_ID = 0; + int DEFAULT_REPLICA_ID = HConstants.DEFAULT_REPLICA_ID; /** * to keep appended int's sorted in string format. 
Only allows 2 bytes diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKAsyncRegistry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKAsyncRegistry.java index 259b6654ba..5ab5cffa81 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKAsyncRegistry.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKAsyncRegistry.java @@ -24,7 +24,7 @@ import static org.apache.hadoop.hbase.HRegionInfo.FIRST_META_REGIONINFO; import static org.apache.hadoop.hbase.client.RegionReplicaUtil.getRegionInfoForDefaultReplica; import static org.apache.hadoop.hbase.client.RegionReplicaUtil.getRegionInfoForReplica; import static org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil.lengthOfPBMagic; -import static org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper.removeMetaData; +import static org.apache.hadoop.hbase.zookeeper.RecoverableZK.removeMetaData; import java.io.IOException; import java.util.concurrent.CompletableFuture; @@ -42,6 +42,7 @@ import org.apache.hadoop.hbase.ClusterId; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.RegionLocations; import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.zookeeper.ZKNodePaths; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.master.RegionState; @@ -50,7 +51,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.zookeeper.ZKConfig; -import org.apache.hadoop.hbase.zookeeper.ZNodePaths; import org.apache.zookeeper.data.Stat; /** @@ -63,10 +63,10 @@ class ZKAsyncRegistry implements AsyncRegistry { private final CuratorFramework zk; - private final ZNodePaths znodePaths; + private final ZKNodePaths znodePaths; ZKAsyncRegistry(Configuration conf) { - this.znodePaths = new 
ZNodePaths(conf); + this.znodePaths = new ZKNodePaths(conf); int zkSessionTimeout = conf.getInt(ZK_SESSION_TIMEOUT, DEFAULT_ZK_SESSION_TIMEOUT); int zkRetry = conf.getInt("zookeeper.recovery.retry", 3); int zkRetryIntervalMs = conf.getInt("zookeeper.recovery.retry.intervalmill", 1000); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperKeepAliveConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperKeepAliveConnection.java index 34f7b236ed..f29b46204a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperKeepAliveConnection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperKeepAliveConnection.java @@ -23,20 +23,20 @@ package org.apache.hadoop.hbase.client; import java.io.IOException; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; /** - * We inherit the current ZooKeeperWatcher implementation to change the semantic + * We inherit the current ZKWatcher implementation to change the semantic * of the close: the new close won't immediately close the connection but * will have a keep alive. See {@link ConnectionImplementation}. * This allows to make it available with a consistent interface. The whole - * ZooKeeperWatcher use in ConnectionImplementation will be then changed to remove the + * ZKWatcher use in ConnectionImplementation will be then changed to remove the * watcher part. * * This class is intended to be used internally by HBase classes; but not by * final user code. Hence it's package protected.
*/ -class ZooKeeperKeepAliveConnection extends ZooKeeperWatcher{ +class ZooKeeperKeepAliveConnection extends ZKWatcher { ZooKeeperKeepAliveConnection( Configuration conf, String descriptor, ConnectionImplementation conn) throws IOException { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperRegistry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperRegistry.java index 746382f850..2d17a9ee98 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperRegistry.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperRegistry.java @@ -27,7 +27,7 @@ import org.apache.hadoop.hbase.RegionLocations; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; import org.apache.hadoop.hbase.zookeeper.ZKClusterId; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperUtil; import org.apache.yetus.audience.InterfaceAudience; import org.apache.zookeeper.KeeperException; @@ -119,7 +119,7 @@ class ZooKeeperRegistry implements Registry { try { // We go to zk rather than to master to get count of regions to avoid // HTable having a Master dependency. 
See HBase-2828 - return ZKUtil.getNumberOfChildren(zkw, zkw.znodePaths.rsZNode); + return ZooKeeperUtil.getNumberOfChildren(zkw, zkw.znodePaths.rsZNode); } catch (KeeperException ke) { throw new IOException("Unexpected ZooKeeper exception", ke); } finally { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CoprocessorRpcUtils.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CoprocessorRpcUtils.java index 4f0e5e65d0..532bc06435 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CoprocessorRpcUtils.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CoprocessorRpcUtils.java @@ -27,10 +27,10 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.UnknownProtocolException; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; -import org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations; import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceCall; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceRequest; @@ -124,7 +124,7 @@ public final class CoprocessorRpcUtils { throws IOException { Message.Builder builderForType = service.getRequestPrototype(methodDesc).newBuilderForType(); - org.apache.hadoop.hbase.protobuf.ProtobufUtil.mergeFrom(builderForType, + ProtobufUtil.mergeFrom(builderForType, // TODO: COPY FROM SHADED TO NON_SHADED. DO I HAVE TOO? 
shadedRequest.toByteArray()); return builderForType.build(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RemoteWithExtrasException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RemoteWithExtrasException.java index 1374ab0430..b6bca2be74 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RemoteWithExtrasException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RemoteWithExtrasException.java @@ -24,13 +24,12 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.util.DynamicClassLoader; import org.apache.hadoop.ipc.RemoteException; /** * A {@link RemoteException} with some extra information. If source exception - * was a {@link org.apache.hadoop.hbase.DoNotRetryIOException}, + * was a {@link org.apache.hadoop.hbase.DoNotRetryIOException}, * {@link #isDoNotRetry()} will return true. *

A {@link RemoteException} hosts exceptions we got from the server. */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java index 0363ba269d..94c191f94f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java @@ -27,7 +27,7 @@ import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.MasterNotRunningException; import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.ZooKeeperConnectionException; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperConnectionException; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java index c7450b4a71..c8b3c093e5 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java @@ -43,8 +43,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.ByteBufferCell; import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.CellBuilder; -import org.apache.hadoop.hbase.CellBuilderFactory; import org.apache.hadoop.hbase.CellBuilderType; import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.CellUtil; @@ -81,7 +79,6 @@ import org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.RegionInfoBuilder; 
import org.apache.hadoop.hbase.client.RegionLoadStats; -import org.apache.hadoop.hbase.client.RegionReplicaUtil; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.SnapshotDescription; @@ -287,7 +284,7 @@ public final class ProtobufUtil { * byte array that is bytes.length plus {@link ProtobufMagic#PB_MAGIC}.length. */ public static byte [] prependPBMagic(final byte [] bytes) { - return Bytes.add(ProtobufMagic.PB_MAGIC, bytes); + return ProtobufMagic.prependPBMagic(bytes); } /** @@ -313,10 +310,7 @@ public final class ProtobufUtil { * @throws DeserializationException if we are missing the pb magic prefix */ public static void expectPBMagicPrefix(final byte [] bytes) throws DeserializationException { - if (!isPBMagicPrefix(bytes)) { - throw new DeserializationException("Missing pb magic " + - Bytes.toString(ProtobufMagic.PB_MAGIC) + " prefix"); - } + ProtobufMagic.expectPBMagicPrefix(bytes); } /** diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MasterAddressTracker.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MasterAddressTracker.java index 04e26629f3..5e1944193d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MasterAddressTracker.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MasterAddressTracker.java @@ -41,17 +41,17 @@ import org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferE * NodeDeleted and NodeCreated events on * /master. *

- * Utilizes {@link ZooKeeperNodeTracker} for zk interactions. + * Utilizes {@link ZKNodeTracker} for zk interactions. *

* You can get the current master via {@link #getMasterAddress()} or via - * {@link #getMasterAddress(ZooKeeperWatcher)} if you do not have a running + * {@link #getMasterAddress(ZKWatcher)} if you do not have a running * instance of this Tracker in your context. *

* This class also includes utility for interacting with the master znode, for * writing and reading the znode content. */ @InterfaceAudience.Private -public class MasterAddressTracker extends ZooKeeperNodeTracker { +public class MasterAddressTracker extends ZKNodeTracker { /** * Construct a master address listener with the specified * zookeeper reference. @@ -63,7 +63,7 @@ public class MasterAddressTracker extends ZooKeeperNodeTracker { * @param watcher zk reference and watcher * @param abortable abortable in case of fatal error */ - public MasterAddressTracker(ZooKeeperWatcher watcher, Abortable abortable) { + public MasterAddressTracker(ZKWatcher watcher, Abortable abortable) { super(watcher, watcher.znodePaths.masterAddressZNode, abortable); } @@ -100,10 +100,10 @@ public class MasterAddressTracker extends ZooKeeperNodeTracker { * @return info port or 0 if timed out or exceptions */ public int getBackupMasterInfoPort(final ServerName sn) { - String backupZNode = ZKUtil.joinZNode(watcher.znodePaths.backupMasterAddressesZNode, + String backupZNode = ZooKeeperUtil.joinZNode(watcher.znodePaths.backupMasterAddressesZNode, sn.toString()); try { - byte[] data = ZKUtil.getData(watcher, backupZNode); + byte[] data = ZooKeeperUtil.getData(watcher, backupZNode); final ZooKeeperProtos.Master backup = parse(data); if (backup == null) { return 0; @@ -136,17 +136,17 @@ public class MasterAddressTracker extends ZooKeeperNodeTracker { * Get master address. * Use this instead of {@link #getMasterAddress()} if you do not have an * instance of this tracker in your context. - * @param zkw ZooKeeperWatcher to use + * @param zkw ZKWatcher to use * @return ServerName stored in the the master address znode or null if no * znode present. 
- * @throws KeeperException - * @throws IOException + * @throws KeeperException + * @throws IOException */ - public static ServerName getMasterAddress(final ZooKeeperWatcher zkw) + public static ServerName getMasterAddress(final ZKWatcher zkw) throws KeeperException, IOException { byte [] data; try { - data = ZKUtil.getData(zkw, zkw.znodePaths.masterAddressZNode); + data = ZooKeeperUtil.getData(zkw, zkw.znodePaths.masterAddressZNode); } catch (InterruptedException e) { throw new InterruptedIOException(); } @@ -167,18 +167,18 @@ public class MasterAddressTracker extends ZooKeeperNodeTracker { * Get master info port. * Use this instead of {@link #getMasterInfoPort()} if you do not have an * instance of this tracker in your context. - * @param zkw ZooKeeperWatcher to use + * @param zkw ZKWatcher to use * @return master info port in the the master address znode or null if no * znode present. * // TODO can't return null for 'int' return type. non-static verison returns 0 * @throws KeeperException * @throws IOException */ - public static int getMasterInfoPort(final ZooKeeperWatcher zkw) throws KeeperException, + public static int getMasterInfoPort(final ZKWatcher zkw) throws KeeperException, IOException { byte[] data; try { - data = ZKUtil.getData(zkw, zkw.znodePaths.masterAddressZNode); + data = ZooKeeperUtil.getData(zkw, zkw.znodePaths.masterAddressZNode); } catch (InterruptedException e) { throw new InterruptedIOException(); } @@ -199,17 +199,17 @@ public class MasterAddressTracker extends ZooKeeperNodeTracker { * Set master address into the master znode or into the backup * subdirectory of backup masters; switch off the passed in znode * path. - * @param zkw The ZooKeeperWatcher to use. + * @param zkw The ZKWatcher to use. * @param znode Where to create the znode; could be at the top level or it * could be under backup masters * @param master ServerName of the current master must not be null. 
* @return true if node created, false if not; a watch is set in both cases * @throws KeeperException */ - public static boolean setMasterAddress(final ZooKeeperWatcher zkw, + public static boolean setMasterAddress(final ZKWatcher zkw, final String znode, final ServerName master, int infoPort) throws KeeperException { - return ZKUtil.createEphemeralNodeAndWatch(zkw, znode, toByteArray(master, infoPort)); + return ZooKeeperUtil.createEphemeralNodeAndWatch(zkw, znode, toByteArray(master, infoPort)); } /** @@ -258,17 +258,17 @@ public class MasterAddressTracker extends ZooKeeperNodeTracker { * @param zkw must not be null * @param content must not be null */ - public static boolean deleteIfEquals(ZooKeeperWatcher zkw, final String content) { + public static boolean deleteIfEquals(ZKWatcher zkw, final String content) { if (content == null){ throw new IllegalArgumentException("Content must not be null"); } try { Stat stat = new Stat(); - byte[] data = ZKUtil.getDataNoWatch(zkw, zkw.znodePaths.masterAddressZNode, stat); + byte[] data = ZooKeeperUtil.getDataNoWatch(zkw, zkw.znodePaths.masterAddressZNode, stat); ServerName sn = ProtobufUtil.parseServerNameFrom(data); if (sn != null && content.equals(sn.toString())) { - return (ZKUtil.deleteNode(zkw, zkw.znodePaths.masterAddressZNode, stat.getVersion())); + return (ZooKeeperUtil.deleteNode(zkw, zkw.znodePaths.masterAddressZNode, stat.getVersion())); } } catch (KeeperException e) { LOG.warn("Can't get or delete the master znode", e); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java index cddde2f521..7ff48b84c3 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java @@ -65,7 +65,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.MetaReg * which 
keeps hbase:meta region server location. * * Stateless class with a bunch of static methods. Doesn't manage resources passed in - * (e.g. Connection, ZooKeeperWatcher etc). + * (e.g. Connection, ZKWatcher etc). * * Meta region location is set by RegionServerServices. * This class doesn't use ZK watchers, rather accesses ZK directly. @@ -86,7 +86,7 @@ public class MetaTableLocator { * Checks if the meta region location is available. * @return true if meta region location is available, false if not */ - public boolean isLocationAvailable(ZooKeeperWatcher zkw) { + public boolean isLocationAvailable(ZKWatcher zkw) { return getMetaRegionLocation(zkw) != null; } @@ -94,7 +94,7 @@ public class MetaTableLocator { * @param zkw ZooKeeper watcher to be used * @return meta table regions and their locations. */ - public List> getMetaRegionsAndLocations(ZooKeeperWatcher zkw) { + public List> getMetaRegionsAndLocations(ZKWatcher zkw) { return getMetaRegionsAndLocations(zkw, RegionInfo.DEFAULT_REPLICA_ID); } @@ -104,7 +104,7 @@ public class MetaTableLocator { * @param replicaId * @return meta table regions and their locations. 
*/ - public List> getMetaRegionsAndLocations(ZooKeeperWatcher zkw, + public List> getMetaRegionsAndLocations(ZKWatcher zkw, int replicaId) { ServerName serverName = getMetaRegionLocation(zkw, replicaId); List> list = new ArrayList<>(1); @@ -117,7 +117,7 @@ public class MetaTableLocator { * @param zkw ZooKeeper watcher to be used * @return List of meta regions */ - public List getMetaRegions(ZooKeeperWatcher zkw) { + public List getMetaRegions(ZKWatcher zkw) { return getMetaRegions(zkw, RegionInfo.DEFAULT_REPLICA_ID); } @@ -127,7 +127,7 @@ public class MetaTableLocator { * @param replicaId * @return List of meta regions */ - public List getMetaRegions(ZooKeeperWatcher zkw, int replicaId) { + public List getMetaRegions(ZKWatcher zkw, int replicaId) { List> result; result = getMetaRegionsAndLocations(zkw, replicaId); return getListOfRegionInfos(result); @@ -148,7 +148,7 @@ public class MetaTableLocator { * @param zkw zookeeper connection to use * @return server name or null if we failed to get the data. */ - public ServerName getMetaRegionLocation(final ZooKeeperWatcher zkw) { + public ServerName getMetaRegionLocation(final ZKWatcher zkw) { try { RegionState state = getMetaRegionState(zkw); return state.isOpened() ? state.getServerName() : null; @@ -163,7 +163,7 @@ public class MetaTableLocator { * @param replicaId * @return server name */ - public ServerName getMetaRegionLocation(final ZooKeeperWatcher zkw, int replicaId) { + public ServerName getMetaRegionLocation(final ZKWatcher zkw, int replicaId) { try { RegionState state = getMetaRegionState(zkw, replicaId); return state.isOpened() ? 
state.getServerName() : null; @@ -184,7 +184,7 @@ public class MetaTableLocator { * @throws InterruptedException if interrupted while waiting * @throws NotAllMetaRegionsOnlineException */ - public ServerName waitMetaRegionLocation(ZooKeeperWatcher zkw, long timeout) + public ServerName waitMetaRegionLocation(ZKWatcher zkw, long timeout) throws InterruptedException, NotAllMetaRegionsOnlineException { return waitMetaRegionLocation(zkw, RegionInfo.DEFAULT_REPLICA_ID, timeout); } @@ -202,10 +202,10 @@ public class MetaTableLocator { * @throws InterruptedException * @throws NotAllMetaRegionsOnlineException */ - public ServerName waitMetaRegionLocation(ZooKeeperWatcher zkw, int replicaId, long timeout) + public ServerName waitMetaRegionLocation(ZKWatcher zkw, int replicaId, long timeout) throws InterruptedException, NotAllMetaRegionsOnlineException { try { - if (ZKUtil.checkExists(zkw, zkw.znodePaths.baseZNode) == -1) { + if (ZooKeeperUtil.checkExists(zkw, zkw.znodePaths.baseZNode) == -1) { String errorMsg = "Check the value configured in 'zookeeper.znode.parent'. " + "There could be a mismatch with the one configured in the master."; LOG.error(errorMsg); @@ -227,10 +227,10 @@ public class MetaTableLocator { * Waits indefinitely for availability of hbase:meta. Used during * cluster startup. Does not verify meta, just that something has been * set up in zk. 
- * @see #waitMetaRegionLocation(org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher, long) + * @see #waitMetaRegionLocation(ZKWatcher, long) * @throws InterruptedException if interrupted while waiting */ - public void waitMetaRegionLocation(ZooKeeperWatcher zkw) throws InterruptedException { + public void waitMetaRegionLocation(ZKWatcher zkw) throws InterruptedException { long startTime = System.currentTimeMillis(); while (!stopped) { try { @@ -260,7 +260,7 @@ public class MetaTableLocator { * @throws InterruptedException */ public boolean verifyMetaRegionLocation(ClusterConnection hConnection, - ZooKeeperWatcher zkw, final long timeout) + ZKWatcher zkw, final long timeout) throws InterruptedException, IOException { return verifyMetaRegionLocation(hConnection, zkw, timeout, RegionInfo.DEFAULT_REPLICA_ID); } @@ -276,7 +276,7 @@ public class MetaTableLocator { * @throws IOException */ public boolean verifyMetaRegionLocation(ClusterConnection connection, - ZooKeeperWatcher zkw, final long timeout, int replicaId) + ZKWatcher zkw, final long timeout, int replicaId) throws InterruptedException, IOException { AdminProtos.AdminService.BlockingInterface service = null; try { @@ -360,7 +360,7 @@ public class MetaTableLocator { * @throws IOException */ private AdminService.BlockingInterface getMetaServerConnection(ClusterConnection connection, - ZooKeeperWatcher zkw, long timeout, int replicaId) + ZKWatcher zkw, long timeout, int replicaId) throws InterruptedException, NotAllMetaRegionsOnlineException, IOException { return getCachedConnection(connection, waitMetaRegionLocation(zkw, replicaId, timeout)); } @@ -424,7 +424,7 @@ public class MetaTableLocator { * @param state The region transition state * @throws KeeperException unexpected zookeeper exception */ - public static void setMetaLocation(ZooKeeperWatcher zookeeper, + public static void setMetaLocation(ZKWatcher zookeeper, ServerName serverName, RegionState.State state) throws KeeperException { setMetaLocation(zookeeper, 
serverName, RegionInfo.DEFAULT_REPLICA_ID, state); } @@ -438,7 +438,7 @@ public class MetaTableLocator { * @param state * @throws KeeperException */ - public static void setMetaLocation(ZooKeeperWatcher zookeeper, + public static void setMetaLocation(ZKWatcher zookeeper, ServerName serverName, int replicaId, RegionState.State state) throws KeeperException { if (serverName == null) { LOG.warn("Tried to set null ServerName in hbase:meta; skipping -- ServerName required"); @@ -454,7 +454,7 @@ public class MetaTableLocator { .setState(state.convert()).build(); byte[] data = ProtobufUtil.prependPBMagic(pbrsr.toByteArray()); try { - ZKUtil.setData(zookeeper, + ZooKeeperUtil.setData(zookeeper, zookeeper.znodePaths.getZNodeForReplica(replicaId), data); } catch(KeeperException.NoNodeException nne) { if (replicaId == RegionInfo.DEFAULT_REPLICA_ID) { @@ -463,14 +463,15 @@ public class MetaTableLocator { LOG.debug("META region location doesn't exist for replicaId=" + replicaId + ", create it"); } - ZKUtil.createAndWatch(zookeeper, zookeeper.znodePaths.getZNodeForReplica(replicaId), data); + ZooKeeperUtil + .createAndWatch(zookeeper, zookeeper.znodePaths.getZNodeForReplica(replicaId), data); } } /** * Load the meta region state from the meta server ZNode. 
*/ - public static RegionState getMetaRegionState(ZooKeeperWatcher zkw) throws KeeperException { + public static RegionState getMetaRegionState(ZKWatcher zkw) throws KeeperException { return getMetaRegionState(zkw, RegionInfo.DEFAULT_REPLICA_ID); } @@ -481,12 +482,12 @@ public class MetaTableLocator { * @return regionstate * @throws KeeperException */ - public static RegionState getMetaRegionState(ZooKeeperWatcher zkw, int replicaId) + public static RegionState getMetaRegionState(ZKWatcher zkw, int replicaId) throws KeeperException { RegionState.State state = RegionState.State.OPEN; ServerName serverName = null; try { - byte[] data = ZKUtil.getData(zkw, zkw.znodePaths.getZNodeForReplica(replicaId)); + byte[] data = ZooKeeperUtil.getData(zkw, zkw.znodePaths.getZNodeForReplica(replicaId)); if (data != null && data.length > 0 && ProtobufUtil.isPBMagicPrefix(data)) { try { int prefixLen = ProtobufUtil.lengthOfPBMagic(); @@ -507,7 +508,7 @@ public class MetaTableLocator { serverName = ProtobufUtil.parseServerNameFrom(data); } } catch (DeserializationException e) { - throw ZKUtil.convert(e); + throw ZooKeeperUtil.convert(e); } catch (InterruptedException e) { Thread.currentThread().interrupt(); } @@ -524,12 +525,12 @@ public class MetaTableLocator { * @param zookeeper zookeeper reference * @throws KeeperException unexpected zookeeper exception */ - public void deleteMetaLocation(ZooKeeperWatcher zookeeper) + public void deleteMetaLocation(ZKWatcher zookeeper) throws KeeperException { deleteMetaLocation(zookeeper, RegionInfo.DEFAULT_REPLICA_ID); } - public void deleteMetaLocation(ZooKeeperWatcher zookeeper, int replicaId) + public void deleteMetaLocation(ZKWatcher zookeeper, int replicaId) throws KeeperException { if (replicaId == RegionInfo.DEFAULT_REPLICA_ID) { LOG.info("Deleting hbase:meta region location in ZooKeeper"); @@ -538,7 +539,7 @@ public class MetaTableLocator { } try { // Just delete the node. Don't need any watches. 
- ZKUtil.deleteNode(zookeeper, zookeeper.znodePaths.getZNodeForReplica(replicaId)); + ZooKeeperUtil.deleteNode(zookeeper, zookeeper.znodePaths.getZNodeForReplica(replicaId)); } catch(KeeperException.NoNodeException nne) { // Has already been deleted } @@ -552,7 +553,7 @@ public class MetaTableLocator { * @return ServerName or null if we timed out. * @throws InterruptedException */ - public List blockUntilAvailable(final ZooKeeperWatcher zkw, + public List blockUntilAvailable(final ZKWatcher zkw, final long timeout, Configuration conf) throws InterruptedException { int numReplicasConfigured = 1; @@ -584,7 +585,7 @@ public class MetaTableLocator { * @return ServerName or null if we timed out. * @throws InterruptedException */ - public ServerName blockUntilAvailable(final ZooKeeperWatcher zkw, + public ServerName blockUntilAvailable(final ZKWatcher zkw, final long timeout) throws InterruptedException { return blockUntilAvailable(zkw, RegionInfo.DEFAULT_REPLICA_ID, timeout); @@ -598,7 +599,7 @@ public class MetaTableLocator { * @return ServerName or null if we timed out. 
* @throws InterruptedException */ - public ServerName blockUntilAvailable(final ZooKeeperWatcher zkw, int replicaId, + public ServerName blockUntilAvailable(final ZKWatcher zkw, int replicaId, final long timeout) throws InterruptedException { if (timeout < 0) throw new IllegalArgumentException(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKClusterId.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKClusterId.java index 9ef7691806..9a978d375f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKClusterId.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKClusterId.java @@ -35,11 +35,11 @@ import org.apache.zookeeper.KeeperException; */ @InterfaceAudience.Private public class ZKClusterId { - private ZooKeeperWatcher watcher; + private ZKWatcher watcher; private Abortable abortable; private String id; - public ZKClusterId(ZooKeeperWatcher watcher, Abortable abortable) { + public ZKClusterId(ZKWatcher watcher, Abortable abortable) { this.watcher = watcher; this.abortable = abortable; } @@ -60,12 +60,12 @@ public class ZKClusterId { return id; } - public static String readClusterIdZNode(ZooKeeperWatcher watcher) + public static String readClusterIdZNode(ZKWatcher watcher) throws KeeperException { - if (ZKUtil.checkExists(watcher, watcher.znodePaths.clusterIdZNode) != -1) { + if (ZooKeeperUtil.checkExists(watcher, watcher.znodePaths.clusterIdZNode) != -1) { byte [] data; try { - data = ZKUtil.getData(watcher, watcher.znodePaths.clusterIdZNode); + data = ZooKeeperUtil.getData(watcher, watcher.znodePaths.clusterIdZNode); } catch (InterruptedException e) { Thread.currentThread().interrupt(); return null; @@ -74,16 +74,16 @@ public class ZKClusterId { try { return ClusterId.parseFrom(data).toString(); } catch (DeserializationException e) { - throw ZKUtil.convert(e); + throw ZooKeeperUtil.convert(e); } } } return null; } - public static void setClusterId(ZooKeeperWatcher watcher, 
ClusterId id) + public static void setClusterId(ZKWatcher watcher, ClusterId id) throws KeeperException { - ZKUtil.createSetData(watcher, watcher.znodePaths.clusterIdZNode, id.toByteArray()); + ZooKeeperUtil.createSetData(watcher, watcher.znodePaths.clusterIdZNode, id.toByteArray()); } /** @@ -92,7 +92,7 @@ public class ZKClusterId { * @return the UUID read from zookeeper * @throws KeeperException */ - public static UUID getUUIDForCluster(ZooKeeperWatcher zkw) throws KeeperException { + public static UUID getUUIDForCluster(ZKWatcher zkw) throws KeeperException { String uuid = readClusterIdZNode(zkw); return uuid == null ? null : UUID.fromString(uuid); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java index 060792fd21..e4ea7121e8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java @@ -18,33 +18,18 @@ */ package org.apache.hadoop.hbase.zookeeper; -import java.io.BufferedReader; import java.io.IOException; -import java.io.InputStreamReader; -import java.io.PrintWriter; -import java.net.InetSocketAddress; -import java.net.Socket; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Deque; -import java.util.HashMap; import java.util.LinkedList; import java.util.List; import java.util.Map; -import javax.security.auth.login.AppConfigurationEntry; -import javax.security.auth.login.AppConfigurationEntry.LoginModuleControlFlag; - -import org.apache.commons.lang3.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.AuthUtil; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; import org.apache.yetus.audience.InterfaceAudience; import 
org.apache.hadoop.hbase.exceptions.DeserializationException; -import org.apache.hadoop.hbase.security.Superusers; import org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException; import org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; @@ -52,30 +37,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds; import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.Threads; -import org.apache.hadoop.hbase.zookeeper.ZKUtil.ZKUtilOp.CreateAndFailSilent; -import org.apache.hadoop.hbase.zookeeper.ZKUtil.ZKUtilOp.DeleteNodeFailSilent; -import org.apache.hadoop.hbase.zookeeper.ZKUtil.ZKUtilOp.SetData; -import org.apache.hadoop.security.SecurityUtil; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.security.authentication.util.KerberosUtil; -import org.apache.zookeeper.AsyncCallback; -import org.apache.zookeeper.CreateMode; import org.apache.zookeeper.KeeperException; -import org.apache.zookeeper.KeeperException.NoNodeException; -import org.apache.zookeeper.Op; -import org.apache.zookeeper.Watcher; -import org.apache.zookeeper.ZooDefs.Ids; -import org.apache.zookeeper.ZooDefs.Perms; -import org.apache.zookeeper.ZooKeeper; -import org.apache.zookeeper.client.ZooKeeperSaslClient; -import org.apache.zookeeper.data.ACL; -import org.apache.zookeeper.data.Id; -import org.apache.zookeeper.data.Stat; -import org.apache.zookeeper.proto.CreateRequest; -import org.apache.zookeeper.proto.DeleteRequest; -import org.apache.zookeeper.proto.SetDataRequest; -import org.apache.zookeeper.server.ZooKeeperSaslServer; /** * Internal HBase utility class for ZooKeeper. 
@@ -89,1623 +51,13 @@ import org.apache.zookeeper.server.ZooKeeperSaslServer; @InterfaceAudience.Private public class ZKUtil { private static final Log LOG = LogFactory.getLog(ZKUtil.class); - - // TODO: Replace this with ZooKeeper constant when ZOOKEEPER-277 is resolved. - public static final char ZNODE_PATH_SEPARATOR = '/'; - private static int zkDumpConnectionTimeOut; - - /** - * Creates a new connection to ZooKeeper, pulling settings and ensemble config - * from the specified configuration object using methods from {@link ZKConfig}. - * - * Sets the connection status monitoring watcher to the specified watcher. - * - * @param conf configuration to pull ensemble and other settings from - * @param watcher watcher to monitor connection changes - * @return connection to zookeeper - * @throws IOException if unable to connect to zk or config problem - */ - public static RecoverableZooKeeper connect(Configuration conf, Watcher watcher) - throws IOException { - String ensemble = ZKConfig.getZKQuorumServersString(conf); - return connect(conf, ensemble, watcher); - } - - public static RecoverableZooKeeper connect(Configuration conf, String ensemble, - Watcher watcher) - throws IOException { - return connect(conf, ensemble, watcher, null); - } - - public static RecoverableZooKeeper connect(Configuration conf, String ensemble, - Watcher watcher, final String identifier) - throws IOException { - if(ensemble == null) { - throw new IOException("Unable to determine ZooKeeper ensemble"); - } - int timeout = conf.getInt(HConstants.ZK_SESSION_TIMEOUT, - HConstants.DEFAULT_ZK_SESSION_TIMEOUT); - if (LOG.isTraceEnabled()) { - LOG.trace(identifier + " opening connection to ZooKeeper ensemble=" + ensemble); - } - int retry = conf.getInt("zookeeper.recovery.retry", 3); - int retryIntervalMillis = - conf.getInt("zookeeper.recovery.retry.intervalmill", 1000); - int maxSleepTime = conf.getInt("zookeeper.recovery.retry.maxsleeptime", 60000); - zkDumpConnectionTimeOut = 
conf.getInt("zookeeper.dump.connection.timeout", - 1000); - return new RecoverableZooKeeper(ensemble, timeout, watcher, - retry, retryIntervalMillis, maxSleepTime, identifier); - } - - /** - * Log in the current zookeeper server process using the given configuration - * keys for the credential file and login principal. - * - *

This is only applicable when running on secure hbase - * On regular HBase (without security features), this will safely be ignored. - *

- * - * @param conf The configuration data to use - * @param keytabFileKey Property key used to configure the path to the credential file - * @param userNameKey Property key used to configure the login principal - * @param hostname Current hostname to use in any credentials - * @throws IOException underlying exception from SecurityUtil.login() call - */ - public static void loginServer(Configuration conf, String keytabFileKey, - String userNameKey, String hostname) throws IOException { - login(conf, keytabFileKey, userNameKey, hostname, - ZooKeeperSaslServer.LOGIN_CONTEXT_NAME_KEY, - JaasConfiguration.SERVER_KEYTAB_KERBEROS_CONFIG_NAME); - } - - /** - * Log in the current zookeeper client using the given configuration - * keys for the credential file and login principal. - * - *

This is only applicable when running on secure hbase - * On regular HBase (without security features), this will safely be ignored. - *

- * - * @param conf The configuration data to use - * @param keytabFileKey Property key used to configure the path to the credential file - * @param userNameKey Property key used to configure the login principal - * @param hostname Current hostname to use in any credentials - * @throws IOException underlying exception from SecurityUtil.login() call - */ - public static void loginClient(Configuration conf, String keytabFileKey, - String userNameKey, String hostname) throws IOException { - login(conf, keytabFileKey, userNameKey, hostname, - ZooKeeperSaslClient.LOGIN_CONTEXT_NAME_KEY, - JaasConfiguration.CLIENT_KEYTAB_KERBEROS_CONFIG_NAME); - } - - /** - * Log in the current process using the given configuration keys for the - * credential file and login principal. - * - *

This is only applicable when running on secure hbase - * On regular HBase (without security features), this will safely be ignored. - *

- * - * @param conf The configuration data to use - * @param keytabFileKey Property key used to configure the path to the credential file - * @param userNameKey Property key used to configure the login principal - * @param hostname Current hostname to use in any credentials - * @param loginContextProperty property name to expose the entry name - * @param loginContextName jaas entry name - * @throws IOException underlying exception from SecurityUtil.login() call - */ - private static void login(Configuration conf, String keytabFileKey, - String userNameKey, String hostname, - String loginContextProperty, String loginContextName) - throws IOException { - if (!isSecureZooKeeper(conf)) - return; - - // User has specified a jaas.conf, keep this one as the good one. - // HBASE_OPTS="-Djava.security.auth.login.config=jaas.conf" - if (System.getProperty("java.security.auth.login.config") != null) - return; - - // No keytab specified, no auth - String keytabFilename = conf.get(keytabFileKey); - if (keytabFilename == null) { - LOG.warn("no keytab specified for: " + keytabFileKey); - return; - } - - String principalConfig = conf.get(userNameKey, System.getProperty("user.name")); - String principalName = SecurityUtil.getServerPrincipal(principalConfig, hostname); - - // Initialize the "jaas.conf" for keyTab/principal, - // If keyTab is not specified use the Ticket Cache. - // and set the zookeeper login context name. - JaasConfiguration jaasConf = new JaasConfiguration(loginContextName, - principalName, keytabFilename); - javax.security.auth.login.Configuration.setConfiguration(jaasConf); - System.setProperty(loginContextProperty, loginContextName); - } - - /** - * A JAAS configuration that defines the login modules that we want to use for login. 
- */ - private static class JaasConfiguration extends javax.security.auth.login.Configuration { - private static final String SERVER_KEYTAB_KERBEROS_CONFIG_NAME = - "zookeeper-server-keytab-kerberos"; - private static final String CLIENT_KEYTAB_KERBEROS_CONFIG_NAME = - "zookeeper-client-keytab-kerberos"; - - private static final Map BASIC_JAAS_OPTIONS = new HashMap<>(); - static { - String jaasEnvVar = System.getenv("HBASE_JAAS_DEBUG"); - if (jaasEnvVar != null && "true".equalsIgnoreCase(jaasEnvVar)) { - BASIC_JAAS_OPTIONS.put("debug", "true"); - } - } - - private static final Map KEYTAB_KERBEROS_OPTIONS = new HashMap<>(); - static { - KEYTAB_KERBEROS_OPTIONS.put("doNotPrompt", "true"); - KEYTAB_KERBEROS_OPTIONS.put("storeKey", "true"); - KEYTAB_KERBEROS_OPTIONS.put("refreshKrb5Config", "true"); - KEYTAB_KERBEROS_OPTIONS.putAll(BASIC_JAAS_OPTIONS); - } - - private static final AppConfigurationEntry KEYTAB_KERBEROS_LOGIN = - new AppConfigurationEntry(KerberosUtil.getKrb5LoginModuleName(), - LoginModuleControlFlag.REQUIRED, - KEYTAB_KERBEROS_OPTIONS); - - private static final AppConfigurationEntry[] KEYTAB_KERBEROS_CONF = - new AppConfigurationEntry[]{KEYTAB_KERBEROS_LOGIN}; - - private javax.security.auth.login.Configuration baseConfig; - private final String loginContextName; - private final boolean useTicketCache; - private final String keytabFile; - private final String principal; - - public JaasConfiguration(String loginContextName, String principal, String keytabFile) { - this(loginContextName, principal, keytabFile, keytabFile == null || keytabFile.length() == 0); - } - - private JaasConfiguration(String loginContextName, String principal, - String keytabFile, boolean useTicketCache) { - try { - this.baseConfig = javax.security.auth.login.Configuration.getConfiguration(); - } catch (SecurityException e) { - this.baseConfig = null; - } - this.loginContextName = loginContextName; - this.useTicketCache = useTicketCache; - this.keytabFile = keytabFile; - 
this.principal = principal; - LOG.info("JaasConfiguration loginContextName=" + loginContextName + - " principal=" + principal + " useTicketCache=" + useTicketCache + - " keytabFile=" + keytabFile); - } - - @Override - public AppConfigurationEntry[] getAppConfigurationEntry(String appName) { - if (loginContextName.equals(appName)) { - if (!useTicketCache) { - KEYTAB_KERBEROS_OPTIONS.put("keyTab", keytabFile); - KEYTAB_KERBEROS_OPTIONS.put("useKeyTab", "true"); - } - KEYTAB_KERBEROS_OPTIONS.put("principal", principal); - KEYTAB_KERBEROS_OPTIONS.put("useTicketCache", useTicketCache ? "true" : "false"); - return KEYTAB_KERBEROS_CONF; - } - if (baseConfig != null) return baseConfig.getAppConfigurationEntry(appName); - return(null); - } - } - - // - // Helper methods - // - - /** - * Join the prefix znode name with the suffix znode name to generate a proper - * full znode name. - * - * Assumes prefix does not end with slash and suffix does not begin with it. - * - * @param prefix beginning of znode name - * @param suffix ending of znode name - * @return result of properly joining prefix with suffix - */ - public static String joinZNode(String prefix, String suffix) { - return prefix + ZNODE_PATH_SEPARATOR + suffix; - } - - /** - * Returns the full path of the immediate parent of the specified node. - * @param node path to get parent of - * @return parent of path, null if passed the root node or an invalid node - */ - public static String getParent(String node) { - int idx = node.lastIndexOf(ZNODE_PATH_SEPARATOR); - return idx <= 0 ? null : node.substring(0, idx); - } - - /** - * Get the name of the current node from the specified fully-qualified path. - * @param path fully-qualified path - * @return name of the current node - */ - public static String getNodeName(String path) { - return path.substring(path.lastIndexOf("/")+1); - } - - // - // Existence checks and watches - // - - /** - * Watch the specified znode for delete/create/change events. 
The watcher is - * set whether or not the node exists. If the node already exists, the method - * returns true. If the node does not exist, the method returns false. - * - * @param zkw zk reference - * @param znode path of node to watch - * @return true if znode exists, false if does not exist or error - * @throws KeeperException if unexpected zookeeper exception - */ - public static boolean watchAndCheckExists(ZooKeeperWatcher zkw, String znode) - throws KeeperException { - try { - Stat s = zkw.getRecoverableZooKeeper().exists(znode, zkw); - boolean exists = s != null ? true : false; - if (exists) { - LOG.debug(zkw.prefix("Set watcher on existing znode=" + znode)); - } else { - LOG.debug(zkw.prefix("Set watcher on znode that does not yet exist, " + znode)); - } - return exists; - } catch (KeeperException e) { - LOG.warn(zkw.prefix("Unable to set watcher on znode " + znode), e); - zkw.keeperException(e); - return false; - } catch (InterruptedException e) { - LOG.warn(zkw.prefix("Unable to set watcher on znode " + znode), e); - zkw.interruptedException(e); - return false; - } - } - - /** - * Watch the specified znode, but only if exists. Useful when watching - * for deletions. Uses .getData() (and handles NoNodeException) instead - * of .exists() to accomplish this, as .getData() will only set a watch if - * the znode exists. - * @param zkw zk reference - * @param znode path of node to watch - * @return true if the watch is set, false if node does not exists - * @throws KeeperException if unexpected zookeeper exception - */ - public static boolean setWatchIfNodeExists(ZooKeeperWatcher zkw, String znode) - throws KeeperException { - try { - zkw.getRecoverableZooKeeper().getData(znode, true, null); - return true; - } catch (NoNodeException e) { - return false; - } catch (InterruptedException e) { - LOG.warn(zkw.prefix("Unable to set watcher on znode " + znode), e); - zkw.interruptedException(e); - return false; - } - } - - /** - * Check if the specified node exists. 
Sets no watches. - * - * @param zkw zk reference - * @param znode path of node to watch - * @return version of the node if it exists, -1 if does not exist - * @throws KeeperException if unexpected zookeeper exception - */ - public static int checkExists(ZooKeeperWatcher zkw, String znode) - throws KeeperException { - try { - Stat s = zkw.getRecoverableZooKeeper().exists(znode, null); - return s != null ? s.getVersion() : -1; - } catch (KeeperException e) { - LOG.warn(zkw.prefix("Unable to set watcher on znode (" + znode + ")"), e); - zkw.keeperException(e); - return -1; - } catch (InterruptedException e) { - LOG.warn(zkw.prefix("Unable to set watcher on znode (" + znode + ")"), e); - zkw.interruptedException(e); - return -1; - } - } - - // - // Znode listings - // - - /** - * Lists the children znodes of the specified znode. Also sets a watch on - * the specified znode which will capture a NodeDeleted event on the specified - * znode as well as NodeChildrenChanged if any children of the specified znode - * are created or deleted. - * - * Returns null if the specified node does not exist. Otherwise returns a - * list of children of the specified node. If the node exists but it has no - * children, an empty list will be returned. 
- * - * @param zkw zk reference - * @param znode path of node to list and watch children of - * @return list of children of the specified node, an empty list if the node - * exists but has no children, and null if the node does not exist - * @throws KeeperException if unexpected zookeeper exception - */ - public static List listChildrenAndWatchForNewChildren( - ZooKeeperWatcher zkw, String znode) - throws KeeperException { - try { - List children = zkw.getRecoverableZooKeeper().getChildren(znode, zkw); - return children; - } catch(KeeperException.NoNodeException ke) { - LOG.debug(zkw.prefix("Unable to list children of znode " + znode + " " + - "because node does not exist (not an error)")); - return null; - } catch (KeeperException e) { - LOG.warn(zkw.prefix("Unable to list children of znode " + znode + " "), e); - zkw.keeperException(e); - return null; - } catch (InterruptedException e) { - LOG.warn(zkw.prefix("Unable to list children of znode " + znode + " "), e); - zkw.interruptedException(e); - return null; - } - } - - /** - * List all the children of the specified znode, setting a watch for children - * changes and also setting a watch on every individual child in order to get - * the NodeCreated and NodeDeleted events. - * @param zkw zookeeper reference - * @param znode node to get children of and watch - * @return list of znode names, null if the node doesn't exist - * @throws KeeperException - */ - public static List listChildrenAndWatchThem(ZooKeeperWatcher zkw, - String znode) throws KeeperException { - List children = listChildrenAndWatchForNewChildren(zkw, znode); - if (children == null) { - return null; - } - for (String child : children) { - watchAndCheckExists(zkw, joinZNode(znode, child)); - } - return children; - } - - /** - * Lists the children of the specified znode without setting any watches. - * - * Sets no watches at all, this method is best effort. - * - * Returns an empty list if the node has no children. 
Returns null if the - * parent node itself does not exist. - * - * @param zkw zookeeper reference - * @param znode node to get children - * @return list of data of children of specified znode, empty if no children, - * null if parent does not exist - * @throws KeeperException if unexpected zookeeper exception - */ - public static List listChildrenNoWatch(ZooKeeperWatcher zkw, String znode) - throws KeeperException { - List children = null; - try { - // List the children without watching - children = zkw.getRecoverableZooKeeper().getChildren(znode, null); - } catch(KeeperException.NoNodeException nne) { - return null; - } catch(InterruptedException ie) { - zkw.interruptedException(ie); - } - return children; - } - - /** - * Simple class to hold a node path and node data. - * @deprecated Unused - */ - @Deprecated - public static class NodeAndData { - private String node; - private byte [] data; - public NodeAndData(String node, byte [] data) { - this.node = node; - this.data = data; - } - public String getNode() { - return node; - } - public byte [] getData() { - return data; - } - @Override - public String toString() { - return node; - } - public boolean isEmpty() { - return (data == null || data.length == 0); - } - } - - /** - * Checks if the specified znode has any children. Sets no watches. - * - * Returns true if the node exists and has children. Returns false if the - * node does not exist or if the node does not have any children. - * - * Used during master initialization to determine if the master is a - * failed-over-to master or the first master during initial cluster startup. - * If the directory for regionserver ephemeral nodes is empty then this is - * a cluster startup, if not then it is not cluster startup. 
- * - * @param zkw zk reference - * @param znode path of node to check for children of - * @return true if node has children, false if not or node does not exist - * @throws KeeperException if unexpected zookeeper exception - */ - public static boolean nodeHasChildren(ZooKeeperWatcher zkw, String znode) - throws KeeperException { - try { - return !zkw.getRecoverableZooKeeper().getChildren(znode, null).isEmpty(); - } catch(KeeperException.NoNodeException ke) { - LOG.debug(zkw.prefix("Unable to list children of znode " + znode + " " + - "because node does not exist (not an error)")); - return false; - } catch (KeeperException e) { - LOG.warn(zkw.prefix("Unable to list children of znode " + znode), e); - zkw.keeperException(e); - return false; - } catch (InterruptedException e) { - LOG.warn(zkw.prefix("Unable to list children of znode " + znode), e); - zkw.interruptedException(e); - return false; - } - } - - /** - * Get the number of children of the specified node. - * - * If the node does not exist or has no children, returns 0. - * - * Sets no watches at all. - * - * @param zkw zk reference - * @param znode path of node to count children of - * @return number of children of specified node, 0 if none or parent does not - * exist - * @throws KeeperException if unexpected zookeeper exception - */ - public static int getNumberOfChildren(ZooKeeperWatcher zkw, String znode) - throws KeeperException { - try { - Stat stat = zkw.getRecoverableZooKeeper().exists(znode, null); - return stat == null ? 0 : stat.getNumChildren(); - } catch(KeeperException e) { - LOG.warn(zkw.prefix("Unable to get children of node " + znode)); - zkw.keeperException(e); - } catch(InterruptedException e) { - zkw.interruptedException(e); - } - return 0; - } - - // - // Data retrieval - // - - /** - * Get znode data. Does not set a watcher. - * @return ZNode data, null if the node does not exist or if there is an - * error. 
- */ - public static byte [] getData(ZooKeeperWatcher zkw, String znode) - throws KeeperException, InterruptedException { - try { - byte [] data = zkw.getRecoverableZooKeeper().getData(znode, null, null); - logRetrievedMsg(zkw, znode, data, false); - return data; - } catch (KeeperException.NoNodeException e) { - LOG.debug(zkw.prefix("Unable to get data of znode " + znode + " " + - "because node does not exist (not an error)")); - return null; - } catch (KeeperException e) { - LOG.warn(zkw.prefix("Unable to get data of znode " + znode), e); - zkw.keeperException(e); - return null; - } - } - - /** - * Get the data at the specified znode and set a watch. - * - * Returns the data and sets a watch if the node exists. Returns null and no - * watch is set if the node does not exist or there is an exception. - * - * @param zkw zk reference - * @param znode path of node - * @return data of the specified znode, or null - * @throws KeeperException if unexpected zookeeper exception - */ - public static byte [] getDataAndWatch(ZooKeeperWatcher zkw, String znode) - throws KeeperException { - return getDataInternal(zkw, znode, null, true); - } - - /** - * Get the data at the specified znode and set a watch. - * - * Returns the data and sets a watch if the node exists. Returns null and no - * watch is set if the node does not exist or there is an exception. 
- * - * @param zkw zk reference - * @param znode path of node - * @param stat object to populate the version of the znode - * @return data of the specified znode, or null - * @throws KeeperException if unexpected zookeeper exception - */ - public static byte[] getDataAndWatch(ZooKeeperWatcher zkw, String znode, - Stat stat) throws KeeperException { - return getDataInternal(zkw, znode, stat, true); - } - - private static byte[] getDataInternal(ZooKeeperWatcher zkw, String znode, Stat stat, - boolean watcherSet) - throws KeeperException { - try { - byte [] data = zkw.getRecoverableZooKeeper().getData(znode, zkw, stat); - logRetrievedMsg(zkw, znode, data, watcherSet); - return data; - } catch (KeeperException.NoNodeException e) { - // This log can get pretty annoying when we cycle on 100ms waits. - // Enable trace if you really want to see it. - LOG.trace(zkw.prefix("Unable to get data of znode " + znode + " " + - "because node does not exist (not an error)")); - return null; - } catch (KeeperException e) { - LOG.warn(zkw.prefix("Unable to get data of znode " + znode), e); - zkw.keeperException(e); - return null; - } catch (InterruptedException e) { - LOG.warn(zkw.prefix("Unable to get data of znode " + znode), e); - zkw.interruptedException(e); - return null; - } - } - - /** - * Get the data at the specified znode without setting a watch. - * - * Returns the data if the node exists. Returns null if the node does not - * exist. - * - * Sets the stats of the node in the passed Stat object. Pass a null stat if - * not interested. 
- * - * @param zkw zk reference - * @param znode path of node - * @param stat node status to get if node exists - * @return data of the specified znode, or null if node does not exist - * @throws KeeperException if unexpected zookeeper exception - */ - public static byte [] getDataNoWatch(ZooKeeperWatcher zkw, String znode, - Stat stat) - throws KeeperException { - try { - byte [] data = zkw.getRecoverableZooKeeper().getData(znode, null, stat); - logRetrievedMsg(zkw, znode, data, false); - return data; - } catch (KeeperException.NoNodeException e) { - LOG.debug(zkw.prefix("Unable to get data of znode " + znode + " " + - "because node does not exist (not necessarily an error)")); - return null; - } catch (KeeperException e) { - LOG.warn(zkw.prefix("Unable to get data of znode " + znode), e); - zkw.keeperException(e); - return null; - } catch (InterruptedException e) { - LOG.warn(zkw.prefix("Unable to get data of znode " + znode), e); - zkw.interruptedException(e); - return null; - } - } - - /** - * Returns the date of child znodes of the specified znode. Also sets a watch on - * the specified znode which will capture a NodeDeleted event on the specified - * znode as well as NodeChildrenChanged if any children of the specified znode - * are created or deleted. - * - * Returns null if the specified node does not exist. Otherwise returns a - * list of children of the specified node. If the node exists but it has no - * children, an empty list will be returned. 
- * - * @param zkw zk reference - * @param baseNode path of node to list and watch children of - * @return list of data of children of the specified node, an empty list if the node - * exists but has no children, and null if the node does not exist - * @throws KeeperException if unexpected zookeeper exception - * @deprecated Unused - */ - @Deprecated - public static List getChildDataAndWatchForNewChildren( - ZooKeeperWatcher zkw, String baseNode) throws KeeperException { - List nodes = - ZKUtil.listChildrenAndWatchForNewChildren(zkw, baseNode); - if (nodes != null) { - List newNodes = new ArrayList<>(); - for (String node : nodes) { - String nodePath = ZKUtil.joinZNode(baseNode, node); - byte[] data = ZKUtil.getDataAndWatch(zkw, nodePath); - newNodes.add(new NodeAndData(nodePath, data)); - } - return newNodes; - } - return null; - } - - /** - * Update the data of an existing node with the expected version to have the - * specified data. - * - * Throws an exception if there is a version mismatch or some other problem. - * - * Sets no watches under any conditions. - * - * @param zkw zk reference - * @param znode - * @param data - * @param expectedVersion - * @throws KeeperException if unexpected zookeeper exception - * @throws KeeperException.BadVersionException if version mismatch - * @deprecated Unused - */ - @Deprecated - public static void updateExistingNodeData(ZooKeeperWatcher zkw, String znode, - byte [] data, int expectedVersion) - throws KeeperException { - try { - zkw.getRecoverableZooKeeper().setData(znode, data, expectedVersion); - } catch(InterruptedException ie) { - zkw.interruptedException(ie); - } - } - - // - // Data setting - // - - /** - * Sets the data of the existing znode to be the specified data. Ensures that - * the current data has the specified expected version. - * - *

If the node does not exist, a {@link NoNodeException} will be thrown. - * - *

If their is a version mismatch, method returns null. - * - *

No watches are set but setting data will trigger other watchers of this - * node. - * - *

If there is another problem, a KeeperException will be thrown. - * - * @param zkw zk reference - * @param znode path of node - * @param data data to set for node - * @param expectedVersion version expected when setting data - * @return true if data set, false if version mismatch - * @throws KeeperException if unexpected zookeeper exception - */ - public static boolean setData(ZooKeeperWatcher zkw, String znode, - byte [] data, int expectedVersion) - throws KeeperException, KeeperException.NoNodeException { - try { - return zkw.getRecoverableZooKeeper().setData(znode, data, expectedVersion) != null; - } catch (InterruptedException e) { - zkw.interruptedException(e); - return false; - } - } - - /** - * Set data into node creating node if it doesn't yet exist. - * Does not set watch. - * - * @param zkw zk reference - * @param znode path of node - * @param data data to set for node - * @throws KeeperException - */ - public static void createSetData(final ZooKeeperWatcher zkw, final String znode, - final byte [] data) - throws KeeperException { - if (checkExists(zkw, znode) == -1) { - ZKUtil.createWithParents(zkw, znode, data); - } else { - ZKUtil.setData(zkw, znode, data); - } - } - - /** - * Sets the data of the existing znode to be the specified data. The node - * must exist but no checks are done on the existing data or version. - * - *

If the node does not exist, a {@link NoNodeException} will be thrown. - * - *

No watches are set but setting data will trigger other watchers of this - * node. - * - *

If there is another problem, a KeeperException will be thrown. - * - * @param zkw zk reference - * @param znode path of node - * @param data data to set for node - * @throws KeeperException if unexpected zookeeper exception - */ - public static void setData(ZooKeeperWatcher zkw, String znode, byte [] data) - throws KeeperException, KeeperException.NoNodeException { - setData(zkw, (SetData)ZKUtilOp.setData(znode, data)); - } - - private static void setData(ZooKeeperWatcher zkw, SetData setData) - throws KeeperException, KeeperException.NoNodeException { - SetDataRequest sd = (SetDataRequest)toZooKeeperOp(zkw, setData).toRequestRecord(); - setData(zkw, sd.getPath(), sd.getData(), sd.getVersion()); - } - - /** - * Returns whether or not secure authentication is enabled - * (whether hbase.security.authentication is set to - * kerberos. - */ - public static boolean isSecureZooKeeper(Configuration conf) { - // Detection for embedded HBase client with jaas configuration - // defined for third party programs. - try { - javax.security.auth.login.Configuration testConfig = - javax.security.auth.login.Configuration.getConfiguration(); - if (testConfig.getAppConfigurationEntry("Client") == null - && testConfig.getAppConfigurationEntry( - JaasConfiguration.CLIENT_KEYTAB_KERBEROS_CONFIG_NAME) == null - && testConfig.getAppConfigurationEntry( - JaasConfiguration.SERVER_KEYTAB_KERBEROS_CONFIG_NAME) == null - && conf.get(HConstants.ZK_CLIENT_KERBEROS_PRINCIPAL) == null - && conf.get(HConstants.ZK_SERVER_KERBEROS_PRINCIPAL) == null) { - - return false; - } - } catch(Exception e) { - // No Jaas configuration defined. 
- return false; - } - - // Master & RSs uses hbase.zookeeper.client.* - return "kerberos".equalsIgnoreCase(conf.get("hbase.security.authentication")); - } - - private static ArrayList createACL(ZooKeeperWatcher zkw, String node) { - return createACL(zkw, node, isSecureZooKeeper(zkw.getConfiguration())); - } - - public static ArrayList createACL(ZooKeeperWatcher zkw, String node, - boolean isSecureZooKeeper) { - if (!node.startsWith(zkw.znodePaths.baseZNode)) { - return Ids.OPEN_ACL_UNSAFE; - } - if (isSecureZooKeeper) { - ArrayList acls = new ArrayList<>(); - // add permission to hbase supper user - String[] superUsers = zkw.getConfiguration().getStrings(Superusers.SUPERUSER_CONF_KEY); - String hbaseUser = null; - try { - hbaseUser = UserGroupInformation.getCurrentUser().getShortUserName(); - } catch (IOException e) { - LOG.debug("Could not acquire current User.", e); - } - if (superUsers != null) { - List groups = new ArrayList<>(); - for (String user : superUsers) { - if (AuthUtil.isGroupPrincipal(user)) { - // TODO: Set node ACL for groups when ZK supports this feature - groups.add(user); - } else { - if(!user.equals(hbaseUser)) { - acls.add(new ACL(Perms.ALL, new Id("sasl", user))); - } - } - } - if (!groups.isEmpty()) { - LOG.warn("Znode ACL setting for group " + groups - + " is skipped, ZooKeeper doesn't support this feature presently."); - } - } - // Certain znodes are accessed directly by the client, - // so they must be readable by non-authenticated clients - if (zkw.isClientReadable(node)) { - acls.addAll(Ids.CREATOR_ALL_ACL); - acls.addAll(Ids.READ_ACL_UNSAFE); - } else { - acls.addAll(Ids.CREATOR_ALL_ACL); - } - return acls; - } else { - return Ids.OPEN_ACL_UNSAFE; - } - } - - // - // Node creation - // - - /** - * - * Set the specified znode to be an ephemeral node carrying the specified - * data. - * - * If the node is created successfully, a watcher is also set on the node. 
- * - * If the node is not created successfully because it already exists, this - * method will also set a watcher on the node. - * - * If there is another problem, a KeeperException will be thrown. - * - * @param zkw zk reference - * @param znode path of node - * @param data data of node - * @return true if node created, false if not, watch set in both cases - * @throws KeeperException if unexpected zookeeper exception - */ - public static boolean createEphemeralNodeAndWatch(ZooKeeperWatcher zkw, - String znode, byte [] data) - throws KeeperException { - boolean ret = true; - try { - zkw.getRecoverableZooKeeper().create(znode, data, createACL(zkw, znode), - CreateMode.EPHEMERAL); - } catch (KeeperException.NodeExistsException nee) { - ret = false; - } catch (InterruptedException e) { - LOG.info("Interrupted", e); - Thread.currentThread().interrupt(); - } - if(!watchAndCheckExists(zkw, znode)) { - // It did exist but now it doesn't, try again - return createEphemeralNodeAndWatch(zkw, znode, data); - } - return ret; - } - - /** - * Creates the specified znode to be a persistent node carrying the specified - * data. - * - * Returns true if the node was successfully created, false if the node - * already existed. - * - * If the node is created successfully, a watcher is also set on the node. - * - * If the node is not created successfully because it already exists, this - * method will also set a watcher on the node but return false. - * - * If there is another problem, a KeeperException will be thrown. 
- * - * @param zkw zk reference - * @param znode path of node - * @param data data of node - * @return true if node created, false if not, watch set in both cases - * @throws KeeperException if unexpected zookeeper exception - */ - public static boolean createNodeIfNotExistsAndWatch( - ZooKeeperWatcher zkw, String znode, byte [] data) - throws KeeperException { - boolean ret = true; - try { - zkw.getRecoverableZooKeeper().create(znode, data, createACL(zkw, znode), - CreateMode.PERSISTENT); - } catch (KeeperException.NodeExistsException nee) { - ret = false; - } catch (InterruptedException e) { - zkw.interruptedException(e); - return false; - } - try { - zkw.getRecoverableZooKeeper().exists(znode, zkw); - } catch (InterruptedException e) { - zkw.interruptedException(e); - return false; - } - return ret; - } - - /** - * Creates the specified znode with the specified data but does not watch it. - * - * Returns the znode of the newly created node - * - * If there is another problem, a KeeperException will be thrown. - * - * @param zkw zk reference - * @param znode path of node - * @param data data of node - * @param createMode specifying whether the node to be created is ephemeral and/or sequential - * @return true name of the newly created znode or null - * @throws KeeperException if unexpected zookeeper exception - */ - public static String createNodeIfNotExistsNoWatch(ZooKeeperWatcher zkw, String znode, - byte[] data, CreateMode createMode) throws KeeperException { - - String createdZNode = null; - try { - createdZNode = zkw.getRecoverableZooKeeper().create(znode, data, - createACL(zkw, znode), createMode); - } catch (KeeperException.NodeExistsException nee) { - return znode; - } catch (InterruptedException e) { - zkw.interruptedException(e); - return null; - } - return createdZNode; - } - - /** - * Creates the specified node with the specified data and watches it. - * - *

Throws an exception if the node already exists. - * - *

The node created is persistent and open access. - * - *

Returns the version number of the created node if successful. - * - * @param zkw zk reference - * @param znode path of node to create - * @param data data of node to create - * @return version of node created - * @throws KeeperException if unexpected zookeeper exception - * @throws KeeperException.NodeExistsException if node already exists - */ - public static int createAndWatch(ZooKeeperWatcher zkw, - String znode, byte [] data) - throws KeeperException, KeeperException.NodeExistsException { - try { - zkw.getRecoverableZooKeeper().create(znode, data, createACL(zkw, znode), - CreateMode.PERSISTENT); - Stat stat = zkw.getRecoverableZooKeeper().exists(znode, zkw); - if (stat == null){ - // Likely a race condition. Someone deleted the znode. - throw KeeperException.create(KeeperException.Code.SYSTEMERROR, - "ZK.exists returned null (i.e.: znode does not exist) for znode=" + znode); - } - return stat.getVersion(); - } catch (InterruptedException e) { - zkw.interruptedException(e); - return -1; - } - } - - /** - * Async creates the specified node with the specified data. - * - *

Throws an exception if the node already exists. - * - *

The node created is persistent and open access. - * - * @param zkw zk reference - * @param znode path of node to create - * @param data data of node to create - * @param cb - * @param ctx - */ - public static void asyncCreate(ZooKeeperWatcher zkw, - String znode, byte [] data, final AsyncCallback.StringCallback cb, - final Object ctx) { - zkw.getRecoverableZooKeeper().getZooKeeper().create(znode, data, - createACL(zkw, znode), CreateMode.PERSISTENT, cb, ctx); - } - - /** - * Creates the specified node, iff the node does not exist. Does not set a - * watch and fails silently if the node already exists. - * - * The node created is persistent and open access. - * - * @param zkw zk reference - * @param znode path of node - * @throws KeeperException if unexpected zookeeper exception - */ - public static void createAndFailSilent(ZooKeeperWatcher zkw, - String znode) throws KeeperException { - createAndFailSilent(zkw, znode, new byte[0]); - } - - /** - * Creates the specified node containing specified data, iff the node does not exist. Does - * not set a watch and fails silently if the node already exists. - * - * The node created is persistent and open access. 
- * - * @param zkw zk reference - * @param znode path of node - * @param data a byte array data to store in the znode - * @throws KeeperException if unexpected zookeeper exception - */ - public static void createAndFailSilent(ZooKeeperWatcher zkw, - String znode, byte[] data) - throws KeeperException { - createAndFailSilent(zkw, - (CreateAndFailSilent)ZKUtilOp.createAndFailSilent(znode, data)); - } - - private static void createAndFailSilent(ZooKeeperWatcher zkw, CreateAndFailSilent cafs) - throws KeeperException { - CreateRequest create = (CreateRequest)toZooKeeperOp(zkw, cafs).toRequestRecord(); - String znode = create.getPath(); - try { - RecoverableZooKeeper zk = zkw.getRecoverableZooKeeper(); - if (zk.exists(znode, false) == null) { - zk.create(znode, create.getData(), create.getAcl(), CreateMode.fromFlag(create.getFlags())); - } - } catch(KeeperException.NodeExistsException nee) { - } catch(KeeperException.NoAuthException nee){ - try { - if (null == zkw.getRecoverableZooKeeper().exists(znode, false)) { - // If we failed to create the file and it does not already exist. - throw(nee); - } - } catch (InterruptedException ie) { - zkw.interruptedException(ie); - } - } catch(InterruptedException ie) { - zkw.interruptedException(ie); - } - } - - /** - * Creates the specified node and all parent nodes required for it to exist. - * - * No watches are set and no errors are thrown if the node already exists. - * - * The nodes created are persistent and open access. - * - * @param zkw zk reference - * @param znode path of node - * @throws KeeperException if unexpected zookeeper exception - */ - public static void createWithParents(ZooKeeperWatcher zkw, String znode) - throws KeeperException { - createWithParents(zkw, znode, new byte[0]); - } - - /** - * Creates the specified node and all parent nodes required for it to exist. The creation of - * parent znodes is not atomic with the leafe znode creation but the data is written atomically - * when the leaf node is created. 
- * - * No watches are set and no errors are thrown if the node already exists. - * - * The nodes created are persistent and open access. - * - * @param zkw zk reference - * @param znode path of node - * @throws KeeperException if unexpected zookeeper exception - */ - public static void createWithParents(ZooKeeperWatcher zkw, String znode, byte[] data) - throws KeeperException { - try { - if(znode == null) { - return; - } - zkw.getRecoverableZooKeeper().create(znode, data, createACL(zkw, znode), - CreateMode.PERSISTENT); - } catch(KeeperException.NodeExistsException nee) { - return; - } catch(KeeperException.NoNodeException nne) { - createWithParents(zkw, getParent(znode)); - createWithParents(zkw, znode, data); - } catch(InterruptedException ie) { - zkw.interruptedException(ie); - } - } - - // - // Deletes - // - - /** - * Delete the specified node. Sets no watches. Throws all exceptions. - */ - public static void deleteNode(ZooKeeperWatcher zkw, String node) - throws KeeperException { - deleteNode(zkw, node, -1); - } - - /** - * Delete the specified node with the specified version. Sets no watches. - * Throws all exceptions. - */ - public static boolean deleteNode(ZooKeeperWatcher zkw, String node, - int version) - throws KeeperException { - try { - zkw.getRecoverableZooKeeper().delete(node, version); - return true; - } catch(KeeperException.BadVersionException bve) { - return false; - } catch(InterruptedException ie) { - zkw.interruptedException(ie); - return false; - } - } - - /** - * Deletes the specified node. Fails silent if the node does not exist. 
- * @param zkw - * @param node - * @throws KeeperException - */ - public static void deleteNodeFailSilent(ZooKeeperWatcher zkw, String node) - throws KeeperException { - deleteNodeFailSilent(zkw, - (DeleteNodeFailSilent)ZKUtilOp.deleteNodeFailSilent(node)); - } - - private static void deleteNodeFailSilent(ZooKeeperWatcher zkw, - DeleteNodeFailSilent dnfs) throws KeeperException { - DeleteRequest delete = (DeleteRequest)toZooKeeperOp(zkw, dnfs).toRequestRecord(); - try { - zkw.getRecoverableZooKeeper().delete(delete.getPath(), delete.getVersion()); - } catch(KeeperException.NoNodeException nne) { - } catch(InterruptedException ie) { - zkw.interruptedException(ie); - } - } - - - /** - * Delete the specified node and all of it's children. - *

- * If the node does not exist, just returns. - *

- * Sets no watches. Throws all exceptions besides dealing with deletion of - * children. - */ - public static void deleteNodeRecursively(ZooKeeperWatcher zkw, String node) - throws KeeperException { - deleteNodeRecursivelyMultiOrSequential(zkw, true, node); - } - - /** - * Delete all the children of the specified node but not the node itself. - * - * Sets no watches. Throws all exceptions besides dealing with deletion of - * children. - * - * @throws KeeperException - */ - public static void deleteChildrenRecursively(ZooKeeperWatcher zkw, String node) - throws KeeperException { - deleteChildrenRecursivelyMultiOrSequential(zkw, true, node); - } - - /** - * Delete all the children of the specified node but not the node itself. This - * will first traverse the znode tree for listing the children and then delete - * these znodes using multi-update api or sequential based on the specified - * configurations. - *

- * Sets no watches. Throws all exceptions besides dealing with deletion of - * children. - *

- * If the following is true: - *

    - *
  • runSequentialOnMultiFailure is true - *
- * on calling multi, we get a ZooKeeper exception that can be handled by a - * sequential call(*), we retry the operations one-by-one (sequentially). - * - * @param zkw - * - zk reference - * @param runSequentialOnMultiFailure - * - if true when we get a ZooKeeper exception that could retry the - * operations one-by-one (sequentially) - * @param pathRoots - * - path of the parent node(s) - * @throws KeeperException.NotEmptyException - * if node has children while deleting - * @throws KeeperException - * if unexpected ZooKeeper exception - * @throws IllegalArgumentException - * if an invalid path is specified - */ - public static void deleteChildrenRecursivelyMultiOrSequential( - ZooKeeperWatcher zkw, boolean runSequentialOnMultiFailure, - String... pathRoots) throws KeeperException { - if (pathRoots == null || pathRoots.length <= 0) { - LOG.warn("Given path is not valid!"); - return; - } - List ops = new ArrayList<>(); - for (String eachRoot : pathRoots) { - List children = listChildrenBFSNoWatch(zkw, eachRoot); - // Delete the leaves first and eventually get rid of the root - for (int i = children.size() - 1; i >= 0; --i) { - ops.add(ZKUtilOp.deleteNodeFailSilent(children.get(i))); - } - } - // atleast one element should exist - if (ops.size() > 0) { - multiOrSequential(zkw, ops, runSequentialOnMultiFailure); - } - } - - /** - * Delete the specified node and its children. This traverse the - * znode tree for listing the children and then delete - * these znodes including the parent using multi-update api or - * sequential based on the specified configurations. - *

- * Sets no watches. Throws all exceptions besides dealing with deletion of - * children. - *

- * If the following is true: - *

    - *
  • runSequentialOnMultiFailure is true - *
- * on calling multi, we get a ZooKeeper exception that can be handled by a - * sequential call(*), we retry the operations one-by-one (sequentially). - * - * @param zkw - * - zk reference - * @param runSequentialOnMultiFailure - * - if true when we get a ZooKeeper exception that could retry the - * operations one-by-one (sequentially) - * @param pathRoots - * - path of the parent node(s) - * @throws KeeperException.NotEmptyException - * if node has children while deleting - * @throws KeeperException - * if unexpected ZooKeeper exception - * @throws IllegalArgumentException - * if an invalid path is specified - */ - public static void deleteNodeRecursivelyMultiOrSequential(ZooKeeperWatcher zkw, - boolean runSequentialOnMultiFailure, String... pathRoots) throws KeeperException { - if (pathRoots == null || pathRoots.length <= 0) { - LOG.warn("Given path is not valid!"); - return; - } - List ops = new ArrayList<>(); - for (String eachRoot : pathRoots) { - // ZooKeeper Watches are one time triggers; When children of parent nodes are deleted - // recursively, must set another watch, get notified of delete node - List children = listChildrenBFSAndWatchThem(zkw, eachRoot); - // Delete the leaves first and eventually get rid of the root - for (int i = children.size() - 1; i >= 0; --i) { - ops.add(ZKUtilOp.deleteNodeFailSilent(children.get(i))); - } - try { - if (zkw.getRecoverableZooKeeper().exists(eachRoot, zkw) != null) { - ops.add(ZKUtilOp.deleteNodeFailSilent(eachRoot)); - } - } catch (InterruptedException e) { - zkw.interruptedException(e); - } - } - // atleast one element should exist - if (ops.size() > 0) { - multiOrSequential(zkw, ops, runSequentialOnMultiFailure); - } - } - - /** - * BFS Traversal of all the children under path, with the entries in the list, - * in the same order as that of the traversal. Lists all the children without - * setting any watches. 
- * - * @param zkw - * - zk reference - * @param znode - * - path of node - * @return list of children znodes under the path - * @throws KeeperException - * if unexpected ZooKeeper exception - */ - private static List listChildrenBFSNoWatch(ZooKeeperWatcher zkw, - final String znode) throws KeeperException { - Deque queue = new LinkedList<>(); - List tree = new ArrayList<>(); - queue.add(znode); - while (true) { - String node = queue.pollFirst(); - if (node == null) { - break; - } - List children = listChildrenNoWatch(zkw, node); - if (children == null) { - continue; - } - for (final String child : children) { - final String childPath = node + "/" + child; - queue.add(childPath); - tree.add(childPath); - } - } - return tree; - } - - /** - * BFS Traversal of all the children under path, with the entries in the list, - * in the same order as that of the traversal. - * Lists all the children and set watches on to them. - * - * @param zkw - * - zk reference - * @param znode - * - path of node - * @return list of children znodes under the path - * @throws KeeperException - * if unexpected ZooKeeper exception - */ - private static List listChildrenBFSAndWatchThem(ZooKeeperWatcher zkw, final String znode) - throws KeeperException { - Deque queue = new LinkedList<>(); - List tree = new ArrayList<>(); - queue.add(znode); - while (true) { - String node = queue.pollFirst(); - if (node == null) { - break; - } - List children = listChildrenAndWatchThem(zkw, node); - if (children == null) { - continue; - } - for (final String child : children) { - final String childPath = node + "/" + child; - queue.add(childPath); - tree.add(childPath); - } - } - return tree; - } - - /** - * Represents an action taken by ZKUtil, e.g. createAndFailSilent. - * These actions are higher-level than ZKOp actions, which represent - * individual actions in the ZooKeeper API, like create. 
- */ - public abstract static class ZKUtilOp { - private String path; - - private ZKUtilOp(String path) { - this.path = path; - } - - /** - * @return a createAndFailSilent ZKUtilOp - */ - public static ZKUtilOp createAndFailSilent(String path, byte[] data) { - return new CreateAndFailSilent(path, data); - } - - /** - * @return a deleteNodeFailSilent ZKUtilOP - */ - public static ZKUtilOp deleteNodeFailSilent(String path) { - return new DeleteNodeFailSilent(path); - } - - /** - * @return a setData ZKUtilOp - */ - public static ZKUtilOp setData(String path, byte [] data) { - return new SetData(path, data); - } - - /** - * @return path to znode where the ZKOp will occur - */ - public String getPath() { - return path; - } - - /** - * ZKUtilOp representing createAndFailSilent in ZooKeeper - * (attempt to create node, ignore error if already exists) - */ - public static class CreateAndFailSilent extends ZKUtilOp { - private byte [] data; - - private CreateAndFailSilent(String path, byte [] data) { - super(path); - this.data = data; - } - - public byte[] getData() { - return data; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (!(o instanceof CreateAndFailSilent)) return false; - - CreateAndFailSilent op = (CreateAndFailSilent) o; - return getPath().equals(op.getPath()) && Arrays.equals(data, op.data); - } - - @Override - public int hashCode() { - int ret = 17 + getPath().hashCode() * 31; - return ret * 31 + Bytes.hashCode(data); - } - } - - /** - * ZKUtilOp representing deleteNodeFailSilent in ZooKeeper - * (attempt to delete node, ignore error if node doesn't exist) - */ - public static class DeleteNodeFailSilent extends ZKUtilOp { - private DeleteNodeFailSilent(String path) { - super(path); - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (!(o instanceof DeleteNodeFailSilent)) return false; - - return super.equals(o); - } - - @Override - public int hashCode() { - return 
getPath().hashCode(); - } - } - - /** - * ZKUtilOp representing setData in ZooKeeper - */ - public static class SetData extends ZKUtilOp { - private byte [] data; - - private SetData(String path, byte [] data) { - super(path); - this.data = data; - } - - public byte[] getData() { - return data; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (!(o instanceof SetData)) return false; - - SetData op = (SetData) o; - return getPath().equals(op.getPath()) && Arrays.equals(data, op.data); - } - - @Override - public int hashCode() { - int ret = getPath().hashCode(); - return ret * 31 + Bytes.hashCode(data); - } - } - } - - /** - * Convert from ZKUtilOp to ZKOp - */ - private static Op toZooKeeperOp(ZooKeeperWatcher zkw, ZKUtilOp op) - throws UnsupportedOperationException { - if(op == null) return null; - - if (op instanceof CreateAndFailSilent) { - CreateAndFailSilent cafs = (CreateAndFailSilent)op; - return Op.create(cafs.getPath(), cafs.getData(), createACL(zkw, cafs.getPath()), - CreateMode.PERSISTENT); - } else if (op instanceof DeleteNodeFailSilent) { - DeleteNodeFailSilent dnfs = (DeleteNodeFailSilent)op; - return Op.delete(dnfs.getPath(), -1); - } else if (op instanceof SetData) { - SetData sd = (SetData)op; - return Op.setData(sd.getPath(), sd.getData(), -1); - } else { - throw new UnsupportedOperationException("Unexpected ZKUtilOp type: " - + op.getClass().getName()); - } - } - - /** - * Use ZooKeeper's multi-update functionality. - * - * If all of the following are true: - * - runSequentialOnMultiFailure is true - * - on calling multi, we get a ZooKeeper exception that can be handled by a sequential call(*) - * Then: - * - we retry the operations one-by-one (sequentially) - * - * Note *: an example is receiving a NodeExistsException from a "create" call. Without multi, - * a user could call "createAndFailSilent" to ensure that a node exists if they don't care who - * actually created the node (i.e. 
the NodeExistsException from ZooKeeper is caught). - * This will cause all operations in the multi to fail, however, because - * the NodeExistsException that zk.create throws will fail the multi transaction. - * In this case, if the previous conditions hold, the commands are run sequentially, which should - * result in the correct final state, but means that the operations will not run atomically. - * - * @throws KeeperException - */ - public static void multiOrSequential(ZooKeeperWatcher zkw, List ops, - boolean runSequentialOnMultiFailure) throws KeeperException { - if (zkw.getConfiguration().get("hbase.zookeeper.useMulti") != null) { - LOG.warn("hbase.zookeeper.useMulti is deprecated. Default to true always."); - } - if (ops == null) return; - - List zkOps = new LinkedList<>(); - for (ZKUtilOp op : ops) { - zkOps.add(toZooKeeperOp(zkw, op)); - } - try { - zkw.getRecoverableZooKeeper().multi(zkOps); - } catch (KeeperException ke) { - switch (ke.code()) { - case NODEEXISTS: - case NONODE: - case BADVERSION: - case NOAUTH: - // if we get an exception that could be solved by running sequentially - // (and the client asked us to), then break out and run sequentially - if (runSequentialOnMultiFailure) { - LOG.info("On call to ZK.multi, received exception: " + ke.toString() + "." 
- + " Attempting to run operations sequentially because" - + " runSequentialOnMultiFailure is: " + runSequentialOnMultiFailure + "."); - processSequentially(zkw, ops); - break; - } - default: - throw ke; - } - } catch (InterruptedException ie) { - zkw.interruptedException(ie); - } - } - - private static void processSequentially(ZooKeeperWatcher zkw, List ops) - throws KeeperException, NoNodeException { - for (ZKUtilOp op : ops) { - if (op instanceof CreateAndFailSilent) { - createAndFailSilent(zkw, (CreateAndFailSilent) op); - } else if (op instanceof DeleteNodeFailSilent) { - deleteNodeFailSilent(zkw, (DeleteNodeFailSilent) op); - } else if (op instanceof SetData) { - setData(zkw, (SetData) op); - } else { - throw new UnsupportedOperationException("Unexpected ZKUtilOp type: " - + op.getClass().getName()); - } - } - } + public static final int DEFAULT_ZOOKEEPER_DUMP_CONNECTION_TIMEOUT = 1000; // // ZooKeeper cluster information // - /** @return String dump of everything in ZooKeeper. */ - public static String dump(ZooKeeperWatcher zkw) { + public static String dump(ZKWatcher zkw) { StringBuilder sb = new StringBuilder(); try { sb.append("HBase is rooted at ").append(zkw.znodePaths.baseZNode); @@ -1716,7 +68,7 @@ public class ZKUtil { sb.append("<>"); } sb.append("\nBackup master addresses:"); - for (String child : listChildrenNoWatch(zkw, zkw.znodePaths.backupMasterAddressesZNode)) { + for (String child : ZooKeeperUtil.listChildrenNoWatch(zkw, zkw.znodePaths.backupMasterAddressesZNode)) { sb.append("\n ").append(child); } sb.append("\nRegion server holding hbase:meta: " @@ -1729,7 +81,7 @@ public class ZKUtil { + new MetaTableLocator().getMetaRegionLocation(zkw, i)); } sb.append("\nRegion servers:"); - for (String child : listChildrenNoWatch(zkw, zkw.znodePaths.rsZNode)) { + for (String child : ZooKeeperUtil.listChildrenNoWatch(zkw, zkw.znodePaths.rsZNode)) { sb.append("\n ").append(child); } try { @@ -1742,7 +94,9 @@ public class ZKUtil { for (String server : 
servers) { sb.append("\n ").append(server); try { - String[] stat = getServerStats(server, ZKUtil.zkDumpConnectionTimeOut); + int zkDumpConnectionTimeOut = conf.getInt("zookeeper.dump.connection.timeout", + DEFAULT_ZOOKEEPER_DUMP_CONNECTION_TIMEOUT); + String[] stat = ZooKeeperUtil.getServerStats(server, zkDumpConnectionTimeOut); if (stat == null) { sb.append("[Error] invalid quorum server: " + server); @@ -1769,38 +123,21 @@ public class ZKUtil { * @param sb * @throws KeeperException */ - private static void getReplicationZnodesDump(ZooKeeperWatcher zkw, StringBuilder sb) + private static void getReplicationZnodesDump(ZKWatcher zkw, StringBuilder sb) throws KeeperException { String replicationZnode = zkw.znodePaths.replicationZNode; - if (ZKUtil.checkExists(zkw, replicationZnode) == -1) return; + if (ZooKeeperUtil.checkExists(zkw, replicationZnode) == -1) return; // do a ls -r on this znode sb.append("\n").append(replicationZnode).append(": "); - List children = ZKUtil.listChildrenNoWatch(zkw, replicationZnode); + List children = ZooKeeperUtil.listChildrenNoWatch(zkw, replicationZnode); for (String child : children) { - String znode = joinZNode(replicationZnode, child); + String znode = ZooKeeperUtil.joinZNode(replicationZnode, child); if (znode.equals(zkw.znodePaths.peersZNode)) { appendPeersZnodes(zkw, znode, sb); } else if (znode.equals(zkw.znodePaths.queuesZNode)) { appendRSZnodes(zkw, znode, sb); } else if (znode.equals(zkw.znodePaths.hfileRefsZNode)) { - appendHFileRefsZnodes(zkw, znode, sb); - } - } - } - - private static void appendHFileRefsZnodes(ZooKeeperWatcher zkw, String hfileRefsZnode, - StringBuilder sb) throws KeeperException { - sb.append("\n").append(hfileRefsZnode).append(": "); - for (String peerIdZnode : ZKUtil.listChildrenNoWatch(zkw, hfileRefsZnode)) { - String znodeToProcess = ZKUtil.joinZNode(hfileRefsZnode, peerIdZnode); - sb.append("\n").append(znodeToProcess).append(": "); - List peerHFileRefsZnodes = ZKUtil.listChildrenNoWatch(zkw, 
znodeToProcess); - int size = peerHFileRefsZnodes.size(); - for (int i = 0; i < size; i++) { - sb.append(peerHFileRefsZnodes.get(i)); - if (i != size - 1) { - sb.append(", "); - } + ZooKeeperUtil.appendHFileRefsZnodes(zkw, znode, sb); } } } @@ -1810,13 +147,13 @@ public class ZKUtil { * @param zkw * @return aq string of replication znodes and log positions */ - public static String getReplicationZnodesDump(ZooKeeperWatcher zkw) throws KeeperException { + public static String getReplicationZnodesDump(ZKWatcher zkw) throws KeeperException { StringBuilder sb = new StringBuilder(); getReplicationZnodesDump(zkw, sb); return sb.toString(); } - private static void appendRSZnodes(ZooKeeperWatcher zkw, String znode, StringBuilder sb) + private static void appendRSZnodes(ZKWatcher zkw, String znode, StringBuilder sb) throws KeeperException { List stack = new LinkedList<>(); stack.add(znode); @@ -1825,7 +162,7 @@ public class ZKUtil { sb.append("\n").append(znodeToProcess).append(": "); byte[] data; try { - data = ZKUtil.getData(zkw, znodeToProcess); + data = ZooKeeperUtil.getData(zkw, znodeToProcess); } catch (InterruptedException e) { zkw.interruptedException(e); return; @@ -1833,7 +170,7 @@ public class ZKUtil { if (data != null && data.length > 0) { // log position long position = 0; try { - position = ZKUtil.parseWALPositionFrom(ZKUtil.getData(zkw, znodeToProcess)); + position = ZKUtil.parseWALPositionFrom(ZooKeeperUtil.getData(zkw, znodeToProcess)); sb.append(position); } catch (DeserializationException ignored) { } catch (InterruptedException e) { @@ -1841,21 +178,21 @@ public class ZKUtil { return; } } - for (String zNodeChild : ZKUtil.listChildrenNoWatch(zkw, znodeToProcess)) { - stack.add(ZKUtil.joinZNode(znodeToProcess, zNodeChild)); + for (String zNodeChild : ZooKeeperUtil.listChildrenNoWatch(zkw, znodeToProcess)) { + stack.add(ZooKeeperUtil.joinZNode(znodeToProcess, zNodeChild)); } } while (stack.size() > 0); } - private static void 
appendPeersZnodes(ZooKeeperWatcher zkw, String peersZnode, + private static void appendPeersZnodes(ZKWatcher zkw, String peersZnode, StringBuilder sb) throws KeeperException { int pblen = ProtobufUtil.lengthOfPBMagic(); sb.append("\n").append(peersZnode).append(": "); - for (String peerIdZnode : ZKUtil.listChildrenNoWatch(zkw, peersZnode)) { - String znodeToProcess = ZKUtil.joinZNode(peersZnode, peerIdZnode); + for (String peerIdZnode : ZooKeeperUtil.listChildrenNoWatch(zkw, peersZnode)) { + String znodeToProcess = ZooKeeperUtil.joinZNode(peersZnode, peerIdZnode); byte[] data; try { - data = ZKUtil.getData(zkw, znodeToProcess); + data = ZooKeeperUtil.getData(zkw, znodeToProcess); } catch (InterruptedException e) { zkw.interruptedException(e); return; @@ -1875,18 +212,18 @@ public class ZKUtil { } } - private static void appendPeerState(ZooKeeperWatcher zkw, String znodeToProcess, + private static void appendPeerState(ZKWatcher zkw, String znodeToProcess, StringBuilder sb) throws KeeperException, InvalidProtocolBufferException { String peerState = zkw.getConfiguration().get("zookeeper.znode.replication.peers.state", "peer-state"); int pblen = ProtobufUtil.lengthOfPBMagic(); - for (String child : ZKUtil.listChildrenNoWatch(zkw, znodeToProcess)) { + for (String child : ZooKeeperUtil.listChildrenNoWatch(zkw, znodeToProcess)) { if (!child.equals(peerState)) continue; - String peerStateZnode = ZKUtil.joinZNode(znodeToProcess, child); + String peerStateZnode = ZooKeeperUtil.joinZNode(znodeToProcess, child); sb.append("\n").append(peerStateZnode).append(": "); byte[] peerStateData; try { - peerStateData = ZKUtil.getData(zkw, peerStateZnode); + peerStateData = ZooKeeperUtil.getData(zkw, peerStateZnode); ReplicationProtos.ReplicationState.Builder builder = ReplicationProtos.ReplicationState.newBuilder(); ProtobufUtil.mergeFrom(builder, peerStateData, pblen, peerStateData.length - pblen); @@ -1900,156 +237,6 @@ public class ZKUtil { } } - /** - * Gets the statistics from the 
given server. - * - * @param server The server to get the statistics from. - * @param timeout The socket timeout to use. - * @return The array of response strings. - * @throws IOException When the socket communication fails. - */ - public static String[] getServerStats(String server, int timeout) - throws IOException { - String[] sp = server.split(":"); - if (sp == null || sp.length == 0) { - return null; - } - - String host = sp[0]; - int port = sp.length > 1 ? Integer.parseInt(sp[1]) - : HConstants.DEFAULT_ZOOKEPER_CLIENT_PORT; - - InetSocketAddress sockAddr = new InetSocketAddress(host, port); - try (Socket socket = new Socket()) { - socket.connect(sockAddr, timeout); - - socket.setSoTimeout(timeout); - try (PrintWriter out = new PrintWriter(socket.getOutputStream(), true); - BufferedReader in = new BufferedReader(new InputStreamReader(socket.getInputStream()))) { - out.println("stat"); - out.flush(); - ArrayList res = new ArrayList<>(); - while (true) { - String line = in.readLine(); - if (line != null) { - res.add(line); - } else { - break; - } - } - return res.toArray(new String[res.size()]); - } - } - } - - private static void logRetrievedMsg(final ZooKeeperWatcher zkw, - final String znode, final byte [] data, final boolean watcherSet) { - if (!LOG.isTraceEnabled()) return; - LOG.trace(zkw.prefix("Retrieved " + ((data == null)? 0: data.length) + - " byte(s) of data from znode " + znode + - (watcherSet? " and set watcher; ": "; data=") + - (data == null? "null": data.length == 0? "empty": ( - znode.startsWith(zkw.znodePaths.metaZNodePrefix)? - getServerNameOrEmptyString(data): - znode.startsWith(zkw.znodePaths.backupMasterAddressesZNode)? 
- getServerNameOrEmptyString(data): - StringUtils.abbreviate(Bytes.toStringBinary(data), 32))))); - } - - private static String getServerNameOrEmptyString(final byte [] data) { - try { - return ProtobufUtil.parseServerNameFrom(data).toString(); - } catch (DeserializationException e) { - return ""; - } - } - - /** - * Waits for HBase installation's base (parent) znode to become available. - * @throws IOException on ZK errors - */ - public static void waitForBaseZNode(Configuration conf) throws IOException { - LOG.info("Waiting until the base znode is available"); - String parentZNode = conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT, - HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT); - ZooKeeper zk = new ZooKeeper(ZKConfig.getZKQuorumServersString(conf), - conf.getInt(HConstants.ZK_SESSION_TIMEOUT, - HConstants.DEFAULT_ZK_SESSION_TIMEOUT), EmptyWatcher.instance); - - final int maxTimeMs = 10000; - final int maxNumAttempts = maxTimeMs / HConstants.SOCKET_RETRY_WAIT_MS; - - KeeperException keeperEx = null; - try { - try { - for (int attempt = 0; attempt < maxNumAttempts; ++attempt) { - try { - if (zk.exists(parentZNode, false) != null) { - LOG.info("Parent znode exists: " + parentZNode); - keeperEx = null; - break; - } - } catch (KeeperException e) { - keeperEx = e; - } - Threads.sleepWithoutInterrupt(HConstants.SOCKET_RETRY_WAIT_MS); - } - } finally { - zk.close(); - } - } catch (InterruptedException ex) { - Thread.currentThread().interrupt(); - } - - if (keeperEx != null) { - throw new IOException(keeperEx); - } - } - - /** - * Convert a {@link DeserializationException} to a more palatable {@link KeeperException}. - * Used when can't let a {@link DeserializationException} out w/o changing public API. 
- * @param e Exception to convert - * @return Converted exception - */ - public static KeeperException convert(final DeserializationException e) { - KeeperException ke = new KeeperException.DataInconsistencyException(); - ke.initCause(e); - return ke; - } - - /** - * Recursively print the current state of ZK (non-transactional) - * @param root name of the root directory in zk to print - */ - public static void logZKTree(ZooKeeperWatcher zkw, String root) { - if (!LOG.isDebugEnabled()) return; - LOG.debug("Current zk system:"); - String prefix = "|-"; - LOG.debug(prefix + root); - try { - logZKTree(zkw, root, prefix); - } catch (KeeperException e) { - throw new RuntimeException(e); - } - } - - /** - * Helper method to print the current state of the ZK tree. - * @see #logZKTree(ZooKeeperWatcher, String) - * @throws KeeperException if an unexpected exception occurs - */ - protected static void logZKTree(ZooKeeperWatcher zkw, String root, String prefix) - throws KeeperException { - List children = ZKUtil.listChildrenNoWatch(zkw, root); - if (children == null) return; - for (String child : children) { - LOG.debug(prefix + child); - String node = ZKUtil.joinZNode(root.equals("/") ? "" : root, child); - logZKTree(zkw, node, prefix + "---"); - } - } - /** * @param position * @return Serialized protobuf of position with pb magic prefix prepended suitable @@ -2140,4 +327,19 @@ public class ZKUtil { } return storeIds; } + + /** + * @param bytes - Content of a failed region server or recovering region znode. 
+ * @return long - The last flushed sequence Id for the region server + */ + public static long parseLastFlushedSequenceIdFrom(final byte[] bytes) { + long lastRecordedFlushedSequenceId = -1l; + try { + lastRecordedFlushedSequenceId = parseWALPositionFrom(bytes); + } catch (DeserializationException e) { + lastRecordedFlushedSequenceId = -1l; + LOG.warn("Can't parse last flushed sequence Id", e); + } + return lastRecordedFlushedSequenceId; + } } diff --git a/hbase-common/pom.xml b/hbase-common/pom.xml index 135e720557..fd1baf0c43 100644 --- a/hbase-common/pom.xml +++ b/hbase-common/pom.xml @@ -241,6 +241,10 @@ com.google.protobuf protobuf-java
+ + org.apache.hbase.thirdparty + hbase-shaded-protobuf + org.apache.htrace diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/Abortable.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/Abortable.java similarity index 100% rename from hbase-client/src/main/java/org/apache/hadoop/hbase/Abortable.java rename to hbase-common/src/main/java/org/apache/hadoop/hbase/Abortable.java diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java index 45f22a67ca..6bc4a83a03 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java @@ -1029,6 +1029,8 @@ public final class HConstants { public static final String META_REPLICAS_NUM = "hbase.meta.replica.count"; public static final int DEFAULT_META_REPLICA_NUM = 1; + public static final int DEFAULT_REPLICA_ID = 0; + /** * The name of the configuration parameter that specifies * the number of bytes in a newly created checksum chunk. diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufHelpers.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufHelpers.java new file mode 100644 index 0000000000..5437fb614d --- /dev/null +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufHelpers.java @@ -0,0 +1,43 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.protobuf; + +import org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream; +import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message; + +import java.io.IOException; + +public class ProtobufHelpers { + /** + * This version of protobuf's mergeFrom avoids the hard-coded 64MB limit for decoding + * buffers when working with byte arrays + * @param builder current message builder + * @param b byte array + * @param offset + * @param length + * @throws IOException + */ + public static void mergeFrom(Message.Builder builder, byte[] b, int offset, int length) + throws IOException { + final CodedInputStream codedInput = CodedInputStream.newInstance(b, offset, length); + codedInput.setSizeLimit(length); + builder.mergeFrom(codedInput); + codedInput.checkLastTagWas(0); + } +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufMagic.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufMagic.java similarity index 64% rename from hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufMagic.java rename to hbase-common/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufMagic.java index 5268dafb8a..0076327ddb 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufMagic.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufMagic.java @@ -17,16 +17,18 @@ */ package org.apache.hadoop.hbase.protobuf; +import org.apache.hadoop.hbase.exceptions.DeserializationException; +import 
org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; +import static org.apache.hadoop.hbase.util.Bytes.compareTo; + /** * Protobufs utility. */ @InterfaceAudience.Private -public class ProtobufMagic { - - private ProtobufMagic() { - } +final public class ProtobufMagic { + private ProtobufMagic() {} /** * Magic we put ahead of a serialized protobuf message. @@ -44,30 +46,28 @@ public class ProtobufMagic { return isPBMagicPrefix(bytes, 0, bytes.length); } - /* - * Copied from Bytes.java to here - * hbase-common now depends on hbase-protocol - * Referencing Bytes.java directly would create circular dependency + /** + * Prepend the passed bytes with four bytes of magic, {@link ProtobufMagic#PB_MAGIC}, + * to flag what follows as a protobuf in hbase. Prepend these bytes to all content written to + * znodes, etc. + * @param bytes Bytes to decorate + * @return The passed bytes with magic prepended (Creates a new + * byte array that is bytes.length plus {@link ProtobufMagic#PB_MAGIC}.length. 
*/ - private static int compareTo(byte[] buffer1, int offset1, int length1, - byte[] buffer2, int offset2, int length2) { - // Short circuit equal case - if (buffer1 == buffer2 && - offset1 == offset2 && - length1 == length2) { - return 0; - } - // Bring WritableComparator code local - int end1 = offset1 + length1; - int end2 = offset2 + length2; - for (int i = offset1, j = offset2; i < end1 && j < end2; i++, j++) { - int a = (buffer1[i] & 0xff); - int b = (buffer2[j] & 0xff); - if (a != b) { - return a - b; - } + public static byte [] prependPBMagic(final byte [] bytes) { + return Bytes.add(PB_MAGIC, bytes); + } + + + /** + * @param bytes bytes to check + * @throws DeserializationException if we are missing the pb magic prefix + */ + public static void expectPBMagicPrefix(final byte [] bytes) throws DeserializationException { + if (!isPBMagicPrefix(bytes)) { + throw new DeserializationException("Missing pb magic " + + Bytes.toString(PB_MAGIC) + " prefix"); } - return length1 - length2; } /** @@ -87,4 +87,4 @@ public class ProtobufMagic { public static int lengthOfPBMagic() { return PB_MAGIC.length; } -} + } diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadEndpointClient.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadEndpointClient.java index d32e6ea098..61561759ac 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadEndpointClient.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadEndpointClient.java @@ -22,7 +22,6 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; -import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-examples/pom.xml b/hbase-examples/pom.xml index 9f32fec1de..89b3d6c3e0 100644 --- a/hbase-examples/pom.xml +++ 
b/hbase-examples/pom.xml @@ -130,6 +130,10 @@ org.apache.hbase hbase-common + + org.apache.hbase + hbase-zookeeper + org.apache.hbase hbase-protocol diff --git a/hbase-examples/src/test/java/org/apache/hadoop/hbase/coprocessor/example/TestZooKeeperScanPolicyObserver.java b/hbase-examples/src/test/java/org/apache/hadoop/hbase/coprocessor/example/TestZooKeeperScanPolicyObserver.java index 2c40cbecec..33b37924a8 100644 --- a/hbase-examples/src/test/java/org/apache/hadoop/hbase/coprocessor/example/TestZooKeeperScanPolicyObserver.java +++ b/hbase-examples/src/test/java/org/apache/hadoop/hbase/coprocessor/example/TestZooKeeperScanPolicyObserver.java @@ -77,7 +77,7 @@ public class TestZooKeeperScanPolicyObserver { private void setExpireBefore(long time) throws KeeperException, InterruptedException, IOException { - ZooKeeper zk = UTIL.getZooKeeperWatcher().getRecoverableZooKeeper().getZooKeeper(); + ZooKeeper zk = UTIL.getZooKeeperWatcher().getRecoverableZK().getZooKeeper(); if (zk.exists(ZooKeeperScanPolicyObserver.NODE, false) == null) { zk.create(ZooKeeperScanPolicyObserver.NODE, Bytes.toBytes(time), ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT); diff --git a/hbase-it/pom.xml b/hbase-it/pom.xml index 42c8da7e35..1e1faf2568 100644 --- a/hbase-it/pom.xml +++ b/hbase-it/pom.xml @@ -190,6 +190,11 @@ hbase-common jar + + org.apache.hbase + hbase-zookeeper + jar + org.apache.hbase hbase-protocol diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java index 8c0d273982..797b89b202 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java @@ -26,6 +26,7 @@ import java.util.TreeSet; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ClusterManager.ServiceType; +import 
org.apache.hadoop.hbase.zookeeper.ZooKeeperConnectionException; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.ClusterConnection; diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestMetaReplicas.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestMetaReplicas.java index 667daa8e54..3d5825e84b 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestMetaReplicas.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestMetaReplicas.java @@ -26,8 +26,8 @@ import org.apache.hadoop.hbase.client.TestMetaWithReplicas; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.regionserver.StorefileRefresherChore; import org.apache.hadoop.hbase.testclassification.IntegrationTests; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperUtil; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; @@ -36,7 +36,7 @@ import org.junit.experimental.categories.Category; /** * An integration test that starts the cluster with three replicas for the meta * It then creates a table, flushes the meta, kills the server holding the primary. - * After that a client issues put/get requests on the created table - the other + * After that a client issues put/get requests on the created table - the other * replicas of the meta would be used to get the location of the region of the created * table. */ @@ -59,14 +59,14 @@ public class IntegrationTestMetaReplicas { StorefileRefresherChore.REGIONSERVER_STOREFILE_REFRESH_PERIOD, 1000); // Make sure there are three servers. 
util.initializeCluster(3); - ZooKeeperWatcher zkw = util.getZooKeeperWatcher(); + ZKWatcher zkw = util.getZooKeeperWatcher(); Configuration conf = util.getConfiguration(); String baseZNode = conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT, HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT); - String primaryMetaZnode = ZKUtil.joinZNode(baseZNode, + String primaryMetaZnode = ZooKeeperUtil.joinZNode(baseZNode, conf.get("zookeeper.znode.metaserver", "meta-region-server")); // check that the data in the znode is parseable (this would also mean the znode exists) - byte[] data = ZKUtil.getData(zkw, primaryMetaZnode); + byte[] data = ZooKeeperUtil.getData(zkw, primaryMetaZnode); ProtobufUtil.toServerName(data); waitUntilZnodeAvailable(1); waitUntilZnodeAvailable(2); @@ -83,7 +83,7 @@ public class IntegrationTestMetaReplicas { String znode = util.getZooKeeperWatcher().znodePaths.getZNodeForReplica(replicaId); int i = 0; while (i < 1000) { - if (ZKUtil.checkExists(util.getZooKeeperWatcher(), znode) == -1) { + if (ZooKeeperUtil.checkExists(util.getZooKeeperWatcher(), znode) == -1) { Thread.sleep(100); i++; } else break; diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestZKAndFSPermissions.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestZKAndFSPermissions.java index 0d85e4273b..6c065316d6 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestZKAndFSPermissions.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestZKAndFSPermissions.java @@ -37,9 +37,9 @@ import org.apache.hadoop.hbase.IntegrationTestingUtility; import org.apache.hadoop.hbase.testclassification.IntegrationTests; import org.apache.hadoop.hbase.util.AbstractHBaseTool; import org.apache.hadoop.hbase.util.FSUtils; -import org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import 
org.apache.hadoop.hbase.zookeeper.RecoverableZK; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperUtil; import org.apache.hadoop.util.ToolRunner; import org.apache.zookeeper.KeeperException; import org.apache.zookeeper.KeeperException.Code; @@ -138,8 +138,8 @@ public class IntegrationTestZKAndFSPermissions extends AbstractHBaseTool { private void testZNodeACLs() throws IOException, KeeperException, InterruptedException { - ZooKeeperWatcher watcher = new ZooKeeperWatcher(conf, "IntegrationTestZnodeACLs", null); - RecoverableZooKeeper zk = ZKUtil.connect(this.conf, watcher); + ZKWatcher watcher = new ZKWatcher(conf, "IntegrationTestZnodeACLs", null); + RecoverableZK zk = ZooKeeperUtil.connect(this.conf, watcher); String baseZNode = watcher.znodePaths.baseZNode; @@ -154,8 +154,8 @@ public class IntegrationTestZKAndFSPermissions extends AbstractHBaseTool { LOG.info("Checking ZK permissions: SUCCESS"); } - private void checkZnodePermsRecursive(ZooKeeperWatcher watcher, - RecoverableZooKeeper zk, String znode) throws KeeperException, InterruptedException { + private void checkZnodePermsRecursive(ZKWatcher watcher, + RecoverableZK zk, String znode) throws KeeperException, InterruptedException { boolean expectedWorldReadable = watcher.isClientReadable(znode); @@ -165,7 +165,7 @@ public class IntegrationTestZKAndFSPermissions extends AbstractHBaseTool { List children = zk.getChildren(znode, false); for (String child : children) { - checkZnodePermsRecursive(watcher, zk, ZKUtil.joinZNode(znode, child)); + checkZnodePermsRecursive(watcher, zk, ZooKeeperUtil.joinZNode(znode, child)); } } catch (KeeperException ke) { // if we are not authenticated for listChildren, it is fine. 
@@ -175,7 +175,7 @@ public class IntegrationTestZKAndFSPermissions extends AbstractHBaseTool { } } - private void assertZnodePerms(RecoverableZooKeeper zk, String znode, + private void assertZnodePerms(RecoverableZK zk, String znode, boolean expectedWorldReadable) throws KeeperException, InterruptedException { Stat stat = new Stat(); List acls; @@ -200,7 +200,7 @@ public class IntegrationTestZKAndFSPermissions extends AbstractHBaseTool { assertTrue(expectedWorldReadable); // assert that anyone can only read assertEquals(perms, Perms.READ); - } else if (superUsers != null && ZooKeeperWatcher.isSuperUserId(superUsers, id)) { + } else if (superUsers != null && ZKWatcher.isSuperUserId(superUsers, id)) { // assert that super user has all the permissions assertEquals(perms, Perms.ALL); } else if (new Id("sasl", masterPrincipal).equals(id)) { diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java index f857d4b2b2..93359cd989 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java @@ -47,7 +47,7 @@ import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.ZooKeeperConnectionException; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperConnectionException; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; @@ -64,7 +64,7 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.MapReduceCell; import org.apache.hadoop.hbase.zookeeper.ZKClusterId; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import 
org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.hadoop.io.RawComparator; import org.apache.hadoop.io.WritableComparable; import org.apache.hadoop.io.WritableComparator; @@ -408,10 +408,10 @@ public class Import extends Configured implements Tool { LOG.info("setting WAL durability to default."); } // TODO: This is kind of ugly doing setup of ZKW just to read the clusterid. - ZooKeeperWatcher zkw = null; + ZKWatcher zkw = null; Exception ex = null; try { - zkw = new ZooKeeperWatcher(conf, context.getTaskAttemptID().toString(), null); + zkw = new ZKWatcher(conf, context.getTaskAttemptID().toString(), null); clusterIds = Collections.singletonList(ZKClusterId.getUUIDForCluster(zkw)); } catch (ZooKeeperConnectionException e) { ex = e; diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java index acf6ff8f73..b8de9ec088 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java @@ -57,7 +57,7 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.Threads; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.hadoop.mapreduce.InputSplit; import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.mapreduce.MRJobConfig; @@ -330,10 +330,10 @@ public class VerifyReplication extends Configured implements Tool { private static Pair getPeerQuorumConfig( final Configuration conf, String peerId) throws IOException { - ZooKeeperWatcher localZKW = null; + ZKWatcher localZKW = null; ReplicationPeerZKImpl peer = null; try { - localZKW = new ZooKeeperWatcher(conf, 
"VerifyReplication", + localZKW = new ZKWatcher(conf, "VerifyReplication", new Abortable() { @Override public void abort(String why, Throwable e) {} @Override public boolean isAborted() {return false;} diff --git a/hbase-replication/pom.xml b/hbase-replication/pom.xml index a56a470c81..a12bdf6cce 100644 --- a/hbase-replication/pom.xml +++ b/hbase-replication/pom.xml @@ -125,6 +125,10 @@ org.apache.hbase hbase-common + + org.apache.hbase + hbase-zookeeper + org.apache.hbase diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java index 9d12211a96..b41bb6ac7c 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java @@ -19,11 +19,11 @@ package org.apache.hadoop.hbase.replication; import org.apache.commons.lang3.reflect.ConstructorUtils; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.Stoppable; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; /** * A factory class for instantiating replication objects that deal with replication state. 
@@ -48,17 +48,17 @@ public class ReplicationFactory { return (ReplicationQueuesClient) ConstructorUtils.invokeConstructor(classToBuild, args); } - public static ReplicationPeers getReplicationPeers(final ZooKeeperWatcher zk, Configuration conf, + public static ReplicationPeers getReplicationPeers(final ZKWatcher zk, Configuration conf, Abortable abortable) { return getReplicationPeers(zk, conf, null, abortable); } - public static ReplicationPeers getReplicationPeers(final ZooKeeperWatcher zk, Configuration conf, + public static ReplicationPeers getReplicationPeers(final ZKWatcher zk, Configuration conf, final ReplicationQueuesClient queuesClient, Abortable abortable) { return new ReplicationPeersZKImpl(zk, conf, queuesClient, abortable); } - public static ReplicationTracker getReplicationTracker(ZooKeeperWatcher zookeeper, + public static ReplicationTracker getReplicationTracker(ZKWatcher zookeeper, final ReplicationPeers replicationPeers, Configuration conf, Abortable abortable, Stoppable stopper) { return new ReplicationTrackerZKImpl(zookeeper, replicationPeers, conf, abortable, stopper); diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerZKImpl.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerZKImpl.java index 2de61cba2b..596ebc73c1 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerZKImpl.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerZKImpl.java @@ -30,14 +30,14 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperUtil; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper; import 
org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperNodeTracker; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKNodeTracker; import org.apache.zookeeper.KeeperException; import org.apache.zookeeper.KeeperException.NodeExistsException; @@ -62,7 +62,7 @@ public class ReplicationPeerZKImpl extends ReplicationStateZKBase * @param id string representation of this peer's identifier * @param peerConfig configuration for the replication peer */ - public ReplicationPeerZKImpl(ZooKeeperWatcher zkWatcher, Configuration conf, + public ReplicationPeerZKImpl(ZKWatcher zkWatcher, Configuration conf, String id, ReplicationPeerConfig peerConfig, Abortable abortable) throws ReplicationException { @@ -86,7 +86,7 @@ public class ReplicationPeerZKImpl extends ReplicationStateZKBase try { this.readPeerStateZnode(); } catch (DeserializationException e) { - throw ZKUtil.convert(e); + throw ZooKeeperUtil.convert(e); } } @@ -244,11 +244,11 @@ public class ReplicationPeerZKImpl extends ReplicationStateZKBase */ private boolean ensurePeerEnabled(final String path) throws NodeExistsException, KeeperException { - if (ZKUtil.checkExists(zookeeper, path) == -1) { + if (ZooKeeperUtil.checkExists(zookeeper, path) == -1) { // There is a race b/w PeerWatcher and ReplicationZookeeper#add method to create the // peer-state znode. This happens while adding a peer. // The peer state data is set as "ENABLED" by default. 
- ZKUtil.createNodeIfNotExistsAndWatch(zookeeper, path, + ZooKeeperUtil.createNodeIfNotExistsAndWatch(zookeeper, path, ReplicationStateZKBase.ENABLED_ZNODE_BYTES); return true; } @@ -258,9 +258,9 @@ public class ReplicationPeerZKImpl extends ReplicationStateZKBase /** * Tracker for state of this peer */ - public class PeerStateTracker extends ZooKeeperNodeTracker { + public class PeerStateTracker extends ZKNodeTracker { - public PeerStateTracker(String peerStateZNode, ZooKeeperWatcher watcher, + public PeerStateTracker(String peerStateZNode, ZKWatcher watcher, Abortable abortable) { super(watcher, peerStateZNode, abortable); } @@ -281,11 +281,11 @@ public class ReplicationPeerZKImpl extends ReplicationStateZKBase /** * Tracker for PeerConfigNode of this peer */ - public class PeerConfigTracker extends ZooKeeperNodeTracker { + public class PeerConfigTracker extends ZKNodeTracker { ReplicationPeerConfigListener listener; - public PeerConfigTracker(String peerConfigNode, ZooKeeperWatcher watcher, + public PeerConfigTracker(String peerConfigNode, ZKWatcher watcher, Abortable abortable) { super(watcher, peerConfigNode, abortable); } diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java index ca545f7880..8886343ee0 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java @@ -35,6 +35,8 @@ import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.CompoundConfiguration; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperUtil; import org.apache.yetus.audience.InterfaceAudience; import 
org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper; import org.apache.hadoop.hbase.exceptions.DeserializationException; @@ -42,9 +44,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos; import org.apache.hadoop.hbase.replication.ReplicationPeer.PeerState; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.zookeeper.ZKConfig; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; -import org.apache.hadoop.hbase.zookeeper.ZKUtil.ZKUtilOp; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperUtil.ZKUtilOp; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.zookeeper.KeeperException; /** @@ -83,7 +84,7 @@ public class ReplicationPeersZKImpl extends ReplicationStateZKBase implements Re private static final Log LOG = LogFactory.getLog(ReplicationPeersZKImpl.class); - public ReplicationPeersZKImpl(final ZooKeeperWatcher zk, final Configuration conf, + public ReplicationPeersZKImpl(final ZKWatcher zk, final Configuration conf, final ReplicationQueuesClient queuesClient, Abortable abortable) { super(zk, conf, abortable); this.abortable = abortable; @@ -94,8 +95,8 @@ public class ReplicationPeersZKImpl extends ReplicationStateZKBase implements Re @Override public void init() throws ReplicationException { try { - if (ZKUtil.checkExists(this.zookeeper, this.peersZNode) < 0) { - ZKUtil.createWithParents(this.zookeeper, this.peersZNode); + if (ZooKeeperUtil.checkExists(this.zookeeper, this.peersZNode) < 0) { + ZooKeeperUtil.createWithParents(this.zookeeper, this.peersZNode); } } catch (KeeperException e) { throw new ReplicationException("Could not initialize replication peers", e); @@ -126,18 +127,18 @@ public class ReplicationPeersZKImpl extends ReplicationStateZKBase implements Re checkQueuesDeleted(id); - ZKUtil.createWithParents(this.zookeeper, this.peersZNode); + ZooKeeperUtil.createWithParents(this.zookeeper, this.peersZNode); List 
listOfOps = new ArrayList<>(2); - ZKUtilOp op1 = ZKUtilOp.createAndFailSilent(getPeerNode(id), + ZKUtilOp op1 = ZooKeeperUtil.ZKUtilOp.createAndFailSilent(getPeerNode(id), ReplicationSerDeHelper.toByteArray(peerConfig)); // b/w PeerWatcher and ReplicationZookeeper#add method to create the // peer-state znode. This happens while adding a peer // The peer state data is set as "ENABLED" by default. - ZKUtilOp op2 = ZKUtilOp.createAndFailSilent(getPeerStateNode(id), ENABLED_ZNODE_BYTES); + ZKUtilOp op2 = ZooKeeperUtil.ZKUtilOp.createAndFailSilent(getPeerStateNode(id), ENABLED_ZNODE_BYTES); listOfOps.add(op1); listOfOps.add(op2); - ZKUtil.multiOrSequential(this.zookeeper, listOfOps, false); + ZooKeeperUtil.multiOrSequential(this.zookeeper, listOfOps, false); // A peer is enabled by default } catch (KeeperException e) { throw new ReplicationException("Could not add peer with id=" + id @@ -152,7 +153,8 @@ public class ReplicationPeersZKImpl extends ReplicationStateZKBase implements Re throw new IllegalArgumentException("Cannot remove peer with id=" + id + " because that id does not exist."); } - ZKUtil.deleteNodeRecursively(this.zookeeper, ZKUtil.joinZNode(this.peersZNode, id)); + ZooKeeperUtil + .deleteNodeRecursively(this.zookeeper, ZooKeeperUtil.joinZNode(this.peersZNode, id)); } catch (KeeperException e) { throw new ReplicationException("Could not remove peer with id=" + id, e); } @@ -204,7 +206,7 @@ public class ReplicationPeersZKImpl extends ReplicationStateZKBase implements Re throw new ReplicationException("Unable to get tableCFs of the peer with id=" + id); } rpc.setTableCFsMap(tableCFs); - ZKUtil.setData(this.zookeeper, getPeerNode(id), + ZooKeeperUtil.setData(this.zookeeper, getPeerNode(id), ReplicationSerDeHelper.toByteArray(rpc)); LOG.info("Peer tableCFs with id= " + id + " is now " + ReplicationSerDeHelper.convertToString(tableCFs)); @@ -230,7 +232,8 @@ public class ReplicationPeersZKImpl extends ReplicationStateZKBase implements Re } String peerStateZNode = 
getPeerStateNode(id); try { - return ReplicationPeerZKImpl.isStateEnabled(ZKUtil.getData(this.zookeeper, peerStateZNode)); + return ReplicationPeerZKImpl.isStateEnabled( + ZooKeeperUtil.getData(this.zookeeper, peerStateZNode)); } catch (KeeperException e) { throw new ReplicationException(e); } catch (DeserializationException e) { @@ -249,7 +252,7 @@ public class ReplicationPeersZKImpl extends ReplicationStateZKBase implements Re Map peers = new TreeMap<>(); List ids = null; try { - ids = ZKUtil.listChildrenNoWatch(this.zookeeper, this.peersZNode); + ids = ZooKeeperUtil.listChildrenNoWatch(this.zookeeper, this.peersZNode); for (String id : ids) { ReplicationPeerConfig peerConfig = getReplicationPeerConfig(id); if (peerConfig == null) { @@ -286,7 +289,7 @@ public class ReplicationPeersZKImpl extends ReplicationStateZKBase implements Re String znode = getPeerNode(peerId); byte[] data = null; try { - data = ZKUtil.getData(this.zookeeper, znode); + data = ZooKeeperUtil.getData(this.zookeeper, znode); } catch (InterruptedException e) { LOG.warn("Could not get configuration for peer because the thread " + "was interrupted. 
peerId=" + peerId); @@ -370,7 +373,7 @@ public class ReplicationPeersZKImpl extends ReplicationStateZKBase implements Re existingConfig.setBandwidth(newConfig.getBandwidth()); try { - ZKUtil.setData(this.zookeeper, getPeerNode(id), + ZooKeeperUtil.setData(this.zookeeper, getPeerNode(id), ReplicationSerDeHelper.toByteArray(existingConfig)); } catch(KeeperException ke){ @@ -386,7 +389,7 @@ public class ReplicationPeersZKImpl extends ReplicationStateZKBase implements Re public List getAllPeerIds() { List ids = null; try { - ids = ZKUtil.listChildrenAndWatchThem(this.zookeeper, this.peersZNode); + ids = ZooKeeperUtil.listChildrenAndWatchThem(this.zookeeper, this.peersZNode); } catch (KeeperException e) { this.abortable.abort("Cannot get the list of peers ", e); } @@ -400,7 +403,7 @@ public class ReplicationPeersZKImpl extends ReplicationStateZKBase implements Re private void addExistingPeers() throws ReplicationException { List znodes = null; try { - znodes = ZKUtil.listChildrenNoWatch(this.zookeeper, this.peersZNode); + znodes = ZooKeeperUtil.listChildrenNoWatch(this.zookeeper, this.peersZNode); } catch (KeeperException e) { throw new ReplicationException("Error getting the list of peer clusters.", e); } @@ -473,10 +476,10 @@ public class ReplicationPeersZKImpl extends ReplicationStateZKBase implements Re byte[] stateBytes = (state == ReplicationProtos.ReplicationState.State.ENABLED) ? 
ENABLED_ZNODE_BYTES : DISABLED_ZNODE_BYTES; - if (ZKUtil.checkExists(this.zookeeper, peerStateZNode) != -1) { - ZKUtil.setData(this.zookeeper, peerStateZNode, stateBytes); + if (ZooKeeperUtil.checkExists(this.zookeeper, peerStateZNode) != -1) { + ZooKeeperUtil.setData(this.zookeeper, peerStateZNode, stateBytes); } else { - ZKUtil.createAndWatch(this.zookeeper, peerStateZNode, stateBytes); + ZooKeeperUtil.createAndWatch(this.zookeeper, peerStateZNode, stateBytes); } LOG.info("Peer with id= " + id + " is now " + state.name()); } catch (KeeperException e) { @@ -534,7 +537,7 @@ public class ReplicationPeersZKImpl extends ReplicationStateZKBase implements Re } } // Check for hfile-refs queue - if (-1 != ZKUtil.checkExists(zookeeper, hfileRefsZNode) + if (-1 != ZooKeeperUtil.checkExists(zookeeper, hfileRefsZNode) && queuesClient.getAllPeersFromHFileRefsQueue().contains(peerId)) { throw new ReplicationException("Undeleted queue for peerId: " + peerId + ", found in hfile-refs node path " + hfileRefsZNode); diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesArguments.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesArguments.java index c8328bdec2..75789ce7e2 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesArguments.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesArguments.java @@ -20,8 +20,9 @@ package org.apache.hadoop.hbase.replication; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Abortable; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; /** * Wrapper around common arguments used to construct ReplicationQueues. 
Used to construct various @@ -30,7 +31,7 @@ import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; @InterfaceAudience.Private public class ReplicationQueuesArguments { - private ZooKeeperWatcher zk; + private ZKWatcher zk; private Configuration conf; private Abortable abort; @@ -39,16 +40,16 @@ public class ReplicationQueuesArguments { this.abort = abort; } - public ReplicationQueuesArguments(Configuration conf, Abortable abort, ZooKeeperWatcher zk) { + public ReplicationQueuesArguments(Configuration conf, Abortable abort, ZKWatcher zk) { this(conf, abort); setZk(zk); } - public ZooKeeperWatcher getZk() { + public ZKWatcher getZk() { return zk; } - public void setZk(ZooKeeperWatcher zk) { + public void setZk(ZKWatcher zk) { this.zk = zk; } diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesClientArguments.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesClientArguments.java index 67258c7e17..4c4540e122 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesClientArguments.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesClientArguments.java @@ -21,7 +21,7 @@ package org.apache.hadoop.hbase.replication; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Abortable; import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; /** * Wrapper around common arguments used to construct ReplicationQueuesClient. 
Used to construct @@ -31,7 +31,7 @@ import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; @InterfaceAudience.Private public class ReplicationQueuesClientArguments extends ReplicationQueuesArguments { public ReplicationQueuesClientArguments(Configuration conf, Abortable abort, - ZooKeeperWatcher zk) { + ZKWatcher zk) { super(conf, abort, zk); } public ReplicationQueuesClientArguments(Configuration conf, Abortable abort) { diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesClientZKImpl.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesClientZKImpl.java index 95b2e04d2e..9fd8d5d3e7 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesClientZKImpl.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesClientZKImpl.java @@ -25,11 +25,11 @@ import org.apache.hadoop.hbase.shaded.com.google.common.collect.ImmutableSet; import org.apache.hadoop.hbase.shaded.com.google.common.collect.Sets; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperUtil; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Abortable; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.apache.zookeeper.KeeperException; import org.apache.zookeeper.data.Stat; @@ -43,7 +43,7 @@ public class ReplicationQueuesClientZKImpl extends ReplicationStateZKBase implem this(args.getZk(), args.getConf(), args.getAbortable()); } - public ReplicationQueuesClientZKImpl(final ZooKeeperWatcher zk, Configuration conf, + public ReplicationQueuesClientZKImpl(final ZKWatcher zk, Configuration conf, Abortable abortable) { super(zk, conf, abortable); } @@ -51,8 +51,8 @@ 
public class ReplicationQueuesClientZKImpl extends ReplicationStateZKBase implem @Override public void init() throws ReplicationException { try { - if (ZKUtil.checkExists(this.zookeeper, this.queuesZNode) < 0) { - ZKUtil.createWithParents(this.zookeeper, this.queuesZNode); + if (ZooKeeperUtil.checkExists(this.zookeeper, this.queuesZNode) < 0) { + ZooKeeperUtil.createWithParents(this.zookeeper, this.queuesZNode); } } catch (KeeperException e) { throw new ReplicationException("Internal error while initializing a queues client", e); @@ -61,11 +61,11 @@ public class ReplicationQueuesClientZKImpl extends ReplicationStateZKBase implem @Override public List getLogsInQueue(String serverName, String queueId) throws KeeperException { - String znode = ZKUtil.joinZNode(this.queuesZNode, serverName); - znode = ZKUtil.joinZNode(znode, queueId); + String znode = ZooKeeperUtil.joinZNode(this.queuesZNode, serverName); + znode = ZooKeeperUtil.joinZNode(znode, queueId); List result = null; try { - result = ZKUtil.listChildrenNoWatch(this.zookeeper, znode); + result = ZooKeeperUtil.listChildrenNoWatch(this.zookeeper, znode); } catch (KeeperException e) { this.abortable.abort("Failed to get list of wals for queueId=" + queueId + " and serverName=" + serverName, e); @@ -76,10 +76,10 @@ public class ReplicationQueuesClientZKImpl extends ReplicationStateZKBase implem @Override public List getAllQueues(String serverName) throws KeeperException { - String znode = ZKUtil.joinZNode(this.queuesZNode, serverName); + String znode = ZooKeeperUtil.joinZNode(this.queuesZNode, serverName); List result = null; try { - result = ZKUtil.listChildrenNoWatch(this.zookeeper, znode); + result = ZooKeeperUtil.listChildrenNoWatch(this.zookeeper, znode); } catch (KeeperException e) { this.abortable.abort("Failed to get list of queues for serverName=" + serverName, e); throw e; @@ -128,7 +128,7 @@ public class ReplicationQueuesClientZKImpl extends ReplicationStateZKBase implem public int 
getQueuesZNodeCversion() throws KeeperException { try { Stat stat = new Stat(); - ZKUtil.getDataNoWatch(this.zookeeper, this.queuesZNode, stat); + ZooKeeperUtil.getDataNoWatch(this.zookeeper, this.queuesZNode, stat); return stat.getCversion(); } catch (KeeperException e) { this.abortable.abort("Failed to get stat of replication rs node", e); @@ -140,7 +140,7 @@ public class ReplicationQueuesClientZKImpl extends ReplicationStateZKBase implem public int getHFileRefsNodeChangeVersion() throws KeeperException { Stat stat = new Stat(); try { - ZKUtil.getDataNoWatch(this.zookeeper, this.hfileRefsZNode, stat); + ZooKeeperUtil.getDataNoWatch(this.zookeeper, this.hfileRefsZNode, stat); } catch (KeeperException e) { this.abortable.abort("Failed to get stat of replication hfile references node.", e); throw e; @@ -152,7 +152,7 @@ public class ReplicationQueuesClientZKImpl extends ReplicationStateZKBase implem public List getAllPeersFromHFileRefsQueue() throws KeeperException { List result = null; try { - result = ZKUtil.listChildrenNoWatch(this.zookeeper, this.hfileRefsZNode); + result = ZooKeeperUtil.listChildrenNoWatch(this.zookeeper, this.hfileRefsZNode); } catch (KeeperException e) { this.abortable.abort("Failed to get list of all peers in hfile references node.", e); throw e; @@ -162,10 +162,10 @@ public class ReplicationQueuesClientZKImpl extends ReplicationStateZKBase implem @Override public List getReplicableHFiles(String peerId) throws KeeperException { - String znode = ZKUtil.joinZNode(this.hfileRefsZNode, peerId); + String znode = ZooKeeperUtil.joinZNode(this.hfileRefsZNode, peerId); List result = null; try { - result = ZKUtil.listChildrenNoWatch(this.zookeeper, znode); + result = ZooKeeperUtil.listChildrenNoWatch(this.zookeeper, znode); } catch (KeeperException e) { this.abortable.abort("Failed to get list of hfile references for peerId=" + peerId, e); throw e; diff --git 
a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java index 8e61df9419..298eb09bac 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java @@ -25,19 +25,18 @@ import java.util.TreeSet; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperUtil; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.exceptions.DeserializationException; -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.zookeeper.ZKUtil; -import org.apache.hadoop.hbase.zookeeper.ZKUtil.ZKUtilOp; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperUtil.ZKUtilOp; import org.apache.zookeeper.KeeperException; /** @@ -74,17 +73,17 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R this(args.getZk(), args.getConf(), args.getAbortable()); } - public ReplicationQueuesZKImpl(final ZooKeeperWatcher zk, Configuration conf, + public ReplicationQueuesZKImpl(final ZKWatcher zk, Configuration conf, Abortable abortable) { super(zk, conf, abortable); } @Override public void init(String serverName) throws ReplicationException { - this.myQueuesZnode = ZKUtil.joinZNode(this.queuesZNode, serverName); + 
this.myQueuesZnode = ZooKeeperUtil.joinZNode(this.queuesZNode, serverName); try { - if (ZKUtil.checkExists(this.zookeeper, this.myQueuesZnode) < 0) { - ZKUtil.createWithParents(this.zookeeper, this.myQueuesZnode); + if (ZooKeeperUtil.checkExists(this.zookeeper, this.myQueuesZnode) < 0) { + ZooKeeperUtil.createWithParents(this.zookeeper, this.myQueuesZnode); } } catch (KeeperException e) { throw new ReplicationException("Could not initialize replication queues.", e); @@ -92,8 +91,8 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R if (conf.getBoolean(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY, HConstants.REPLICATION_BULKLOAD_ENABLE_DEFAULT)) { try { - if (ZKUtil.checkExists(this.zookeeper, this.hfileRefsZNode) < 0) { - ZKUtil.createWithParents(this.zookeeper, this.hfileRefsZNode); + if (ZooKeeperUtil.checkExists(this.zookeeper, this.hfileRefsZNode) < 0) { + ZooKeeperUtil.createWithParents(this.zookeeper, this.hfileRefsZNode); } } catch (KeeperException e) { throw new ReplicationException("Could not initialize hfile references replication queue.", @@ -105,7 +104,8 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R @Override public void removeQueue(String queueId) { try { - ZKUtil.deleteNodeRecursively(this.zookeeper, ZKUtil.joinZNode(this.myQueuesZnode, queueId)); + ZooKeeperUtil.deleteNodeRecursively(this.zookeeper, ZooKeeperUtil + .joinZNode(this.myQueuesZnode, queueId)); } catch (KeeperException e) { this.abortable.abort("Failed to delete queue (queueId=" + queueId + ")", e); } @@ -113,10 +113,10 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R @Override public void addLog(String queueId, String filename) throws ReplicationException { - String znode = ZKUtil.joinZNode(this.myQueuesZnode, queueId); - znode = ZKUtil.joinZNode(znode, filename); + String znode = ZooKeeperUtil.joinZNode(this.myQueuesZnode, queueId); + znode = ZooKeeperUtil.joinZNode(znode, filename); 
try { - ZKUtil.createWithParents(this.zookeeper, znode); + ZooKeeperUtil.createWithParents(this.zookeeper, znode); } catch (KeeperException e) { throw new ReplicationException( "Could not add log because znode could not be created. queueId=" + queueId @@ -127,9 +127,9 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R @Override public void removeLog(String queueId, String filename) { try { - String znode = ZKUtil.joinZNode(this.myQueuesZnode, queueId); - znode = ZKUtil.joinZNode(znode, filename); - ZKUtil.deleteNode(this.zookeeper, znode); + String znode = ZooKeeperUtil.joinZNode(this.myQueuesZnode, queueId); + znode = ZooKeeperUtil.joinZNode(znode, filename); + ZooKeeperUtil.deleteNode(this.zookeeper, znode); } catch (KeeperException e) { this.abortable.abort("Failed to remove wal from queue (queueId=" + queueId + ", filename=" + filename + ")", e); @@ -139,10 +139,10 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R @Override public void setLogPosition(String queueId, String filename, long position) { try { - String znode = ZKUtil.joinZNode(this.myQueuesZnode, queueId); - znode = ZKUtil.joinZNode(znode, filename); + String znode = ZooKeeperUtil.joinZNode(this.myQueuesZnode, queueId); + znode = ZooKeeperUtil.joinZNode(znode, filename); // Why serialize String of Long and not Long as bytes? 
- ZKUtil.setData(this.zookeeper, znode, ZKUtil.positionToByteArray(position)); + ZooKeeperUtil.setData(this.zookeeper, znode, ZKUtil.positionToByteArray(position)); } catch (KeeperException e) { this.abortable.abort("Failed to write replication wal position (filename=" + filename + ", position=" + position + ")", e); @@ -151,11 +151,11 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R @Override public long getLogPosition(String queueId, String filename) throws ReplicationException { - String clusterZnode = ZKUtil.joinZNode(this.myQueuesZnode, queueId); - String znode = ZKUtil.joinZNode(clusterZnode, filename); + String clusterZnode = ZooKeeperUtil.joinZNode(this.myQueuesZnode, queueId); + String znode = ZooKeeperUtil.joinZNode(clusterZnode, filename); byte[] bytes = null; try { - bytes = ZKUtil.getData(this.zookeeper, znode); + bytes = ZooKeeperUtil.getData(this.zookeeper, znode); } catch (KeeperException e) { throw new ReplicationException("Internal Error: could not get position in log for queueId=" + queueId + ", filename=" + filename, e); @@ -176,7 +176,7 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R @Override public boolean isThisOurRegionServer(String regionserver) { - return ZKUtil.joinZNode(this.queuesZNode, regionserver).equals(this.myQueuesZnode); + return ZooKeeperUtil.joinZNode(this.queuesZNode, regionserver).equals(this.myQueuesZnode); } @Override @@ -184,10 +184,10 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R if (isThisOurRegionServer(regionserver)) { return null; } - String rsZnodePath = ZKUtil.joinZNode(this.queuesZNode, regionserver); + String rsZnodePath = ZooKeeperUtil.joinZNode(this.queuesZNode, regionserver); List queues = null; try { - queues = ZKUtil.listChildrenNoWatch(this.zookeeper, rsZnodePath); + queues = ZooKeeperUtil.listChildrenNoWatch(this.zookeeper, rsZnodePath); } catch (KeeperException e) { this.abortable.abort("Failed to 
getUnClaimedQueueIds for RS" + regionserver, e); } @@ -202,11 +202,11 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R @Override public void removeReplicatorIfQueueIsEmpty(String regionserver) { - String rsPath = ZKUtil.joinZNode(this.queuesZNode, regionserver); + String rsPath = ZooKeeperUtil.joinZNode(this.queuesZNode, regionserver); try { - List list = ZKUtil.listChildrenNoWatch(this.zookeeper, rsPath); + List list = ZooKeeperUtil.listChildrenNoWatch(this.zookeeper, rsPath); if (list != null && list.isEmpty()){ - ZKUtil.deleteNode(this.zookeeper, rsPath); + ZooKeeperUtil.deleteNode(this.zookeeper, rsPath); } } catch (KeeperException e) { LOG.warn("Got error while removing replicator", e); @@ -216,7 +216,7 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R @Override public void removeAllQueues() { try { - ZKUtil.deleteNodeRecursively(this.zookeeper, this.myQueuesZnode); + ZooKeeperUtil.deleteNodeRecursively(this.zookeeper, this.myQueuesZnode); } catch (KeeperException e) { // if the znode is already expired, don't bother going further if (e instanceof KeeperException.SessionExpiredException) { @@ -229,10 +229,10 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R @Override public List getLogsInQueue(String queueId) { - String znode = ZKUtil.joinZNode(this.myQueuesZnode, queueId); + String znode = ZooKeeperUtil.joinZNode(this.myQueuesZnode, queueId); List result = null; try { - result = ZKUtil.listChildrenNoWatch(this.zookeeper, znode); + result = ZooKeeperUtil.listChildrenNoWatch(this.zookeeper, znode); } catch (KeeperException e) { this.abortable.abort("Failed to get list of wals for queueId=" + queueId, e); } @@ -243,7 +243,7 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R public List getAllQueues() { List listOfQueues = null; try { - listOfQueues = ZKUtil.listChildrenNoWatch(this.zookeeper, this.myQueuesZnode); + listOfQueues 
= ZooKeeperUtil.listChildrenNoWatch(this.zookeeper, this.myQueuesZnode); } catch (KeeperException e) { this.abortable.abort("Failed to get a list of queues for region server: " + this.myQueuesZnode, e); @@ -260,52 +260,52 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R private Pair> moveQueueUsingMulti(String znode, String peerId) { try { // hbase/replication/rs/deadrs - String deadRSZnodePath = ZKUtil.joinZNode(this.queuesZNode, znode); + String deadRSZnodePath = ZooKeeperUtil.joinZNode(this.queuesZNode, znode); List listOfOps = new ArrayList<>(); ReplicationQueueInfo replicationQueueInfo = new ReplicationQueueInfo(peerId); String newPeerId = peerId + "-" + znode; - String newPeerZnode = ZKUtil.joinZNode(this.myQueuesZnode, newPeerId); + String newPeerZnode = ZooKeeperUtil.joinZNode(this.myQueuesZnode, newPeerId); // check the logs queue for the old peer cluster - String oldClusterZnode = ZKUtil.joinZNode(deadRSZnodePath, peerId); - List wals = ZKUtil.listChildrenNoWatch(this.zookeeper, oldClusterZnode); + String oldClusterZnode = ZooKeeperUtil.joinZNode(deadRSZnodePath, peerId); + List wals = ZooKeeperUtil.listChildrenNoWatch(this.zookeeper, oldClusterZnode); if (!peerExists(replicationQueueInfo.getPeerId())) { LOG.warn("Peer " + replicationQueueInfo.getPeerId() + " didn't exist, will move its queue to avoid the failure of multi op"); for (String wal : wals) { - String oldWalZnode = ZKUtil.joinZNode(oldClusterZnode, wal); - listOfOps.add(ZKUtilOp.deleteNodeFailSilent(oldWalZnode)); + String oldWalZnode = ZooKeeperUtil.joinZNode(oldClusterZnode, wal); + listOfOps.add(ZooKeeperUtil.ZKUtilOp.deleteNodeFailSilent(oldWalZnode)); } - listOfOps.add(ZKUtilOp.deleteNodeFailSilent(oldClusterZnode)); - ZKUtil.multiOrSequential(this.zookeeper, listOfOps, false); + listOfOps.add(ZooKeeperUtil.ZKUtilOp.deleteNodeFailSilent(oldClusterZnode)); + ZooKeeperUtil.multiOrSequential(this.zookeeper, listOfOps, false); return null; } SortedSet 
logQueue = new TreeSet<>(); if (wals == null || wals.isEmpty()) { - listOfOps.add(ZKUtilOp.deleteNodeFailSilent(oldClusterZnode)); + listOfOps.add(ZooKeeperUtil.ZKUtilOp.deleteNodeFailSilent(oldClusterZnode)); } else { // create the new cluster znode - ZKUtilOp op = ZKUtilOp.createAndFailSilent(newPeerZnode, HConstants.EMPTY_BYTE_ARRAY); + ZKUtilOp op = ZooKeeperUtil.ZKUtilOp.createAndFailSilent(newPeerZnode, HConstants.EMPTY_BYTE_ARRAY); listOfOps.add(op); // get the offset of the logs and set it to new znodes for (String wal : wals) { - String oldWalZnode = ZKUtil.joinZNode(oldClusterZnode, wal); - byte[] logOffset = ZKUtil.getData(this.zookeeper, oldWalZnode); + String oldWalZnode = ZooKeeperUtil.joinZNode(oldClusterZnode, wal); + byte[] logOffset = ZooKeeperUtil.getData(this.zookeeper, oldWalZnode); LOG.debug("Creating " + wal + " with data " + Bytes.toString(logOffset)); - String newLogZnode = ZKUtil.joinZNode(newPeerZnode, wal); - listOfOps.add(ZKUtilOp.createAndFailSilent(newLogZnode, logOffset)); - listOfOps.add(ZKUtilOp.deleteNodeFailSilent(oldWalZnode)); + String newLogZnode = ZooKeeperUtil.joinZNode(newPeerZnode, wal); + listOfOps.add(ZooKeeperUtil.ZKUtilOp.createAndFailSilent(newLogZnode, logOffset)); + listOfOps.add(ZooKeeperUtil.ZKUtilOp.deleteNodeFailSilent(oldWalZnode)); logQueue.add(wal); } // add delete op for peer - listOfOps.add(ZKUtilOp.deleteNodeFailSilent(oldClusterZnode)); + listOfOps.add(ZooKeeperUtil.ZKUtilOp.deleteNodeFailSilent(oldClusterZnode)); if (LOG.isTraceEnabled()) LOG.trace(" The multi list size is: " + listOfOps.size()); } - ZKUtil.multiOrSequential(this.zookeeper, listOfOps, false); + ZooKeeperUtil.multiOrSequential(this.zookeeper, listOfOps, false); LOG.info("Atomically moved " + znode + "/" + peerId + "'s WALs to my queue"); return new Pair<>(newPeerId, logQueue); @@ -322,7 +322,7 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R @Override public void addHFileRefs(String peerId, List> pairs) 
throws ReplicationException { - String peerZnode = ZKUtil.joinZNode(this.hfileRefsZNode, peerId); + String peerZnode = ZooKeeperUtil.joinZNode(this.hfileRefsZNode, peerId); boolean debugEnabled = LOG.isDebugEnabled(); if (debugEnabled) { LOG.debug("Adding hfile references " + pairs + " in queue " + peerZnode); @@ -332,8 +332,8 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R List listOfOps = new ArrayList<>(size); for (int i = 0; i < size; i++) { - listOfOps.add(ZKUtilOp.createAndFailSilent( - ZKUtil.joinZNode(peerZnode, pairs.get(i).getSecond().getName()), + listOfOps.add(ZooKeeperUtil.ZKUtilOp.createAndFailSilent( + ZooKeeperUtil.joinZNode(peerZnode, pairs.get(i).getSecond().getName()), HConstants.EMPTY_BYTE_ARRAY)); } if (debugEnabled) { @@ -341,7 +341,7 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R + " is " + listOfOps.size()); } try { - ZKUtil.multiOrSequential(this.zookeeper, listOfOps, true); + ZooKeeperUtil.multiOrSequential(this.zookeeper, listOfOps, true); } catch (KeeperException e) { throw new ReplicationException("Failed to create hfile reference znode=" + e.getPath(), e); } @@ -349,7 +349,7 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R @Override public void removeHFileRefs(String peerId, List files) { - String peerZnode = ZKUtil.joinZNode(this.hfileRefsZNode, peerId); + String peerZnode = ZooKeeperUtil.joinZNode(this.hfileRefsZNode, peerId); boolean debugEnabled = LOG.isDebugEnabled(); if (debugEnabled) { LOG.debug("Removing hfile references " + files + " from queue " + peerZnode); @@ -359,14 +359,15 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R List listOfOps = new ArrayList<>(size); for (int i = 0; i < size; i++) { - listOfOps.add(ZKUtilOp.deleteNodeFailSilent(ZKUtil.joinZNode(peerZnode, files.get(i)))); + listOfOps.add(ZooKeeperUtil.ZKUtilOp.deleteNodeFailSilent( + ZooKeeperUtil.joinZNode(peerZnode, 
files.get(i)))); } if (debugEnabled) { LOG.debug(" The multi list size for removing hfile references in zk for node " + peerZnode + " is " + listOfOps.size()); } try { - ZKUtil.multiOrSequential(this.zookeeper, listOfOps, true); + ZooKeeperUtil.multiOrSequential(this.zookeeper, listOfOps, true); } catch (KeeperException e) { LOG.error("Failed to remove hfile reference znode=" + e.getPath(), e); } @@ -374,11 +375,11 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R @Override public void addPeerToHFileRefs(String peerId) throws ReplicationException { - String peerZnode = ZKUtil.joinZNode(this.hfileRefsZNode, peerId); + String peerZnode = ZooKeeperUtil.joinZNode(this.hfileRefsZNode, peerId); try { - if (ZKUtil.checkExists(this.zookeeper, peerZnode) == -1) { + if (ZooKeeperUtil.checkExists(this.zookeeper, peerZnode) == -1) { LOG.info("Adding peer " + peerId + " to hfile reference queue."); - ZKUtil.createWithParents(this.zookeeper, peerZnode); + ZooKeeperUtil.createWithParents(this.zookeeper, peerZnode); } } catch (KeeperException e) { throw new ReplicationException("Failed to add peer " + peerId + " to hfile reference queue.", @@ -388,16 +389,16 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R @Override public void removePeerFromHFileRefs(String peerId) { - final String peerZnode = ZKUtil.joinZNode(this.hfileRefsZNode, peerId); + final String peerZnode = ZooKeeperUtil.joinZNode(this.hfileRefsZNode, peerId); try { - if (ZKUtil.checkExists(this.zookeeper, peerZnode) == -1) { + if (ZooKeeperUtil.checkExists(this.zookeeper, peerZnode) == -1) { if (LOG.isDebugEnabled()) { LOG.debug("Peer " + peerZnode + " not found in hfile reference queue."); } return; } else { LOG.info("Removing peer " + peerZnode + " from hfile reference queue."); - ZKUtil.deleteNodeRecursively(this.zookeeper, peerZnode); + ZooKeeperUtil.deleteNodeRecursively(this.zookeeper, peerZnode); } } catch (KeeperException e) { 
LOG.error("Ignoring the exception to remove peer " + peerId + " from hfile reference queue.", diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStateZKBase.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStateZKBase.java index c6501e189d..08a37b8096 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStateZKBase.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStateZKBase.java @@ -27,12 +27,12 @@ import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTe import org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Abortable; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperUtil; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos; import org.apache.hadoop.hbase.zookeeper.ZKConfig; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.apache.zookeeper.KeeperException; @@ -60,7 +60,7 @@ public abstract class ReplicationStateZKBase { /** The name of the znode that contains tableCFs */ protected final String tableCFsNodeName; - protected final ZooKeeperWatcher zookeeper; + protected final ZKWatcher zookeeper; protected final Configuration conf; protected final Abortable abortable; @@ -73,7 +73,7 @@ public abstract class ReplicationStateZKBase { "zookeeper.znode.replication.hfile.refs"; public static final String ZOOKEEPER_ZNODE_REPLICATION_HFILE_REFS_DEFAULT = "hfile-refs"; - public ReplicationStateZKBase(ZooKeeperWatcher zookeeper, Configuration conf, + public ReplicationStateZKBase(ZKWatcher zookeeper, Configuration conf, Abortable abortable) { 
this.zookeeper = zookeeper; this.conf = conf; @@ -87,17 +87,17 @@ public abstract class ReplicationStateZKBase { this.peerStateNodeName = conf.get("zookeeper.znode.replication.peers.state", "peer-state"); this.tableCFsNodeName = conf.get("zookeeper.znode.replication.peers.tableCFs", "tableCFs"); this.ourClusterKey = ZKConfig.getZooKeeperClusterKey(this.conf); - this.replicationZNode = ZKUtil.joinZNode(this.zookeeper.znodePaths.baseZNode, + this.replicationZNode = ZooKeeperUtil.joinZNode(this.zookeeper.znodePaths.baseZNode, replicationZNodeName); - this.peersZNode = ZKUtil.joinZNode(replicationZNode, peersZNodeName); - this.queuesZNode = ZKUtil.joinZNode(replicationZNode, queuesZNodeName); - this.hfileRefsZNode = ZKUtil.joinZNode(replicationZNode, hfileRefsZNodeName); + this.peersZNode = ZooKeeperUtil.joinZNode(replicationZNode, peersZNodeName); + this.queuesZNode = ZooKeeperUtil.joinZNode(replicationZNode, queuesZNodeName); + this.hfileRefsZNode = ZooKeeperUtil.joinZNode(replicationZNode, hfileRefsZNodeName); } public List getListOfReplicators() { List result = null; try { - result = ZKUtil.listChildrenNoWatch(this.zookeeper, this.queuesZNode); + result = ZooKeeperUtil.listChildrenNoWatch(this.zookeeper, this.queuesZNode); } catch (KeeperException e) { this.abortable.abort("Failed to get list of replicators", e); } @@ -127,7 +127,7 @@ public abstract class ReplicationStateZKBase { } protected boolean peerExists(String id) throws KeeperException { - return ZKUtil.checkExists(this.zookeeper, ZKUtil.joinZNode(this.peersZNode, id)) >= 0; + return ZooKeeperUtil.checkExists(this.zookeeper, ZooKeeperUtil.joinZNode(this.peersZNode, id)) >= 0; } /** @@ -141,15 +141,15 @@ public abstract class ReplicationStateZKBase { @VisibleForTesting protected String getTableCFsNode(String id) { - return ZKUtil.joinZNode(this.peersZNode, ZKUtil.joinZNode(id, this.tableCFsNodeName)); + return ZooKeeperUtil.joinZNode(this.peersZNode, ZooKeeperUtil.joinZNode(id, this.tableCFsNodeName)); } 
@VisibleForTesting protected String getPeerStateNode(String id) { - return ZKUtil.joinZNode(this.peersZNode, ZKUtil.joinZNode(id, this.peerStateNodeName)); + return ZooKeeperUtil.joinZNode(this.peersZNode, ZooKeeperUtil.joinZNode(id, this.peerStateNodeName)); } @VisibleForTesting protected String getPeerNode(String id) { - return ZKUtil.joinZNode(this.peersZNode, id); + return ZooKeeperUtil.joinZNode(this.peersZNode, id); } } diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationTrackerZKImpl.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationTrackerZKImpl.java index ade1c4d42e..5827e501ee 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationTrackerZKImpl.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationTrackerZKImpl.java @@ -24,13 +24,14 @@ import java.util.concurrent.CopyOnWriteArrayList; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperUtil; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.Stoppable; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperListener; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKListener; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.zookeeper.KeeperException; /** @@ -50,7 +51,7 @@ public class ReplicationTrackerZKImpl extends ReplicationStateZKBase implements private final ArrayList otherRegionServers = new ArrayList<>(); private final ReplicationPeers replicationPeers; - public ReplicationTrackerZKImpl(ZooKeeperWatcher zookeeper, + public ReplicationTrackerZKImpl(ZKWatcher zookeeper, final 
ReplicationPeers replicationPeers, Configuration conf, Abortable abortable, Stoppable stopper) { super(zookeeper, conf, abortable); @@ -88,12 +89,12 @@ public class ReplicationTrackerZKImpl extends ReplicationStateZKBase implements * Watcher used to be notified of the other region server's death in the local cluster. It * initiates the process to transfer the queues if it is able to grab the lock. */ - public class OtherRegionServerWatcher extends ZooKeeperListener { + public class OtherRegionServerWatcher extends ZKListener { /** * Construct a ZooKeeper event listener. */ - public OtherRegionServerWatcher(ZooKeeperWatcher watcher) { + public OtherRegionServerWatcher(ZKWatcher watcher) { super(watcher); } @@ -145,12 +146,12 @@ public class ReplicationTrackerZKImpl extends ReplicationStateZKBase implements /** * Watcher used to follow the creation and deletion of peer clusters. */ - public class PeersWatcher extends ZooKeeperListener { + public class PeersWatcher extends ZKListener { /** * Construct a ZooKeeper event listener. 
*/ - public PeersWatcher(ZooKeeperWatcher watcher) { + public PeersWatcher(ZKWatcher watcher) { super(watcher); } @@ -241,7 +242,7 @@ public class ReplicationTrackerZKImpl extends ReplicationStateZKBase implements private List getRegisteredRegionServers() { List result = null; try { - result = ZKUtil.listChildrenAndWatchThem(this.zookeeper, this.zookeeper.znodePaths.rsZNode); + result = ZooKeeperUtil.listChildrenAndWatchThem(this.zookeeper, this.zookeeper.znodePaths.rsZNode); } catch (KeeperException e) { this.abortable.abort("Get list of registered region servers", e); } diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/TableBasedReplicationQueuesImpl.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/TableBasedReplicationQueuesImpl.java index cf1ff203b9..1f239f89de 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/TableBasedReplicationQueuesImpl.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/TableBasedReplicationQueuesImpl.java @@ -37,11 +37,10 @@ import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.RowMutations; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; -import org.apache.hadoop.hbase.filter.CompareFilter; import org.apache.hadoop.hbase.filter.SingleColumnValueFilter; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Pair; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.zookeeper.KeeperException; import java.io.IOException; @@ -77,7 +76,7 @@ public class TableBasedReplicationQueuesImpl extends ReplicationTableBase this(args.getConf(), args.getAbortable(), args.getZk()); } - public TableBasedReplicationQueuesImpl(Configuration conf, Abortable abort, ZooKeeperWatcher zkw) + public TableBasedReplicationQueuesImpl(Configuration conf, Abortable abort, 
ZKWatcher zkw) throws IOException { super(conf, abort); replicationState = new ReplicationStateZKBase(zkw, conf, abort) {}; diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java index 9520f5fbf8..2660defcdd 100644 --- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java +++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java @@ -74,8 +74,9 @@ import org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos; import org.apache.hadoop.hbase.regionserver.DisabledRegionSplitPolicy; import org.apache.hadoop.hbase.security.access.AccessControlLists; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperUtil; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.yetus.audience.InterfaceAudience; import org.apache.zookeeper.KeeperException; @@ -138,7 +139,7 @@ class RSGroupInfoManagerImpl implements RSGroupInfoManager { private final MasterServices masterServices; private Table rsGroupTable; private final ClusterConnection conn; - private final ZooKeeperWatcher watcher; + private final ZKWatcher watcher; private final RSGroupStartupWorker rsGroupStartupWorker = new RSGroupStartupWorker(); // contains list of groups that were last flushed to persistent store private Set prevRSGroups = new HashSet<>(); @@ -313,13 +314,13 @@ class RSGroupInfoManagerImpl implements RSGroupInfoManager { } List retrieveGroupListFromZookeeper() throws IOException { - String groupBasePath = ZKUtil.joinZNode(watcher.znodePaths.baseZNode, rsGroupZNode); + String groupBasePath = ZooKeeperUtil.joinZNode(watcher.znodePaths.baseZNode, rsGroupZNode); List RSGroupInfoList = 
Lists.newArrayList(); //Overwrite any info stored by table, this takes precedence try { - if(ZKUtil.checkExists(watcher, groupBasePath) != -1) { - for(String znode: ZKUtil.listChildrenAndWatchForNewChildren(watcher, groupBasePath)) { - byte[] data = ZKUtil.getData(watcher, ZKUtil.joinZNode(groupBasePath, znode)); + if(ZooKeeperUtil.checkExists(watcher, groupBasePath) != -1) { + for(String znode: ZooKeeperUtil.listChildrenAndWatchForNewChildren(watcher, groupBasePath)) { + byte[] data = ZooKeeperUtil.getData(watcher, ZooKeeperUtil.joinZNode(groupBasePath, znode)); if(data.length > 0) { ProtobufUtil.expectPBMagicPrefix(data); ByteArrayInputStream bis = new ByteArrayInputStream( @@ -463,30 +464,30 @@ class RSGroupInfoManagerImpl implements RSGroupInfoManager { resetRSGroupAndTableMaps(newGroupMap, newTableMap); try { - String groupBasePath = ZKUtil.joinZNode(watcher.znodePaths.baseZNode, rsGroupZNode); - ZKUtil.createAndFailSilent(watcher, groupBasePath, ProtobufMagic.PB_MAGIC); + String groupBasePath = ZooKeeperUtil.joinZNode(watcher.znodePaths.baseZNode, rsGroupZNode); + ZooKeeperUtil.createAndFailSilent(watcher, groupBasePath, ProtobufMagic.PB_MAGIC); - List zkOps = new ArrayList<>(newGroupMap.size()); + List zkOps = new ArrayList<>(newGroupMap.size()); for(String groupName : prevRSGroups) { if(!newGroupMap.containsKey(groupName)) { - String znode = ZKUtil.joinZNode(groupBasePath, groupName); - zkOps.add(ZKUtil.ZKUtilOp.deleteNodeFailSilent(znode)); + String znode = ZooKeeperUtil.joinZNode(groupBasePath, groupName); + zkOps.add(ZooKeeperUtil.ZKUtilOp.deleteNodeFailSilent(znode)); } } for (RSGroupInfo RSGroupInfo : newGroupMap.values()) { - String znode = ZKUtil.joinZNode(groupBasePath, RSGroupInfo.getName()); + String znode = ZooKeeperUtil.joinZNode(groupBasePath, RSGroupInfo.getName()); RSGroupProtos.RSGroupInfo proto = RSGroupProtobufUtil.toProtoGroupInfo(RSGroupInfo); LOG.debug("Updating znode: "+znode); - ZKUtil.createAndFailSilent(watcher, znode); - 
zkOps.add(ZKUtil.ZKUtilOp.deleteNodeFailSilent(znode)); - zkOps.add(ZKUtil.ZKUtilOp.createAndFailSilent(znode, + ZooKeeperUtil.createAndFailSilent(watcher, znode); + zkOps.add(ZooKeeperUtil.ZKUtilOp.deleteNodeFailSilent(znode)); + zkOps.add(ZooKeeperUtil.ZKUtilOp.createAndFailSilent(znode, ProtobufUtil.prependPBMagic(proto.toByteArray()))); } LOG.debug("Writing ZK GroupInfo count: " + zkOps.size()); - ZKUtil.multiOrSequential(watcher, zkOps, false); + ZooKeeperUtil.multiOrSequential(watcher, zkOps, false); } catch (KeeperException e) { LOG.error("Failed to write to rsGroupZNode", e); masterServices.abort("Failed to write to rsGroupZNode", e); @@ -524,7 +525,7 @@ class RSGroupInfoManagerImpl implements RSGroupInfoManager { LOG.debug("Reading online RS from zookeeper"); List servers = new LinkedList<>(); try { - for (String el: ZKUtil.listChildrenNoWatch(watcher, watcher.znodePaths.rsZNode)) { + for (String el: ZooKeeperUtil.listChildrenNoWatch(watcher, watcher.znodePaths.rsZNode)) { servers.add(ServerName.parseServerName(el)); } } catch (KeeperException e) { @@ -788,12 +789,12 @@ class RSGroupInfoManagerImpl implements RSGroupInfoManager { = MultiRowMutationProtos.MutateRowsRequest.newBuilder(); for (Mutation mutation : mutations) { if (mutation instanceof Put) { - mmrBuilder.addMutationRequest(org.apache.hadoop.hbase.protobuf.ProtobufUtil.toMutation( + mmrBuilder.addMutationRequest(ProtobufUtil.toMutation( org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType.PUT, mutation)); } else if (mutation instanceof Delete) { mmrBuilder.addMutationRequest( - org.apache.hadoop.hbase.protobuf.ProtobufUtil.toMutation( + ProtobufUtil.toMutation( org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto. 
MutationType.DELETE, mutation)); } else { diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdminClient.java b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdminClient.java index ea019bc745..e2d48e02e1 100644 --- a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdminClient.java +++ b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdminClient.java @@ -21,6 +21,8 @@ import org.apache.hadoop.hbase.shaded.com.google.common.collect.Maps; import org.apache.hadoop.hbase.shaded.com.google.common.collect.Sets; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperUtil; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Result; @@ -30,8 +32,6 @@ import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.net.Address; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.apache.zookeeper.KeeperException; import org.junit.Assert; @@ -44,14 +44,14 @@ import java.util.Set; @InterfaceAudience.Private public class VerifyingRSGroupAdminClient implements RSGroupAdmin { private Table table; - private ZooKeeperWatcher zkw; + private ZKWatcher zkw; private RSGroupAdmin wrapped; public VerifyingRSGroupAdminClient(RSGroupAdmin RSGroupAdmin, Configuration conf) throws IOException { wrapped = RSGroupAdmin; table = ConnectionFactory.createConnection(conf).getTable(RSGroupInfoManager.RSGROUP_TABLE_NAME); - zkw = new ZooKeeperWatcher(conf, this.getClass().getSimpleName(), null); + zkw = new ZKWatcher(conf, 
this.getClass().getSimpleName(), null); } @Override @@ -124,9 +124,9 @@ public class VerifyingRSGroupAdminClient implements RSGroupAdmin { Assert.assertEquals(Sets.newHashSet(groupMap.values()), Sets.newHashSet(wrapped.listRSGroups())); try { - String groupBasePath = ZKUtil.joinZNode(zkw.znodePaths.baseZNode, "rsgroup"); - for(String znode: ZKUtil.listChildrenNoWatch(zkw, groupBasePath)) { - byte[] data = ZKUtil.getData(zkw, ZKUtil.joinZNode(groupBasePath, znode)); + String groupBasePath = ZooKeeperUtil.joinZNode(zkw.znodePaths.baseZNode, "rsgroup"); + for(String znode: ZooKeeperUtil.listChildrenNoWatch(zkw, groupBasePath)) { + byte[] data = ZooKeeperUtil.getData(zkw, ZooKeeperUtil.joinZNode(groupBasePath, znode)); if(data.length > 0) { ProtobufUtil.expectPBMagicPrefix(data); ByteArrayInputStream bis = new ByteArrayInputStream( diff --git a/hbase-server/pom.xml b/hbase-server/pom.xml index ea02f26efc..c17135f7c9 100644 --- a/hbase-server/pom.xml +++ b/hbase-server/pom.xml @@ -423,6 +423,16 @@ org.apache.hbase hbase-metrics + + org.apache.hbase + hbase-zookeeper + + + org.apache.hbase + hbase-zookeeper + test-jar + test + org.apache.hbase diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java index a6b39f3f7a..0d9eba8968 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java @@ -23,7 +23,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.yetus.audience.InterfaceAudience; import java.io.IOException; @@ -43,7 +43,7 @@ public interface Server extends Abortable, Stoppable { /** * Gets the ZooKeeper instance for 
this server. */ - ZooKeeperWatcher getZooKeeper(); + ZKWatcher getZooKeeper(); /** * Returns a reference to the servers' connection. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ZKNamespaceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ZKNamespaceManager.java index 6e40295940..d7bd38401b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ZKNamespaceManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ZKNamespaceManager.java @@ -21,13 +21,13 @@ package org.apache.hadoop.hbase; import org.apache.hadoop.hbase.shaded.com.google.common.collect.Sets; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperUtil; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperListener; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKListener; import org.apache.zookeeper.KeeperException; import java.io.IOException; @@ -47,12 +47,12 @@ import java.util.concurrent.ConcurrentSkipListMap; * */ @InterfaceAudience.Private -public class ZKNamespaceManager extends ZooKeeperListener { +public class ZKNamespaceManager extends ZKListener { private static final Log LOG = LogFactory.getLog(ZKNamespaceManager.class); private final String nsZNode; private volatile NavigableMap cache; - public ZKNamespaceManager(ZooKeeperWatcher zkw) throws IOException { + public ZKNamespaceManager(ZKWatcher zkw) throws IOException { super(zkw); nsZNode = zkw.znodePaths.namespaceZNode; cache = new ConcurrentSkipListMap<>(); @@ -61,14 +61,14 @@ public class ZKNamespaceManager extends 
ZooKeeperListener { public void start() throws IOException { watcher.registerListener(this); try { - if (ZKUtil.watchAndCheckExists(watcher, nsZNode)) { - List existing = - ZKUtil.getChildDataAndWatchForNewChildren(watcher, nsZNode); + if (ZooKeeperUtil.watchAndCheckExists(watcher, nsZNode)) { + List existing = + ZooKeeperUtil.getChildDataAndWatchForNewChildren(watcher, nsZNode); if (existing != null) { refreshNodes(existing); } } else { - ZKUtil.createWithParents(watcher, nsZNode); + ZooKeeperUtil.createWithParents(watcher, nsZNode); } } catch (KeeperException e) { throw new IOException("Failed to initialize ZKNamespaceManager", e); @@ -102,8 +102,8 @@ public class ZKNamespaceManager extends ZooKeeperListener { public void nodeCreated(String path) { if (nsZNode.equals(path)) { try { - List nodes = - ZKUtil.getChildDataAndWatchForNewChildren(watcher, nsZNode); + List nodes = + ZooKeeperUtil.getChildDataAndWatchForNewChildren(watcher, nsZNode); refreshNodes(nodes); } catch (KeeperException ke) { String msg = "Error reading data from zookeeper"; @@ -119,17 +119,17 @@ public class ZKNamespaceManager extends ZooKeeperListener { @Override public void nodeDeleted(String path) { - if (nsZNode.equals(ZKUtil.getParent(path))) { - String nsName = ZKUtil.getNodeName(path); + if (nsZNode.equals(ZooKeeperUtil.getParent(path))) { + String nsName = ZooKeeperUtil.getNodeName(path); cache.remove(nsName); } } @Override public void nodeDataChanged(String path) { - if (nsZNode.equals(ZKUtil.getParent(path))) { + if (nsZNode.equals(ZooKeeperUtil.getParent(path))) { try { - byte[] data = ZKUtil.getDataAndWatch(watcher, path); + byte[] data = ZooKeeperUtil.getDataAndWatch(watcher, path); NamespaceDescriptor ns = ProtobufUtil.toNamespaceDescriptor( HBaseProtos.NamespaceDescriptor.parseFrom(data)); @@ -151,8 +151,8 @@ public class ZKNamespaceManager extends ZooKeeperListener { public void nodeChildrenChanged(String path) { if (nsZNode.equals(path)) { try { - List nodes = - 
ZKUtil.getChildDataAndWatchForNewChildren(watcher, nsZNode); + List nodes = + ZooKeeperUtil.getChildDataAndWatchForNewChildren(watcher, nsZNode); refreshNodes(nodes); } catch (KeeperException ke) { LOG.error("Error reading data from zookeeper for path "+path, ke); @@ -165,9 +165,9 @@ public class ZKNamespaceManager extends ZooKeeperListener { } private void deleteNamespace(String name) throws IOException { - String zNode = ZKUtil.joinZNode(nsZNode, name); + String zNode = ZooKeeperUtil.joinZNode(nsZNode, name); try { - ZKUtil.deleteNode(watcher, zNode); + ZooKeeperUtil.deleteNode(watcher, zNode); } catch (KeeperException e) { if (e instanceof KeeperException.NoNodeException) { // If the node does not exist, it could be already deleted. Continue without fail. @@ -180,10 +180,10 @@ public class ZKNamespaceManager extends ZooKeeperListener { } private void writeNamespace(NamespaceDescriptor ns) throws IOException { - String zNode = ZKUtil.joinZNode(nsZNode, ns.getName()); + String zNode = ZooKeeperUtil.joinZNode(nsZNode, ns.getName()); try { - ZKUtil.createWithParents(watcher, zNode); - ZKUtil.updateExistingNodeData(watcher, zNode, + ZooKeeperUtil.createWithParents(watcher, zNode); + ZooKeeperUtil.updateExistingNodeData(watcher, zNode, ProtobufUtil.toProtoNamespaceDescriptor(ns).toByteArray(), -1); } catch (KeeperException e) { LOG.error("Failed updating permissions for namespace "+ns.getName(), e); @@ -191,11 +191,11 @@ public class ZKNamespaceManager extends ZooKeeperListener { } } - private void refreshNodes(List nodes) throws IOException { - for (ZKUtil.NodeAndData n : nodes) { + private void refreshNodes(List nodes) throws IOException { + for (ZooKeeperUtil.NodeAndData n : nodes) { if (n.isEmpty()) continue; String path = n.getNode(); - String namespace = ZKUtil.getNodeName(path); + String namespace = ZooKeeperUtil.getNodeName(path); byte[] nodeData = n.getData(); if (LOG.isDebugEnabled()) { LOG.debug("Updating namespace cache from node "+namespace+" with data: 
"+ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ZNodeClearer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ZNodeClearer.java index bd8a58e467..d98f07b218 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ZNodeClearer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ZNodeClearer.java @@ -31,8 +31,9 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer; import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperUtil; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.zookeeper.KeeperException; /** @@ -131,7 +132,7 @@ public class ZNodeClearer { * @param rsZnodePath from HBASE_ZNODE_FILE * @return String representation of ServerName or null if fails */ - + public static String parseMasterServerName(String rsZnodePath) { String masterServerName = null; try { @@ -140,12 +141,12 @@ public class ZNodeClearer { } catch (IndexOutOfBoundsException e) { LOG.warn("String " + rsZnodePath + " has wrong format", e); } - return masterServerName; + return masterServerName; } - + /** - * - * @return true if cluster is configured with master-rs collocation + * + * @return true if cluster is configured with master-rs collocation */ private static boolean tablesOnMaster(Configuration conf) { boolean tablesOnMaster = true; @@ -166,9 +167,9 @@ public class ZNodeClearer { Configuration tempConf = new Configuration(conf); tempConf.setInt("zookeeper.recovery.retry", 0); - ZooKeeperWatcher zkw; + ZKWatcher zkw; try { - zkw = new ZooKeeperWatcher(tempConf, "clean znode for master", + zkw = new ZKWatcher(tempConf, "clean znode for master", new Abortable() { @Override public void abort(String why, 
Throwable e) {} @Override public boolean isAborted() { return false; } @@ -182,9 +183,9 @@ public class ZNodeClearer { try { znodeFileContent = ZNodeClearer.readMyEphemeralNodeOnDisk(); if(ZNodeClearer.tablesOnMaster(conf)) { - //In case of master crash also remove rsZnode since master is also regionserver - ZKUtil.deleteNodeFailSilent(zkw, znodeFileContent); - return MasterAddressTracker.deleteIfEquals(zkw, + //In case of master crash also remove rsZnode since master is also regionserver + ZooKeeperUtil.deleteNodeFailSilent(zkw, znodeFileContent); + return MasterAddressTracker.deleteIfEquals(zkw, ZNodeClearer.parseMasterServerName(znodeFileContent)); } else { return MasterAddressTracker.deleteIfEquals(zkw, znodeFileContent); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveManager.java index 61d6a0288e..b0bd2816e9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveManager.java @@ -22,12 +22,12 @@ import java.io.IOException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.ZooKeeperConnectionException; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperConnectionException; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperUtil; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.apache.zookeeper.KeeperException; /** @@ -41,12 +41,12 @@ class HFileArchiveManager { private final String archiveZnode; private static final Log 
LOG = LogFactory.getLog(HFileArchiveManager.class); - private final ZooKeeperWatcher zooKeeper; + private final ZKWatcher zooKeeper; private volatile boolean stopped = false; public HFileArchiveManager(Connection connection, Configuration conf) throws ZooKeeperConnectionException, IOException { - this.zooKeeper = new ZooKeeperWatcher(conf, "hfileArchiveManager-on-" + connection.toString(), + this.zooKeeper = new ZKWatcher(conf, "hfileArchiveManager-on-" + connection.toString(), connection); this.archiveZnode = ZKTableArchiveClient.getArchiveZNode(this.zooKeeper.getConfiguration(), this.zooKeeper); @@ -86,7 +86,7 @@ class HFileArchiveManager { public HFileArchiveManager disableHFileBackup() throws IOException { LOG.debug("Disabling backups on all tables."); try { - ZKUtil.deleteNodeRecursively(this.zooKeeper, archiveZnode); + ZooKeeperUtil.deleteNodeRecursively(this.zooKeeper, archiveZnode); return this; } catch (KeeperException e) { throw new IOException("Unexpected ZK exception!", e); @@ -103,15 +103,15 @@ class HFileArchiveManager { * @param table table name on which to enable archiving * @throws KeeperException */ - private void enable(ZooKeeperWatcher zooKeeper, byte[] table) + private void enable(ZKWatcher zooKeeper, byte[] table) throws KeeperException { LOG.debug("Ensuring archiving znode exists"); - ZKUtil.createAndFailSilent(zooKeeper, archiveZnode); + ZooKeeperUtil.createAndFailSilent(zooKeeper, archiveZnode); // then add the table to the list of znodes to archive String tableNode = this.getTableNode(table); LOG.debug("Creating: " + tableNode + ", data: []"); - ZKUtil.createSetData(zooKeeper, tableNode, new byte[0]); + ZooKeeperUtil.createSetData(zooKeeper, tableNode, new byte[0]); } /** @@ -122,12 +122,12 @@ class HFileArchiveManager { * @param table name of the table to disable * @throws KeeperException if an unexpected ZK connection issues occurs */ - private void disable(ZooKeeperWatcher zooKeeper, byte[] table) throws KeeperException { + private void 
disable(ZKWatcher zooKeeper, byte[] table) throws KeeperException { // ensure the latest state of the archive node is found zooKeeper.sync(archiveZnode); // if the top-level archive node is gone, then we are done - if (ZKUtil.checkExists(zooKeeper, archiveZnode) < 0) { + if (ZooKeeperUtil.checkExists(zooKeeper, archiveZnode) < 0) { return; } // delete the table node, from the archive @@ -136,7 +136,7 @@ class HFileArchiveManager { zooKeeper.sync(tableNode); LOG.debug("Attempting to delete table node:" + tableNode); - ZKUtil.deleteNodeRecursively(zooKeeper, tableNode); + ZooKeeperUtil.deleteNodeRecursively(zooKeeper, tableNode); } public void stop() { @@ -155,7 +155,7 @@ class HFileArchiveManager { */ public boolean isArchivingEnabled(byte[] table) throws KeeperException { String tableNode = this.getTableNode(table); - return ZKUtil.checkExists(zooKeeper, tableNode) >= 0; + return ZooKeeperUtil.checkExists(zooKeeper, tableNode) >= 0; } /** @@ -164,6 +164,6 @@ class HFileArchiveManager { * @return znode for the table's archive status */ private String getTableNode(byte[] table) { - return ZKUtil.joinZNode(archiveZnode, Bytes.toString(table)); + return ZooKeeperUtil.joinZNode(archiveZnode, Bytes.toString(table)); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/TableHFileArchiveTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/TableHFileArchiveTracker.java index 72c292f244..e772030645 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/TableHFileArchiveTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/TableHFileArchiveTracker.java @@ -22,12 +22,12 @@ import java.util.List; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.zookeeper.ZKListener; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperConnectionException; +import 
org.apache.hadoop.hbase.zookeeper.ZooKeeperUtil; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.ZooKeeperConnectionException; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperListener; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.apache.zookeeper.KeeperException; /** @@ -38,14 +38,14 @@ import org.apache.zookeeper.KeeperException; * archive. */ @InterfaceAudience.Private -public class TableHFileArchiveTracker extends ZooKeeperListener { +public class TableHFileArchiveTracker extends ZKListener { private static final Log LOG = LogFactory.getLog(TableHFileArchiveTracker.class); public static final String HFILE_ARCHIVE_ZNODE_PARENT = "hfilearchive"; private HFileArchiveTableMonitor monitor; private String archiveHFileZNode; private boolean stopped = false; - private TableHFileArchiveTracker(ZooKeeperWatcher watcher, HFileArchiveTableMonitor monitor) { + private TableHFileArchiveTracker(ZKWatcher watcher, HFileArchiveTableMonitor monitor) { super(watcher); watcher.registerListener(this); this.monitor = monitor; @@ -110,10 +110,10 @@ public class TableHFileArchiveTracker extends ZooKeeperListener { * @throws KeeperException if an unexpected zk exception occurs */ private void addAndReWatchTable(String tableZnode) throws KeeperException { - getMonitor().addTable(ZKUtil.getNodeName(tableZnode)); + getMonitor().addTable(ZooKeeperUtil.getNodeName(tableZnode)); // re-add a watch to the table created // and check to make sure it wasn't deleted - if (!ZKUtil.watchAndCheckExists(watcher, tableZnode)) { + if (!ZooKeeperUtil.watchAndCheckExists(watcher, tableZnode)) { safeStopTrackingTable(tableZnode); } } @@ -125,9 +125,9 @@ public class TableHFileArchiveTracker extends ZooKeeperListener { * @throws KeeperException if an unexpected zk exception occurs */ private void safeStopTrackingTable(String tableZnode) throws 
KeeperException { - getMonitor().removeTable(ZKUtil.getNodeName(tableZnode)); + getMonitor().removeTable(ZooKeeperUtil.getNodeName(tableZnode)); // if the table exists, then add and rewatch it - if (ZKUtil.checkExists(watcher, tableZnode) >= 0) { + if (ZooKeeperUtil.checkExists(watcher, tableZnode) >= 0) { addAndReWatchTable(tableZnode); } } @@ -155,7 +155,7 @@ public class TableHFileArchiveTracker extends ZooKeeperListener { // exists for that matter), so its better not to add unnecessary load to // zk for watches. If the table is created again, then we will get the // notification in childrenChanaged. - getMonitor().removeTable(ZKUtil.getNodeName(path)); + getMonitor().removeTable(ZooKeeperUtil.getNodeName(path)); } /** @@ -164,7 +164,7 @@ public class TableHFileArchiveTracker extends ZooKeeperListener { */ private void checkEnabledAndUpdate() { try { - if (ZKUtil.watchAndCheckExists(watcher, archiveHFileZNode)) { + if (ZooKeeperUtil.watchAndCheckExists(watcher, archiveHFileZNode)) { LOG.debug(archiveHFileZNode + " znode does exist, checking for tables to archive"); // update the tables we should backup, to get the most recent state. 
@@ -188,7 +188,7 @@ public class TableHFileArchiveTracker extends ZooKeeperListener { // get the children and watch for new children LOG.debug("Updating watches on tables to archive."); // get the children and add watches for each of the children - List tables = ZKUtil.listChildrenAndWatchThem(watcher, archiveHFileZNode); + List tables = ZooKeeperUtil.listChildrenAndWatchThem(watcher, archiveHFileZNode); LOG.debug("Starting archive for tables:" + tables); // if archiving is still enabled if (tables != null && tables.size() > 0) { @@ -235,7 +235,7 @@ public class TableHFileArchiveTracker extends ZooKeeperListener { */ public static TableHFileArchiveTracker create(Configuration conf) throws ZooKeeperConnectionException, IOException { - ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "hfileArchiveCleaner", null); + ZKWatcher zkw = new ZKWatcher(conf, "hfileArchiveCleaner", null); return create(zkw, new HFileArchiveTableMonitor()); } @@ -247,12 +247,12 @@ public class TableHFileArchiveTracker extends ZooKeeperListener { * @return ZooKeeper tracker to monitor for this server if this server should archive hfiles for a * given table */ - private static TableHFileArchiveTracker create(ZooKeeperWatcher zkw, + private static TableHFileArchiveTracker create(ZKWatcher zkw, HFileArchiveTableMonitor monitor) { return new TableHFileArchiveTracker(zkw, monitor); } - public ZooKeeperWatcher getZooKeeperWatcher() { + public ZKWatcher getZooKeeperWatcher() { return this.watcher; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/ZKTableArchiveClient.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/ZKTableArchiveClient.java index 6c173cf0f9..a5f1bcd42d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/ZKTableArchiveClient.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/ZKTableArchiveClient.java @@ -19,13 +19,13 @@ package org.apache.hadoop.hbase.backup.example; import 
java.io.IOException; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperUtil; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.apache.zookeeper.KeeperException; /** @@ -148,8 +148,8 @@ public class ZKTableArchiveClient extends Configured { * @param zooKeeper zookeeper to used for building the full path * @return get the znode for long-term archival of a table for */ - public static String getArchiveZNode(Configuration conf, ZooKeeperWatcher zooKeeper) { - return ZKUtil.joinZNode(zooKeeper.znodePaths.baseZNode, conf.get( + public static String getArchiveZNode(Configuration conf, ZKWatcher zooKeeper) { + return ZooKeeperUtil.joinZNode(zooKeeper.znodePaths.baseZNode, conf.get( ZOOKEEPER_ZNODE_HFILE_ARCHIVE_KEY, TableHFileArchiveTracker.HFILE_ARCHIVE_ZNODE_PARENT)); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java index ef99de0847..73658dedfb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java @@ -37,7 +37,6 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.CoordinatedStateManager; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.SplitLogCounters; @@ -53,11 +52,12 @@ 
import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTe import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; import org.apache.hadoop.hbase.wal.WALSplitter; -import org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper; +import org.apache.hadoop.hbase.zookeeper.RecoverableZK; +import org.apache.hadoop.hbase.zookeeper.ZKListener; import org.apache.hadoop.hbase.zookeeper.ZKSplitLog; import org.apache.hadoop.hbase.zookeeper.ZKUtil; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperListener; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperUtil; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.hadoop.util.StringUtils; import org.apache.yetus.audience.InterfaceAudience; import org.apache.zookeeper.AsyncCallback; @@ -74,7 +74,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.SplitLo * {@link SplitLogManagerCoordination} */ @InterfaceAudience.Private -public class ZKSplitLogManagerCoordination extends ZooKeeperListener implements +public class ZKSplitLogManagerCoordination extends ZKListener implements SplitLogManagerCoordination { public static final int DEFAULT_TIMEOUT = 120000; @@ -102,7 +102,7 @@ public class ZKSplitLogManagerCoordination extends ZooKeeperListener implements private boolean isDrainingDone = false; - public ZKSplitLogManagerCoordination(Configuration conf, ZooKeeperWatcher watcher) { + public ZKSplitLogManagerCoordination(Configuration conf, ZKWatcher watcher) { super(watcher); this.conf = conf; taskFinisher = new TaskFinisher() { @@ -140,7 +140,7 @@ public class ZKSplitLogManagerCoordination extends ZooKeeperListener implements public int remainingTasksInCoordination() { int count = 0; try { - List tasks = ZKUtil.listChildrenNoWatch(watcher, watcher.znodePaths.splitLogZNode); + List tasks = ZooKeeperUtil.listChildrenNoWatch(watcher, 
watcher.znodePaths.splitLogZNode); if (tasks != null) { int listSize = tasks.size(); for (int i = 0; i < listSize; i++) { @@ -256,7 +256,7 @@ public class ZKSplitLogManagerCoordination extends ZooKeeperListener implements // therefore this behavior is safe. SplitLogTask slt = new SplitLogTask.Done(this.details.getServerName(), getRecoveryMode()); this.watcher - .getRecoverableZooKeeper() + .getRecoverableZK() .getZooKeeper() .create(ZKSplitLog.getRescanNode(watcher), slt.toByteArray(), Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL_SEQUENTIAL, new CreateRescanAsyncCallback(), Long.valueOf(retries)); @@ -271,7 +271,7 @@ public class ZKSplitLogManagerCoordination extends ZooKeeperListener implements public void checkTaskStillAvailable(String path) { // A negative retry count will lead to ignoring all error processing. this.watcher - .getRecoverableZooKeeper() + .getRecoverableZK() .getZooKeeper() .getData(path, this.watcher, new GetDataAsyncCallback(), Long.valueOf(-1) /* retry count */); @@ -292,7 +292,7 @@ public class ZKSplitLogManagerCoordination extends ZooKeeperListener implements final String metaEncodeRegionName = RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedName(); int count = 0; try { - List tasks = ZKUtil.listChildrenNoWatch(watcher, watcher.znodePaths.splitLogZNode); + List tasks = ZooKeeperUtil.listChildrenNoWatch(watcher, watcher.znodePaths.splitLogZNode); if (tasks != null) { int listSize = tasks.size(); for (int i = 0; i < listSize; i++) { @@ -310,7 +310,7 @@ public class ZKSplitLogManagerCoordination extends ZooKeeperListener implements lastRecoveringNodeCreationTime = Long.MAX_VALUE; } else if (!recoveredServerNameSet.isEmpty()) { // Remove recovering regions which don't have any RS associated with it - List regions = ZKUtil.listChildrenNoWatch(watcher, + List regions = ZooKeeperUtil.listChildrenNoWatch(watcher, watcher.znodePaths.recoveringRegionsZNode); if (regions != null) { int listSize = regions.size(); @@ -328,21 +328,22 @@ public class 
ZKSplitLogManagerCoordination extends ZooKeeperListener implements continue; } } - String nodePath = ZKUtil.joinZNode(watcher.znodePaths.recoveringRegionsZNode, region); - List failedServers = ZKUtil.listChildrenNoWatch(watcher, nodePath); + String nodePath = ZooKeeperUtil + .joinZNode(watcher.znodePaths.recoveringRegionsZNode, region); + List failedServers = ZooKeeperUtil.listChildrenNoWatch(watcher, nodePath); if (failedServers == null || failedServers.isEmpty()) { - ZKUtil.deleteNode(watcher, nodePath); + ZooKeeperUtil.deleteNode(watcher, nodePath); continue; } if (recoveredServerNameSet.containsAll(failedServers)) { - ZKUtil.deleteNodeRecursively(watcher, nodePath); + ZooKeeperUtil.deleteNodeRecursively(watcher, nodePath); } else { int tmpFailedServerSize = failedServers.size(); for (int j = 0; j < tmpFailedServerSize; j++) { String failedServer = failedServers.get(j); if (recoveredServerNameSet.contains(failedServer)) { - String tmpPath = ZKUtil.joinZNode(nodePath, failedServer); - ZKUtil.deleteNode(watcher, tmpPath); + String tmpPath = ZooKeeperUtil.joinZNode(nodePath, failedServer); + ZooKeeperUtil.deleteNode(watcher, tmpPath); } } } @@ -360,7 +361,7 @@ public class ZKSplitLogManagerCoordination extends ZooKeeperListener implements // Once a task znode is ready for delete, that is it is in the TASK_DONE // state, then no one should be writing to it anymore. That is no one // will be updating the znode version any more. 
- this.watcher.getRecoverableZooKeeper().getZooKeeper() + this.watcher.getRecoverableZK().getZooKeeper() .delete(path, -1, new DeleteAsyncCallback(), retries); } @@ -416,7 +417,7 @@ public class ZKSplitLogManagerCoordination extends ZooKeeperListener implements private void createNode(String path, Long retry_count) { SplitLogTask slt = new SplitLogTask.Unassigned(details.getServerName(), getRecoveryMode()); - ZKUtil.asyncCreate(this.watcher, path, slt.toByteArray(), new CreateAsyncCallback(), + ZooKeeperUtil.asyncCreate(this.watcher, path, slt.toByteArray(), new CreateAsyncCallback(), retry_count); SplitLogCounters.tot_mgr_node_create_queued.increment(); return; @@ -434,7 +435,7 @@ public class ZKSplitLogManagerCoordination extends ZooKeeperListener implements } private void getDataSetWatch(String path, Long retry_count) { - this.watcher.getRecoverableZooKeeper().getZooKeeper() + this.watcher.getRecoverableZK().getZooKeeper() .getData(path, this.watcher, new GetDataAsyncCallback(), retry_count); SplitLogCounters.tot_mgr_get_data_queued.increment(); } @@ -453,7 +454,7 @@ public class ZKSplitLogManagerCoordination extends ZooKeeperListener implements setDone(path, FAILURE); return; } - data = RecoverableZooKeeper.removeMetaData(data); + data = RecoverableZK.removeMetaData(data); SplitLogTask slt = SplitLogTask.parseFrom(data); if (slt.isUnassigned()) { LOG.debug("task not yet acquired " + path + " ver = " + version); @@ -562,7 +563,7 @@ public class ZKSplitLogManagerCoordination extends ZooKeeperListener implements private void lookForOrphans() { List orphans; try { - orphans = ZKUtil.listChildrenNoWatch(this.watcher, this.watcher.znodePaths.splitLogZNode); + orphans = ZooKeeperUtil.listChildrenNoWatch(this.watcher, this.watcher.znodePaths.splitLogZNode); if (orphans == null) { LOG.warn("could not get children of " + this.watcher.znodePaths.splitLogZNode); return; @@ -576,7 +577,7 @@ public class ZKSplitLogManagerCoordination extends ZooKeeperListener implements int 
listSize = orphans.size(); for (int i = 0; i < listSize; i++) { String path = orphans.get(i); - String nodepath = ZKUtil.joinZNode(watcher.znodePaths.splitLogZNode, path); + String nodepath = ZooKeeperUtil.joinZNode(watcher.znodePaths.splitLogZNode, path); if (ZKSplitLog.isRescanNode(watcher, nodepath)) { rescan_nodes++; LOG.debug("found orphan rescan node " + path); @@ -604,7 +605,7 @@ public class ZKSplitLogManagerCoordination extends ZooKeeperListener implements long retries = this.zkretries; do { - String nodePath = ZKUtil.joinZNode(watcher.znodePaths.recoveringRegionsZNode, + String nodePath = ZooKeeperUtil.joinZNode(watcher.znodePaths.recoveringRegionsZNode, regionEncodeName); long lastRecordedFlushedSequenceId = -1; try { @@ -616,25 +617,25 @@ public class ZKSplitLogManagerCoordination extends ZooKeeperListener implements * znode layout: .../region_id[last known flushed sequence id]/failed server[last known * flushed sequence id for the server] */ - byte[] data = ZKUtil.getData(this.watcher, nodePath); + byte[] data = ZooKeeperUtil.getData(this.watcher, nodePath); if (data == null) { - ZKUtil + ZooKeeperUtil .createSetData(this.watcher, nodePath, ZKUtil.positionToByteArray(lastSequenceId)); } else { lastRecordedFlushedSequenceId = - ZKSplitLog.parseLastFlushedSequenceIdFrom(data); + ZKUtil.parseLastFlushedSequenceIdFrom(data); if (lastRecordedFlushedSequenceId < lastSequenceId) { // update last flushed sequence id in the region level - ZKUtil.setData(this.watcher, nodePath, ZKUtil.positionToByteArray(lastSequenceId)); + ZooKeeperUtil.setData(this.watcher, nodePath, ZKUtil.positionToByteArray(lastSequenceId)); } } // go one level deeper with server name - nodePath = ZKUtil.joinZNode(nodePath, serverName.getServerName()); + nodePath = ZooKeeperUtil.joinZNode(nodePath, serverName.getServerName()); if (lastSequenceId <= lastRecordedFlushedSequenceId) { // the newly assigned RS failed even before any flush to the region lastSequenceId = 
lastRecordedFlushedSequenceId; } - ZKUtil.createSetData(this.watcher, nodePath, + ZooKeeperUtil.createSetData(this.watcher, nodePath, ZKUtil.regionSequenceIdsToByteArray(lastSequenceId, null)); if (LOG.isDebugEnabled()) { LOG.debug("Marked " + regionEncodeName + " recovering from " + serverName + @@ -681,15 +682,15 @@ public class ZKSplitLogManagerCoordination extends ZooKeeperListener implements throws IOException, InterruptedIOException { try { - List tasks = ZKUtil.listChildrenNoWatch(watcher, watcher.znodePaths.splitLogZNode); + List tasks = ZooKeeperUtil.listChildrenNoWatch(watcher, watcher.znodePaths.splitLogZNode); if (tasks != null) { int listSize = tasks.size(); for (int i = 0; i < listSize; i++) { String t = tasks.get(i); byte[] data; try { - data = ZKUtil.getData(this.watcher, - ZKUtil.joinZNode(watcher.znodePaths.splitLogZNode, t)); + data = ZooKeeperUtil.getData(this.watcher, + ZooKeeperUtil.joinZNode(watcher.znodePaths.splitLogZNode, t)); } catch (InterruptedException e) { throw new InterruptedIOException(); } @@ -717,16 +718,16 @@ public class ZKSplitLogManagerCoordination extends ZooKeeperListener implements } // remove recovering regions which doesn't have any RS associated with it - List regions = ZKUtil.listChildrenNoWatch(watcher, + List regions = ZooKeeperUtil.listChildrenNoWatch(watcher, watcher.znodePaths.recoveringRegionsZNode); if (regions != null) { int listSize = regions.size(); for (int i = 0; i < listSize; i++) { - String nodePath = ZKUtil.joinZNode(watcher.znodePaths.recoveringRegionsZNode, + String nodePath = ZooKeeperUtil.joinZNode(watcher.znodePaths.recoveringRegionsZNode, regions.get(i)); - List regionFailedServers = ZKUtil.listChildrenNoWatch(watcher, nodePath); + List regionFailedServers = ZooKeeperUtil.listChildrenNoWatch(watcher, nodePath); if (regionFailedServers == null || regionFailedServers.isEmpty()) { - ZKUtil.deleteNode(watcher, nodePath); + ZooKeeperUtil.deleteNode(watcher, nodePath); continue; } boolean 
needMoreRecovery = false; @@ -738,7 +739,7 @@ public class ZKSplitLogManagerCoordination extends ZooKeeperListener implements } } if (!needMoreRecovery) { - ZKUtil.deleteNodeRecursively(watcher, nodePath); + ZooKeeperUtil.deleteNodeRecursively(watcher, nodePath); } } } @@ -758,7 +759,7 @@ public class ZKSplitLogManagerCoordination extends ZooKeeperListener implements } private List listSplitLogTasks() throws KeeperException { - List taskOrRescanList = ZKUtil.listChildrenNoWatch(watcher, + List taskOrRescanList = ZooKeeperUtil.listChildrenNoWatch(watcher, watcher.znodePaths.splitLogZNode); if (taskOrRescanList == null || taskOrRescanList.isEmpty()) { return Collections. emptyList(); @@ -804,7 +805,7 @@ public class ZKSplitLogManagerCoordination extends ZooKeeperListener implements // Firstly check if there are outstanding recovering regions try { - List regions = ZKUtil.listChildrenNoWatch(watcher, + List regions = ZooKeeperUtil.listChildrenNoWatch(watcher, watcher.znodePaths.recoveringRegionsZNode); if (regions != null && !regions.isEmpty()) { hasRecoveringRegions = true; @@ -821,8 +822,8 @@ public class ZKSplitLogManagerCoordination extends ZooKeeperListener implements for (int i = 0; i < listSize; i++) { String task = tasks.get(i); try { - byte[] data = ZKUtil.getData(this.watcher, - ZKUtil.joinZNode(watcher.znodePaths.splitLogZNode, task)); + byte[] data = ZooKeeperUtil.getData(this.watcher, + ZooKeeperUtil.joinZNode(watcher.znodePaths.splitLogZNode, task)); if (data == null) continue; SplitLogTask slt = SplitLogTask.parseFrom(data); previousRecoveryMode = slt.getMode(); @@ -882,7 +883,7 @@ public class ZKSplitLogManagerCoordination extends ZooKeeperListener implements // blocking zk call but this is done from the timeout thread SplitLogTask slt = new SplitLogTask.Unassigned(this.details.getServerName(), getRecoveryMode()); - if (ZKUtil.setData(this.watcher, path, slt.toByteArray(), version) == false) { + if (ZooKeeperUtil.setData(this.watcher, path, 
slt.toByteArray(), version) == false) { LOG.debug("failed to resubmit task " + path + " version changed"); return false; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkCoordinatedStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkCoordinatedStateManager.java index 10e2642e32..60c2b66fb1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkCoordinatedStateManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkCoordinatedStateManager.java @@ -29,7 +29,7 @@ import org.apache.hadoop.hbase.procedure.ProcedureCoordinatorRpcs; import org.apache.hadoop.hbase.procedure.ProcedureMemberRpcs; import org.apache.hadoop.hbase.procedure.ZKProcedureCoordinator; import org.apache.hadoop.hbase.procedure.ZKProcedureMemberRpcs; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.zookeeper.KeeperException; /** @@ -37,7 +37,7 @@ import org.apache.zookeeper.KeeperException; */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) public class ZkCoordinatedStateManager implements CoordinatedStateManager { - protected ZooKeeperWatcher watcher; + protected ZKWatcher watcher; protected SplitLogWorkerCoordination splitLogWorkerCoordination; protected SplitLogManagerCoordination splitLogManagerCoordination; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java index 14e7796e16..12d544c0df 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java @@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.coordination; import java.io.IOException; +import java.io.InterruptedIOException; 
import java.util.ArrayList; import java.util.List; import java.util.Map; @@ -37,6 +38,11 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.SplitLogCounters; import org.apache.hadoop.hbase.SplitLogTask; +import org.apache.hadoop.hbase.zookeeper.RecoverableZK; +import org.apache.hadoop.hbase.zookeeper.ZKSplitLog; +import org.apache.hadoop.hbase.zookeeper.ZKUtil; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperUtil; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds; @@ -50,11 +56,7 @@ import org.apache.hadoop.hbase.regionserver.handler.WALSplitterHandler; import org.apache.hadoop.hbase.util.CancelableProgressable; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; -import org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper; -import org.apache.hadoop.hbase.zookeeper.ZKSplitLog; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperListener; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKListener; import org.apache.hadoop.util.StringUtils; import org.apache.zookeeper.AsyncCallback; import org.apache.zookeeper.KeeperException; @@ -66,7 +68,7 @@ import org.apache.zookeeper.data.Stat; * */ @InterfaceAudience.Private -public class ZkSplitLogWorkerCoordination extends ZooKeeperListener implements +public class ZkSplitLogWorkerCoordination extends ZKListener implements SplitLogWorkerCoordination { private static final Log LOG = LogFactory.getLog(ZkSplitLogWorkerCoordination.class); @@ -92,13 +94,13 @@ public class ZkSplitLogWorkerCoordination extends ZooKeeperListener implements private final ServerName serverName; 
- public ZkSplitLogWorkerCoordination(ServerName serverName, ZooKeeperWatcher watcher) { + public ZkSplitLogWorkerCoordination(ServerName serverName, ZKWatcher watcher) { super(watcher); this.serverName = serverName; } /** - * Override handler from {@link ZooKeeperListener} + * Override handler from {@link ZKListener} */ @Override public void nodeChildrenChanged(String path) { @@ -112,7 +114,7 @@ public class ZkSplitLogWorkerCoordination extends ZooKeeperListener implements } /** - * Override handler from {@link ZooKeeperListener} + * Override handler from {@link ZKListener} */ @Override public void nodeDataChanged(String path) { @@ -165,7 +167,7 @@ public class ZkSplitLogWorkerCoordination extends ZooKeeperListener implements } public void getDataSetWatchAsync() { - watcher.getRecoverableZooKeeper().getZooKeeper() + watcher.getRecoverableZK().getZooKeeper() .getData(currentTask, watcher, new GetDataAsyncCallback(), null); SplitLogCounters.tot_wkr_get_data_queued.increment(); } @@ -217,7 +219,7 @@ public class ZkSplitLogWorkerCoordination extends ZooKeeperListener implements } try { try { - if ((data = ZKUtil.getDataNoWatch(watcher, path, stat)) == null) { + if ((data = ZooKeeperUtil.getDataNoWatch(watcher, path, stat)) == null) { SplitLogCounters.tot_wkr_failed_to_grab_task_no_data.increment(); return; } @@ -333,7 +335,7 @@ public class ZkSplitLogWorkerCoordination extends ZooKeeperListener implements int availableRSs = 1; try { List regionServers = - ZKUtil.listChildrenNoWatch(watcher, watcher.znodePaths.rsZNode); + ZooKeeperUtil.listChildrenNoWatch(watcher, watcher.znodePaths.rsZNode); availableRSs = Math.max(availableRSs, (regionServers == null) ? 
0 : regionServers.size()); } catch (KeeperException e) { // do nothing @@ -360,12 +362,12 @@ public class ZkSplitLogWorkerCoordination extends ZooKeeperListener implements * @param taskZKVersion version of the task in zk * @return non-negative integer value when task can be owned by current region server otherwise -1 */ - protected static int attemptToOwnTask(boolean isFirstTime, ZooKeeperWatcher zkw, + protected static int attemptToOwnTask(boolean isFirstTime, ZKWatcher zkw, ServerName server, String task, RecoveryMode mode, int taskZKVersion) { int latestZKVersion = FAILED_TO_OWN_TASK; try { SplitLogTask slt = new SplitLogTask.Owned(server, mode); - Stat stat = zkw.getRecoverableZooKeeper().setData(task, slt.toByteArray(), taskZKVersion); + Stat stat = zkw.getRecoverableZK().setData(task, slt.toByteArray(), taskZKVersion); if (stat == null) { LOG.warn("zk.setData() returned null for path " + task); SplitLogCounters.tot_wkr_task_heartbeat_failed.increment(); @@ -427,7 +429,7 @@ public class ZkSplitLogWorkerCoordination extends ZooKeeperListener implements // don't call ZKSplitLog.getNodeName() because that will lead to // double encoding of the path name if (this.calculateAvailableSplitters(numTasks) > 0) { - grabTask(ZKUtil.joinZNode(watcher.znodePaths.splitLogZNode, paths.get(idx))); + grabTask(ZooKeeperUtil.joinZNode(watcher.znodePaths.splitLogZNode, paths.get(idx))); } else { LOG.debug("Current region server " + server.getServerName() + " has " + this.tasksInProgress.get() + " tasks in progress and can't take more."); @@ -451,10 +453,10 @@ public class ZkSplitLogWorkerCoordination extends ZooKeeperListener implements int listSize = tmpCopy.size(); for (int i = 0; i < listSize; i++) { String region = tmpCopy.get(i); - String nodePath = ZKUtil.joinZNode(watcher.znodePaths.recoveringRegionsZNode, + String nodePath = ZooKeeperUtil.joinZNode(watcher.znodePaths.recoveringRegionsZNode, region); try { - if (ZKUtil.checkExists(watcher, nodePath) == -1) { + if 
(ZooKeeperUtil.checkExists(watcher, nodePath) == -1) { server.getExecutorService().submit( new FinishRegionRecoveringHandler(server, region, nodePath)); } else { @@ -485,7 +487,7 @@ public class ZkSplitLogWorkerCoordination extends ZooKeeperListener implements // it will come out if worker thread exited. while (!shouldStop) { try { - childrenPaths = ZKUtil.listChildrenAndWatchForNewChildren(watcher, + childrenPaths = ZooKeeperUtil.listChildrenAndWatchForNewChildren(watcher, watcher.znodePaths.splitLogZNode); if (childrenPaths != null) { return childrenPaths; @@ -509,7 +511,7 @@ public class ZkSplitLogWorkerCoordination extends ZooKeeperListener implements public boolean isReady() throws InterruptedException { int result = -1; try { - result = ZKUtil.checkExists(watcher, watcher.znodePaths.splitLogZNode); + result = ZooKeeperUtil.checkExists(watcher, watcher.znodePaths.splitLogZNode); } catch (KeeperException e) { // ignore LOG.warn("Exception when checking for " + watcher.znodePaths.splitLogZNode @@ -553,7 +555,7 @@ public class ZkSplitLogWorkerCoordination extends ZooKeeperListener implements @Override public RegionStoreSequenceIds getRegionFlushedSequenceId(String failedServerName, String key) throws IOException { - return ZKSplitLog.getRegionFlushedSequenceId(watcher, failedServerName, key); + return getRegionFlushedSequenceId(watcher, failedServerName, key); } /** @@ -570,7 +572,7 @@ public class ZkSplitLogWorkerCoordination extends ZooKeeperListener implements getDataSetWatchFailure(path); return; } - data = RecoverableZooKeeper.removeMetaData(data); + data = RecoverableZK.removeMetaData(data); getDataSetWatchSuccess(path, data); } } @@ -590,7 +592,7 @@ public class ZkSplitLogWorkerCoordination extends ZooKeeperListener implements String task = zkDetails.getTaskNode(); int taskZKVersion = zkDetails.getCurTaskZKVersion().intValue(); try { - if (ZKUtil.setData(watcher, task, slt.toByteArray(), taskZKVersion)) { + if (ZooKeeperUtil.setData(watcher, task, 
slt.toByteArray(), taskZKVersion)) { LOG.info("successfully transitioned task " + task + " to final state " + slt); ctr.increment(); return; @@ -647,4 +649,44 @@ public class ZkSplitLogWorkerCoordination extends ZooKeeperListener implements } } + /** + * This function is used in distributedLogReplay to fetch last flushed sequence id from ZK + * @param zkw zookeeper watcher used to read the recovering-regions znodes + * @param serverName name of the failed region server whose last flushed sequence id was recorded + * @param encodedRegionName encoded name of the region, used to build the znode path + * @return the last flushed sequence ids recorded in ZK of the region for serverName + * @throws IOException if the data cannot be fetched from ZooKeeper or the read is interrupted + */ + + public static RegionStoreSequenceIds getRegionFlushedSequenceId(ZKWatcher zkw, + String serverName, String encodedRegionName) throws IOException { + // when SplitLogWorker recovers a region by directly replaying unflushed WAL edits, + // last flushed sequence Id changes when newly assigned RS flushes writes to the region. + // If the newly assigned RS fails again(a chained RS failures scenario), the last flushed + // sequence Id name space (sequence Id only valid for a particular RS instance), changes + // when different newly assigned RS flushes the region. + // Therefore, in this mode we need to fetch last sequence Ids from ZK where we keep history of + // last flushed sequence Id for each failed RS instance.
+ RegionStoreSequenceIds result = null; + String nodePath = ZooKeeperUtil.joinZNode(zkw.znodePaths.recoveringRegionsZNode, encodedRegionName); + nodePath = ZooKeeperUtil.joinZNode(nodePath, serverName); + try { + byte[] data; + try { + data = ZooKeeperUtil.getData(zkw, nodePath); + } catch (InterruptedException e) { + throw new InterruptedIOException(); + } + if (data != null) { + result = ZKUtil.parseRegionStoreSequenceIds(data); + } + } catch (KeeperException e) { + throw new IOException("Cannot get lastFlushedSequenceId from ZooKeeper for server=" + + serverName + "; region=" + encodedRegionName, e); + } catch (DeserializationException e) { + LOG.warn("Can't parse last flushed sequence Id from znode:" + nodePath, e); + } + return result; + } + } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ActiveMasterManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ActiveMasterManager.java index 25e1ec8c3c..cd7f10274e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ActiveMasterManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ActiveMasterManager.java @@ -23,6 +23,8 @@ import java.util.concurrent.atomic.AtomicBoolean; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperUtil; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; @@ -31,9 +33,8 @@ import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.monitoring.MonitoredTask; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperListener; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import 
org.apache.hadoop.hbase.zookeeper.ZKListener; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.zookeeper.KeeperException; /** @@ -50,7 +51,7 @@ import org.apache.zookeeper.KeeperException; * the active master of the cluster. */ @InterfaceAudience.Private -public class ActiveMasterManager extends ZooKeeperListener { +public class ActiveMasterManager extends ZKListener { private static final Log LOG = LogFactory.getLog(ActiveMasterManager.class); final AtomicBoolean clusterHasActiveMaster = new AtomicBoolean(false); @@ -65,7 +66,7 @@ public class ActiveMasterManager extends ZooKeeperListener { * @param sn ServerName * @param master In an instance of a Master. */ - ActiveMasterManager(ZooKeeperWatcher watcher, ServerName sn, Server master) { + ActiveMasterManager(ZKWatcher watcher, ServerName sn, Server master) { super(watcher); watcher.registerListener(this); this.sn = sn; @@ -122,7 +123,7 @@ public class ActiveMasterManager extends ZooKeeperListener { // Watch the node and check if it exists. 
try { synchronized(clusterHasActiveMaster) { - if (ZKUtil.watchAndCheckExists(watcher, watcher.znodePaths.masterAddressZNode)) { + if (ZooKeeperUtil.watchAndCheckExists(watcher, watcher.znodePaths.masterAddressZNode)) { // A master node exists, there is an active master LOG.debug("A master is now available"); clusterHasActiveMaster.set(true); @@ -155,7 +156,7 @@ public class ActiveMasterManager extends ZooKeeperListener { */ boolean blockUntilBecomingActiveMaster( int checkInterval, MonitoredTask startupStatus) { - String backupZNode = ZKUtil.joinZNode( + String backupZNode = ZooKeeperUtil.joinZNode( this.watcher.znodePaths.backupMasterAddressesZNode, this.sn.toString()); while (!(master.isAborted() || master.isStopped())) { startupStatus.setStatus("Trying to register in ZK as active master"); @@ -167,9 +168,9 @@ public class ActiveMasterManager extends ZooKeeperListener { // If we were a backup master before, delete our ZNode from the backup // master directory since we are the active now) - if (ZKUtil.checkExists(this.watcher, backupZNode) != -1) { + if (ZooKeeperUtil.checkExists(this.watcher, backupZNode) != -1) { LOG.info("Deleting ZNode for " + backupZNode + " from backup master directory"); - ZKUtil.deleteNodeFailSilent(this.watcher, backupZNode); + ZooKeeperUtil.deleteNodeFailSilent(this.watcher, backupZNode); } // Save the znode in a file, this will allow to check if we crash in the launch scripts ZNodeClearer.writeMyEphemeralNodeOnDisk(this.sn.toString()); @@ -187,7 +188,7 @@ public class ActiveMasterManager extends ZooKeeperListener { String msg; byte[] bytes = - ZKUtil.getDataAndWatch(this.watcher, this.watcher.znodePaths.masterAddressZNode); + ZooKeeperUtil.getDataAndWatch(this.watcher, this.watcher.znodePaths.masterAddressZNode); if (bytes == null) { msg = ("A master was detected, but went down before its address " + "could be read. 
Attempting to become the next active master"); @@ -204,7 +205,7 @@ public class ActiveMasterManager extends ZooKeeperListener { msg = ("Current master has this master's address, " + currentMaster + "; master was restarted? Deleting node."); // Hurry along the expiration of the znode. - ZKUtil.deleteNode(this.watcher, this.watcher.znodePaths.masterAddressZNode); + ZooKeeperUtil.deleteNode(this.watcher, this.watcher.znodePaths.masterAddressZNode); // We may have failed to delete the znode at the previous step, but // we delete the file anyway: a second attempt to delete the znode is likely to fail again. @@ -244,7 +245,7 @@ public class ActiveMasterManager extends ZooKeeperListener { */ boolean hasActiveMaster() { try { - if (ZKUtil.checkExists(watcher, watcher.znodePaths.masterAddressZNode) >= 0) { + if (ZooKeeperUtil.checkExists(watcher, watcher.znodePaths.masterAddressZNode) >= 0) { return true; } } @@ -270,7 +271,7 @@ public class ActiveMasterManager extends ZooKeeperListener { LOG.warn("Failed get of master address: " + e.toString()); } if (activeMaster != null && activeMaster.equals(this.sn)) { - ZKUtil.deleteNode(watcher, watcher.znodePaths.masterAddressZNode); + ZooKeeperUtil.deleteNode(watcher, watcher.znodePaths.masterAddressZNode); // We may have failed to delete the znode at the previous step, but // we delete the file anyway: a second attempt to delete the znode is likely to fail again. 
ZNodeClearer.deleteMyEphemeralNodeOnDisk(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/DrainingServerTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DrainingServerTracker.java similarity index 86% rename from hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/DrainingServerTracker.java rename to hbase-server/src/main/java/org/apache/hadoop/hbase/master/DrainingServerTracker.java index cdc67942e8..3b03edfc49 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/DrainingServerTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/DrainingServerTracker.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.hbase.zookeeper; +package org.apache.hadoop.hbase.master; import java.io.IOException; import java.util.List; @@ -24,11 +24,12 @@ import java.util.TreeSet; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.zookeeper.ZKListener; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperUtil; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.master.ServerListener; -import org.apache.hadoop.hbase.master.ServerManager; import org.apache.zookeeper.KeeperException; /** @@ -49,14 +50,14 @@ import org.apache.zookeeper.KeeperException; * */ @InterfaceAudience.Private -public class DrainingServerTracker extends ZooKeeperListener { +public class DrainingServerTracker extends ZKListener { private static final Log LOG = LogFactory.getLog(DrainingServerTracker.class); private ServerManager serverManager; private final NavigableSet drainingServers = new TreeSet<>(); private Abortable abortable; - public DrainingServerTracker(ZooKeeperWatcher 
watcher, + public DrainingServerTracker(ZKWatcher watcher, Abortable abortable, ServerManager serverManager) { super(watcher); this.abortable = abortable; @@ -82,7 +83,7 @@ public class DrainingServerTracker extends ZooKeeperListener { } }); List servers = - ZKUtil.listChildrenAndWatchThem(watcher, watcher.znodePaths.drainingZNode); + ZooKeeperUtil.listChildrenAndWatchThem(watcher, watcher.znodePaths.drainingZNode); add(servers); } @@ -90,7 +91,7 @@ public class DrainingServerTracker extends ZooKeeperListener { synchronized(this.drainingServers) { this.drainingServers.clear(); for (String n: servers) { - final ServerName sn = ServerName.valueOf(ZKUtil.getNodeName(n)); + final ServerName sn = ServerName.valueOf(ZooKeeperUtil.getNodeName(n)); this.drainingServers.add(sn); this.serverManager.addServerToDrainList(sn); LOG.info("Draining RS node created, adding to list [" + @@ -110,7 +111,7 @@ public class DrainingServerTracker extends ZooKeeperListener { @Override public void nodeDeleted(final String path) { if(path.startsWith(watcher.znodePaths.drainingZNode)) { - final ServerName sn = ServerName.valueOf(ZKUtil.getNodeName(path)); + final ServerName sn = ServerName.valueOf(ZooKeeperUtil.getNodeName(path)); LOG.info("Draining RS node deleted, removing from list [" + sn + "]"); remove(sn); @@ -122,7 +123,7 @@ public class DrainingServerTracker extends ZooKeeperListener { if(path.equals(watcher.znodePaths.drainingZNode)) { try { final List newNodes = - ZKUtil.listChildrenAndWatchThem(watcher, watcher.znodePaths.drainingZNode); + ZooKeeperUtil.listChildrenAndWatchThem(watcher, watcher.znodePaths.drainingZNode); add(newNodes); } catch (KeeperException e) { abortable.abort("Unexpected zk exception getting RS nodes", e); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index c5c86e5f48..b5ab00c5f7 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -174,16 +174,13 @@ import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.util.VersionInfo; import org.apache.hadoop.hbase.util.ZKDataMigrator; -import org.apache.hadoop.hbase.zookeeper.DrainingServerTracker; import org.apache.hadoop.hbase.zookeeper.LoadBalancerTracker; import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker; import org.apache.hadoop.hbase.zookeeper.MasterMaintenanceModeTracker; import org.apache.hadoop.hbase.zookeeper.RegionNormalizerTracker; -import org.apache.hadoop.hbase.zookeeper.RegionServerTracker; -import org.apache.hadoop.hbase.zookeeper.SplitOrMergeTracker; import org.apache.hadoop.hbase.zookeeper.ZKClusterId; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperUtil; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.yetus.audience.InterfaceAudience; import org.apache.zookeeper.KeeperException; import org.eclipse.jetty.server.Server; @@ -722,7 +719,7 @@ public class HMaster extends HRegionServer implements MasterServices { LOG.info("Server active/primary master=" + this.serverName + ", sessionid=0x" + - Long.toHexString(this.zooKeeper.getRecoverableZooKeeper().getSessionId()) + + Long.toHexString(this.zooKeeper.getRecoverableZK().getSessionId()) + ", setting cluster-up flag (Was=" + wasUp + ")"); // create/initialize the snapshot manager and other procedure managers @@ -866,7 +863,7 @@ public class HMaster extends HRegionServer implements MasterServices { LOG.info("Converting state from zk to new states:" + entry); tableStateManager.setTableState(entry.getKey(), entry.getValue()); } - ZKUtil.deleteChildrenRecursively(getZooKeeper(), getZooKeeper().znodePaths.tableZNode); + 
ZooKeeperUtil.deleteChildrenRecursively(getZooKeeper(), getZooKeeper().znodePaths.tableZNode); status.setStatus("Submitting log splitting work for previously failed region servers"); metaBootstrap.processDeadServers(); @@ -1992,7 +1989,7 @@ public class HMaster extends HRegionServer implements MasterServices { } private void startActiveMasterManager(int infoPort) throws KeeperException { - String backupZNode = ZKUtil.joinZNode( + String backupZNode = ZooKeeperUtil.joinZNode( zooKeeper.znodePaths.backupMasterAddressesZNode, serverName.toString()); /* * Add a ZNode for ourselves in the backup master directory since we @@ -2487,7 +2484,7 @@ public class HMaster extends HRegionServer implements MasterServices { // Build Set of backup masters from ZK nodes List backupMasterStrings; try { - backupMasterStrings = ZKUtil.listChildrenNoWatch(this.zooKeeper, + backupMasterStrings = ZooKeeperUtil.listChildrenNoWatch(this.zooKeeper, this.zooKeeper.znodePaths.backupMasterAddressesZNode); } catch (KeeperException e) { LOG.warn(this.zooKeeper.prefix("Unable to list backup servers"), e); @@ -2501,7 +2498,7 @@ public class HMaster extends HRegionServer implements MasterServices { try { byte [] bytes; try { - bytes = ZKUtil.getData(this.zooKeeper, ZKUtil.joinZNode( + bytes = ZooKeeperUtil.getData(this.zooKeeper, ZooKeeperUtil.joinZNode( this.zooKeeper.znodePaths.backupMasterAddressesZNode, s)); } catch (InterruptedException e) { throw new InterruptedIOException(); @@ -2624,7 +2621,7 @@ public class HMaster extends HRegionServer implements MasterServices { } @Override - public ZooKeeperWatcher getZooKeeper() { + public ZKWatcher getZooKeeper() { return zooKeeper; } @@ -3417,8 +3414,8 @@ public class HMaster extends HRegionServer implements MasterServices { String parentZnode = getZooKeeper().znodePaths.drainingZNode; for (ServerName server : servers) { try { - String node = ZKUtil.joinZNode(parentZnode, server.getServerName()); - ZKUtil.createAndFailSilent(getZooKeeper(), node); + 
String node = ZooKeeperUtil.joinZNode(parentZnode, server.getServerName()); + ZooKeeperUtil.createAndFailSilent(getZooKeeper(), node); } catch (KeeperException ke) { throw new HBaseIOException( this.zooKeeper.prefix("Unable to decommission '" + server.getServerName() + "'."), ke); @@ -3464,9 +3461,9 @@ public class HMaster extends HRegionServer implements MasterServices { final List encodedRegionNames) throws HBaseIOException { // Remove the server from decommissioned (draining) server list. String parentZnode = getZooKeeper().znodePaths.drainingZNode; - String node = ZKUtil.joinZNode(parentZnode, server.getServerName()); + String node = ZooKeeperUtil.joinZNode(parentZnode, server.getServerName()); try { - ZKUtil.deleteNodeFailSilent(getZooKeeper(), node); + ZooKeeperUtil.deleteNodeFailSilent(getZooKeeper(), node); } catch (KeeperException ke) { throw new HBaseIOException( this.zooKeeper.prefix("Unable to recommission '" + server.getServerName() + "'."), ke); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMasterCommandLine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMasterCommandLine.java index f9a441d5ab..90471b53e2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMasterCommandLine.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMasterCommandLine.java @@ -33,7 +33,8 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.LocalHBaseCluster; import org.apache.hadoop.hbase.MasterNotRunningException; import org.apache.hadoop.hbase.ZNodeClearer; -import org.apache.hadoop.hbase.ZooKeeperConnectionException; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperConnectionException; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperUtil; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; @@ -42,7 +43,6 @@ import org.apache.hadoop.hbase.regionserver.HRegionServer; 
import org.apache.hadoop.hbase.util.JVMClusterUtil; import org.apache.hadoop.hbase.util.ServerCommandLine; import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.zookeeper.KeeperException; @@ -196,7 +196,7 @@ public class HMasterCommandLine extends ServerCommandLine { } // login the zookeeper server principal (if using security) - ZKUtil.loginServer(conf, HConstants.ZK_SERVER_KEYTAB_FILE, + ZooKeeperUtil.loginServer(conf, HConstants.ZK_SERVER_KEYTAB_FILE, HConstants.ZK_SERVER_KERBEROS_PRINCIPAL, null); int localZKClusterSessionTimeout = conf.getInt(HConstants.ZK_SESSION_TIMEOUT + ".localHBaseCluster", 10*1000); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMetaBootstrap.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMetaBootstrap.java index 677a2a0b30..031812d838 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMetaBootstrap.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMetaBootstrap.java @@ -34,8 +34,8 @@ import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.master.assignment.AssignmentManager; import org.apache.hadoop.hbase.monitoring.MonitoredTask; import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperUtil; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.yetus.audience.InterfaceAudience; import org.apache.zookeeper.KeeperException; @@ -85,7 +85,7 @@ public class MasterMetaBootstrap { } private void unassignExcessMetaReplica(int numMetaReplicasConfigured) { - final ZooKeeperWatcher zooKeeper = master.getZooKeeper(); + final ZKWatcher zooKeeper = master.getZooKeeper(); // unassign the unneeded replicas (for 
e.g., if the previous master was configured // with a replication of 3 and now it is 2, we need to unassign the 1 unneeded replica) try { @@ -98,7 +98,7 @@ public class MasterMetaBootstrap { // send a close and wait for a max of 30 seconds ServerManager.closeRegionSilentlyAndWait(master.getClusterConnection(), r.getServerName(), r.getRegion(), 30000); - ZKUtil.deleteNode(zooKeeper, zooKeeper.znodePaths.getZNodeForReplica(replicaId)); + ZooKeeperUtil.deleteNode(zooKeeper, zooKeeper.znodePaths.getZNodeForReplica(replicaId)); } } } catch (Exception ex) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapperImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapperImpl.java index d13fb76ba8..be25e15054 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapperImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapperImpl.java @@ -26,10 +26,11 @@ import java.util.Map.Entry; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.quotas.QuotaObserverChore; import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; /** * Impl for exposing HMaster Information through JMX @@ -70,7 +71,7 @@ public class MetricsMasterWrapperImpl implements MetricsMasterWrapper { @Override public String getZookeeperQuorum() { - ZooKeeperWatcher zk = master.getZooKeeper(); + ZKWatcher zk = master.getZooKeeper(); if (zk == null) { return ""; } @@ -100,7 +101,7 @@ public class MetricsMasterWrapperImpl implements MetricsMasterWrapper { } return StringUtils.join(serverManager.getOnlineServers().keySet(), ";"); } - + @Override public int 
getNumRegionServers() { ServerManager serverManager = this.master.getServerManager(); @@ -119,7 +120,7 @@ public class MetricsMasterWrapperImpl implements MetricsMasterWrapper { return StringUtils.join(serverManager.getDeadServers().copyServerNames(), ";"); } - + @Override public int getNumDeadRegionServers() { ServerManager serverManager = this.master.getServerManager(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/RegionServerTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionServerTracker.java similarity index 87% rename from hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/RegionServerTracker.java rename to hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionServerTracker.java index 47d504c03d..5584bfb21d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/RegionServerTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionServerTracker.java @@ -16,7 +16,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.hadoop.hbase.zookeeper; +package org.apache.hadoop.hbase.master; import java.io.IOException; import java.io.InterruptedIOException; @@ -28,9 +28,10 @@ import java.util.TreeMap; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.zookeeper.ZKListener; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperUtil; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hadoop.hbase.master.MasterServices; -import org.apache.hadoop.hbase.master.ServerManager; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionServerInfo; import org.apache.zookeeper.KeeperException; @@ -46,13 +47,13 @@ import org.apache.zookeeper.KeeperException; * {@link ServerManager#expireServer(ServerName)} */ @InterfaceAudience.Private -public class RegionServerTracker extends ZooKeeperListener { +public class RegionServerTracker extends ZKListener { private static final Log LOG = LogFactory.getLog(RegionServerTracker.class); private NavigableMap regionServers = new TreeMap<>(); private ServerManager serverManager; private MasterServices server; - public RegionServerTracker(ZooKeeperWatcher watcher, + public RegionServerTracker(ZKWatcher watcher, MasterServices server, ServerManager serverManager) { super(watcher); this.server = server; @@ -70,7 +71,7 @@ public class RegionServerTracker extends ZooKeeperListener { public void start() throws KeeperException, IOException { watcher.registerListener(this); List servers = - ZKUtil.listChildrenAndWatchThem(watcher, watcher.znodePaths.rsZNode); + ZooKeeperUtil.listChildrenAndWatchThem(watcher, watcher.znodePaths.rsZNode); refresh(servers); } @@ -78,12 +79,12 @@ public class RegionServerTracker extends ZooKeeperListener { synchronized(this.regionServers) { this.regionServers.clear(); 
for (String n: servers) { - ServerName sn = ServerName.parseServerName(ZKUtil.getNodeName(n)); + ServerName sn = ServerName.parseServerName(ZooKeeperUtil.getNodeName(n)); if (regionServers.get(sn) == null) { RegionServerInfo.Builder rsInfoBuilder = RegionServerInfo.newBuilder(); try { - String nodePath = ZKUtil.joinZNode(watcher.znodePaths.rsZNode, n); - byte[] data = ZKUtil.getData(watcher, nodePath); + String nodePath = ZooKeeperUtil.joinZNode(watcher.znodePaths.rsZNode, n); + byte[] data = ZooKeeperUtil.getData(watcher, nodePath); if (data != null && data.length > 0 && ProtobufUtil.isPBMagicPrefix(data)) { int magicLen = ProtobufUtil.lengthOfPBMagic(); ProtobufUtil.mergeFrom(rsInfoBuilder, data, magicLen, data.length - magicLen); @@ -116,7 +117,7 @@ public class RegionServerTracker extends ZooKeeperListener { @Override public void nodeDeleted(String path) { if (path.startsWith(watcher.znodePaths.rsZNode)) { - String serverName = ZKUtil.getNodeName(path); + String serverName = ZooKeeperUtil.getNodeName(path); LOG.info("RegionServer ephemeral node deleted, processing expiration [" + serverName + "]"); ServerName sn = ServerName.parseServerName(serverName); @@ -136,7 +137,7 @@ public class RegionServerTracker extends ZooKeeperListener { && !server.isAborted() && !server.isStopped()) { try { List servers = - ZKUtil.listChildrenAndWatchThem(watcher, watcher.znodePaths.rsZNode); + ZooKeeperUtil.listChildrenAndWatchThem(watcher, watcher.znodePaths.rsZNode); refresh(servers); } catch (IOException e) { server.abort("Unexpected zk exception getting RS nodes", e); @@ -149,7 +150,7 @@ public class RegionServerTracker extends ZooKeeperListener { public RegionServerInfo getRegionServerInfo(final ServerName sn) { return regionServers.get(sn); } - + /** * Gets the online servers. 
* @return list of online servers diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java index c0143030a4..203afb28e2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java @@ -42,14 +42,13 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ClockOutOfSyncException; -import org.apache.hadoop.hbase.HBaseIOException; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.NotServingRegionException; import org.apache.hadoop.hbase.RegionLoad; import org.apache.hadoop.hbase.ServerLoad; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.YouAreDeadException; -import org.apache.hadoop.hbase.ZooKeeperConnectionException; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperConnectionException; import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RetriesExhaustedException; @@ -58,8 +57,8 @@ import org.apache.hadoop.hbase.ipc.RpcControllerFactory; import org.apache.hadoop.hbase.monitoring.MonitoredTask; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperUtil; import org.apache.yetus.audience.InterfaceAudience; import org.apache.zookeeper.KeeperException; @@ -507,7 +506,7 @@ public class ServerManager { void letRegionServersShutdown() { long previousLogTime = 0; ServerName sn = master.getServerName(); - ZooKeeperWatcher zkw = master.getZooKeeper(); + 
ZKWatcher zkw = master.getZooKeeper(); int onlineServersCt; while ((onlineServersCt = onlineServers.size()) > 0){ @@ -554,9 +553,9 @@ public class ServerManager { } } - private List getRegionServersInZK(final ZooKeeperWatcher zkw) + private List getRegionServersInZK(final ZKWatcher zkw) throws KeeperException { - return ZKUtil.listChildrenNoWatch(zkw, zkw.znodePaths.rsZNode); + return ZooKeeperUtil.listChildrenNoWatch(zkw, zkw.znodePaths.rsZNode); } /* diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/SplitOrMergeTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitOrMergeTracker.java similarity index 80% rename from hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/SplitOrMergeTracker.java rename to hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitOrMergeTracker.java index 8323ec4692..d8c31bd912 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/SplitOrMergeTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitOrMergeTracker.java @@ -15,12 +15,15 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.hadoop.hbase.zookeeper; +package org.apache.hadoop.hbase.master; import java.io.IOException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Abortable; +import org.apache.hadoop.hbase.zookeeper.ZKNodeTracker; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperUtil; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.MasterSwitchType; import org.apache.hadoop.hbase.exceptions.DeserializationException; @@ -43,18 +46,18 @@ public class SplitOrMergeTracker { private SwitchStateTracker splitStateTracker; private SwitchStateTracker mergeStateTracker; - public SplitOrMergeTracker(ZooKeeperWatcher watcher, Configuration conf, + public SplitOrMergeTracker(ZKWatcher watcher, Configuration conf, Abortable abortable) { try { - if (ZKUtil.checkExists(watcher, watcher.znodePaths.switchZNode) < 0) { - ZKUtil.createAndFailSilent(watcher, watcher.znodePaths.switchZNode); + if (ZooKeeperUtil.checkExists(watcher, watcher.znodePaths.switchZNode) < 0) { + ZooKeeperUtil.createAndFailSilent(watcher, watcher.znodePaths.switchZNode); } } catch (KeeperException e) { throw new RuntimeException(e); } - splitZnode = ZKUtil.joinZNode(watcher.znodePaths.switchZNode, + splitZnode = ZooKeeperUtil.joinZNode(watcher.znodePaths.switchZNode, conf.get("zookeeper.znode.switch.split", "split")); - mergeZnode = ZKUtil.joinZNode(watcher.znodePaths.switchZNode, + mergeZnode = ZooKeeperUtil.joinZNode(watcher.znodePaths.switchZNode, conf.get("zookeeper.znode.switch.merge", "merge")); splitStateTracker = new SwitchStateTracker(watcher, splitZnode, abortable); mergeStateTracker = new SwitchStateTracker(watcher, mergeZnode, abortable); @@ -67,12 +70,12 @@ public class SplitOrMergeTracker { public boolean isSplitOrMergeEnabled(MasterSwitchType switchType) { switch (switchType) { - case SPLIT: - return splitStateTracker.isSwitchEnabled(); - case MERGE: - return 
mergeStateTracker.isSwitchEnabled(); - default: - break; + case SPLIT: + return splitStateTracker.isSwitchEnabled(); + case MERGE: + return mergeStateTracker.isSwitchEnabled(); + default: + break; } return false; } @@ -91,9 +94,9 @@ public class SplitOrMergeTracker { } } - private static class SwitchStateTracker extends ZooKeeperNodeTracker { + private static class SwitchStateTracker extends ZKNodeTracker { - public SwitchStateTracker(ZooKeeperWatcher watcher, String node, Abortable abortable) { + public SwitchStateTracker(ZKWatcher watcher, String node, Abortable abortable) { super(watcher, node, abortable); } @@ -120,9 +123,9 @@ public class SplitOrMergeTracker { public void setSwitchEnabled(boolean enabled) throws KeeperException { byte [] upData = toByteArray(enabled); try { - ZKUtil.setData(watcher, node, upData); + ZooKeeperUtil.setData(watcher, node, upData); } catch(KeeperException.NoNodeException nne) { - ZKUtil.createAndWatch(watcher, node, upData); + ZooKeeperUtil.createAndWatch(watcher, node, upData); } super.nodeDataChanged(node); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKNodeCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKNodeCleaner.java index 822ca6f6f3..c83c98b86a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKNodeCleaner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKNodeCleaner.java @@ -30,6 +30,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Abortable; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperUtil; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.replication.ReplicationFactory; import org.apache.hadoop.hbase.replication.ReplicationPeers; @@ -37,8 +38,7 @@ import 
org.apache.hadoop.hbase.replication.ReplicationQueueInfo; import org.apache.hadoop.hbase.replication.ReplicationQueuesClient; import org.apache.hadoop.hbase.replication.ReplicationQueuesClientArguments; import org.apache.hadoop.hbase.replication.ReplicationStateZKBase; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.zookeeper.KeeperException; /** @@ -47,12 +47,12 @@ import org.apache.zookeeper.KeeperException; @InterfaceAudience.Private public class ReplicationZKNodeCleaner { private static final Log LOG = LogFactory.getLog(ReplicationZKNodeCleaner.class); - private final ZooKeeperWatcher zkw; + private final ZKWatcher zkw; private final ReplicationQueuesClient queuesClient; private final ReplicationPeers replicationPeers; private final ReplicationQueueDeletor queueDeletor; - public ReplicationZKNodeCleaner(Configuration conf, ZooKeeperWatcher zkw, Abortable abortable) + public ReplicationZKNodeCleaner(Configuration conf, ZKWatcher zkw, Abortable abortable) throws IOException { try { this.zkw = zkw; @@ -111,7 +111,7 @@ public class ReplicationZKNodeCleaner { Set peerIds = new HashSet<>(this.replicationPeers.getAllPeerIds()); String hfileRefsZNode = queueDeletor.getHfileRefsZNode(); try { - if (-1 == ZKUtil.checkExists(zkw, hfileRefsZNode)) { + if (-1 == ZooKeeperUtil.checkExists(zkw, hfileRefsZNode)) { return null; } List listOfPeers = this.queuesClient.getAllPeersFromHFileRefsQueue(); @@ -129,7 +129,7 @@ public class ReplicationZKNodeCleaner { private class ReplicationQueueDeletor extends ReplicationStateZKBase { - public ReplicationQueueDeletor(ZooKeeperWatcher zk, Configuration conf, Abortable abortable) { + public ReplicationQueueDeletor(ZKWatcher zk, Configuration conf, Abortable abortable) { super(zk, conf, abortable); } @@ -139,12 +139,13 @@ public class ReplicationZKNodeCleaner { * @throws IOException */ public void 
removeQueue(final String replicator, final String queueId) throws IOException { - String queueZnodePath = ZKUtil.joinZNode(ZKUtil.joinZNode(this.queuesZNode, replicator), + String queueZnodePath = ZooKeeperUtil + .joinZNode(ZooKeeperUtil.joinZNode(this.queuesZNode, replicator), queueId); try { ReplicationQueueInfo queueInfo = new ReplicationQueueInfo(queueId); if (!replicationPeers.getAllPeerIds().contains(queueInfo.getPeerId())) { - ZKUtil.deleteNodeRecursively(this.zookeeper, queueZnodePath); + ZooKeeperUtil.deleteNodeRecursively(this.zookeeper, queueZnodePath); LOG.info("Successfully removed replication queue, replicator: " + replicator + ", queueId: " + queueId); } @@ -159,10 +160,10 @@ public class ReplicationZKNodeCleaner { * @throws IOException */ public void removeHFileRefsQueue(final String hfileRefsQueueId) throws IOException { - String node = ZKUtil.joinZNode(this.hfileRefsZNode, hfileRefsQueueId); + String node = ZooKeeperUtil.joinZNode(this.hfileRefsZNode, hfileRefsQueueId); try { if (!replicationPeers.getAllPeerIds().contains(hfileRefsQueueId)) { - ZKUtil.deleteNodeRecursively(this.zookeeper, node); + ZooKeeperUtil.deleteNodeRecursively(this.zookeeper, node); LOG.info("Successfully removed hfile-refs queue " + hfileRefsQueueId + " from path " + hfileRefsZNode); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationManager.java index df94ffe7a5..7cfaefdb5b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationManager.java @@ -30,6 +30,7 @@ import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.ReplicationPeerNotFoundException; import org.apache.hadoop.hbase.TableName; +import 
org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.replication.BaseReplicationEndpoint; import org.apache.hadoop.hbase.replication.ReplicationException; @@ -39,7 +40,6 @@ import org.apache.hadoop.hbase.replication.ReplicationPeerDescription; import org.apache.hadoop.hbase.replication.ReplicationPeers; import org.apache.hadoop.hbase.replication.ReplicationQueuesClient; import org.apache.hadoop.hbase.replication.ReplicationQueuesClientArguments; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; /** * Manages and performs all replication admin operations. @@ -49,11 +49,11 @@ import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; public class ReplicationManager { private final Configuration conf; - private final ZooKeeperWatcher zkw; + private final ZKWatcher zkw; private final ReplicationQueuesClient replicationQueuesClient; private final ReplicationPeers replicationPeers; - public ReplicationManager(Configuration conf, ZooKeeperWatcher zkw, Abortable abortable) + public ReplicationManager(Configuration conf, ZKWatcher zkw, Abortable abortable) throws IOException { this.conf = conf; this.zkw = zkw; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureCoordinator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureCoordinator.java index 8d34fe4227..a78dee0682 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureCoordinator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureCoordinator.java @@ -24,11 +24,11 @@ import java.util.List; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperUtil; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.errorhandling.ForeignException; import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.apache.zookeeper.KeeperException; /** @@ -40,7 +40,7 @@ public class ZKProcedureCoordinator implements ProcedureCoordinatorRpcs { private ZKProcedureUtil zkProc = null; protected ProcedureCoordinator coordinator = null; // if started this should be non-null - ZooKeeperWatcher watcher; + ZKWatcher watcher; String procedureType; String coordName; @@ -51,7 +51,7 @@ public class ZKProcedureCoordinator implements ProcedureCoordinatorRpcs { * @param coordName name of the node running the coordinator * @throws KeeperException if an unexpected zk error occurs */ - public ZKProcedureCoordinator(ZooKeeperWatcher watcher, + public ZKProcedureCoordinator(ZKWatcher watcher, String procedureClass, String coordName) { this.watcher = watcher; this.procedureType = procedureClass; @@ -76,7 +76,7 @@ public class ZKProcedureCoordinator implements ProcedureCoordinatorRpcs { String abortNode = zkProc.getAbortZNode(procName); try { // check to see if the abort node already exists - if (ZKUtil.watchAndCheckExists(zkProc.getWatcher(), abortNode)) { + if (ZooKeeperUtil.watchAndCheckExists(zkProc.getWatcher(), abortNode)) { abort(abortNode); } // If we get an abort node watch triggered here, we'll go complete creating the acquired @@ -93,12 +93,12 @@ public class ZKProcedureCoordinator implements ProcedureCoordinatorRpcs { try { // notify all the procedure listeners to look for the acquire node byte[] data = ProtobufUtil.prependPBMagic(info); - ZKUtil.createWithParents(zkProc.getWatcher(), acquire, data); + ZooKeeperUtil.createWithParents(zkProc.getWatcher(), acquire, data); // loop through all the children of the acquire phase and watch for them for (String node : nodeNames) { - String znode = ZKUtil.joinZNode(acquire, node); + String znode = ZooKeeperUtil.joinZNode(acquire, node); LOG.debug("Watching for acquire 
node:" + znode); - if (ZKUtil.watchAndCheckExists(zkProc.getWatcher(), znode)) { + if (ZooKeeperUtil.watchAndCheckExists(zkProc.getWatcher(), znode)) { coordinator.memberAcquiredBarrier(procName, node); } } @@ -116,12 +116,12 @@ public class ZKProcedureCoordinator implements ProcedureCoordinatorRpcs { LOG.debug("Creating reached barrier zk node:" + reachedNode); try { // create the reached znode and watch for the reached znodes - ZKUtil.createWithParents(zkProc.getWatcher(), reachedNode); + ZooKeeperUtil.createWithParents(zkProc.getWatcher(), reachedNode); // loop through all the children of the acquire phase and watch for them for (String node : nodeNames) { - String znode = ZKUtil.joinZNode(reachedNode, node); - if (ZKUtil.watchAndCheckExists(zkProc.getWatcher(), znode)) { - byte[] dataFromMember = ZKUtil.getData(zkProc.getWatcher(), znode); + String znode = ZooKeeperUtil.joinZNode(reachedNode, node); + if (ZooKeeperUtil.watchAndCheckExists(zkProc.getWatcher(), znode)) { + byte[] dataFromMember = ZooKeeperUtil.getData(zkProc.getWatcher(), znode); // ProtobufUtil.isPBMagicPrefix will check null if (dataFromMember != null && dataFromMember.length > 0) { if (!ProtobufUtil.isPBMagicPrefix(dataFromMember)) { @@ -196,17 +196,17 @@ public class ZKProcedureCoordinator implements ProcedureCoordinatorRpcs { logZKTree(this.baseZNode); if (isAcquiredPathNode(path)) { // node wasn't present when we created the watch so zk event triggers acquire - coordinator.memberAcquiredBarrier(ZKUtil.getNodeName(ZKUtil.getParent(path)), - ZKUtil.getNodeName(path)); + coordinator.memberAcquiredBarrier(ZooKeeperUtil.getNodeName(ZooKeeperUtil.getParent(path)), + ZooKeeperUtil.getNodeName(path)); } else if (isReachedPathNode(path)) { // node was absent when we created the watch so zk event triggers the finished barrier. // TODO Nothing enforces that acquire and reached znodes from showing up in wrong order. 
- String procName = ZKUtil.getNodeName(ZKUtil.getParent(path)); - String member = ZKUtil.getNodeName(path); + String procName = ZooKeeperUtil.getNodeName(ZooKeeperUtil.getParent(path)); + String member = ZooKeeperUtil.getNodeName(path); // get the data from the procedure member try { - byte[] dataFromMember = ZKUtil.getData(watcher, path); + byte[] dataFromMember = ZooKeeperUtil.getData(watcher, path); // ProtobufUtil.isPBMagicPrefix will check null if (dataFromMember != null && dataFromMember.length > 0) { if (!ProtobufUtil.isPBMagicPrefix(dataFromMember)) { @@ -264,7 +264,7 @@ public class ZKProcedureCoordinator implements ProcedureCoordinatorRpcs { String source = (ee.getSource() == null) ? coordName : ee.getSource(); byte[] errorInfo = ProtobufUtil.prependPBMagic(ForeignException.serialize(source, ee)); // first create the znode for the procedure - ZKUtil.createAndFailSilent(zkProc.getWatcher(), procAbortNode, errorInfo); + ZooKeeperUtil.createAndFailSilent(zkProc.getWatcher(), procAbortNode, errorInfo); LOG.debug("Finished creating abort node:" + procAbortNode); } catch (KeeperException e) { // possible that we get this error for the procedure if we already reset the zk state, but in @@ -280,10 +280,10 @@ public class ZKProcedureCoordinator implements ProcedureCoordinatorRpcs { * @param abortNode full znode path to the failed procedure information */ protected void abort(String abortNode) { - String procName = ZKUtil.getNodeName(abortNode); + String procName = ZooKeeperUtil.getNodeName(abortNode); ForeignException ee = null; try { - byte[] data = ZKUtil.getData(zkProc.getWatcher(), abortNode); + byte[] data = ZooKeeperUtil.getData(zkProc.getWatcher(), abortNode); if (data == null || data.length == 0) { // ignore return; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureMemberRpcs.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureMemberRpcs.java index f8db277eee..c8148fe815 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureMemberRpcs.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureMemberRpcs.java @@ -23,12 +23,12 @@ import java.util.List; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperUtil; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.errorhandling.ForeignException; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.zookeeper.KeeperException; /** @@ -60,12 +60,12 @@ public class ZKProcedureMemberRpcs implements ProcedureMemberRpcs { /** * Must call {@link #start(String, ProcedureMember)} before this can be used. - * @param watcher {@link ZooKeeperWatcher} to be owned by this. Closed via + * @param watcher {@link ZKWatcher} to be owned by this. Closed via * {@link #close()}. 
* @param procType name of the znode describing the procedure type * @throws KeeperException if we can't reach zookeeper */ - public ZKProcedureMemberRpcs(final ZooKeeperWatcher watcher, final String procType) + public ZKProcedureMemberRpcs(final ZKWatcher watcher, final String procType) throws KeeperException { this.zkController = new ZKProcedureUtil(watcher, procType) { @Override @@ -83,7 +83,7 @@ public class ZKProcedureMemberRpcs implements ProcedureMemberRpcs { watchForAbortedProcedures(); return; } - String parent = ZKUtil.getParent(path); + String parent = ZooKeeperUtil.getParent(path); // if its the end barrier, the procedure can be completed if (isReachedNode(parent)) { receivedReachedGlobalBarrier(path); @@ -126,7 +126,7 @@ public class ZKProcedureMemberRpcs implements ProcedureMemberRpcs { */ private void receivedReachedGlobalBarrier(String path) { LOG.debug("Received reached global barrier:" + path); - String procName = ZKUtil.getNodeName(path); + String procName = ZooKeeperUtil.getNodeName(path); this.member.receivedReachedGlobalBarrier(procName); } @@ -134,9 +134,9 @@ public class ZKProcedureMemberRpcs implements ProcedureMemberRpcs { LOG.debug("Checking for aborted procedures on node: '" + zkController.getAbortZnode() + "'"); try { // this is the list of the currently aborted procedues - for (String node : ZKUtil.listChildrenAndWatchForNewChildren(zkController.getWatcher(), + for (String node : ZooKeeperUtil.listChildrenAndWatchForNewChildren(zkController.getWatcher(), zkController.getAbortZnode())) { - String abortNode = ZKUtil.joinZNode(zkController.getAbortZnode(), node); + String abortNode = ZooKeeperUtil.joinZNode(zkController.getAbortZnode(), node); abort(abortNode); } } catch (KeeperException e) { @@ -150,7 +150,7 @@ public class ZKProcedureMemberRpcs implements ProcedureMemberRpcs { LOG.debug("Looking for new procedures under znode:'" + zkController.getAcquiredBarrier() + "'"); List runningProcedures = null; try { - runningProcedures = 
ZKUtil.listChildrenAndWatchForNewChildren(zkController.getWatcher(), + runningProcedures = ZooKeeperUtil.listChildrenAndWatchForNewChildren(zkController.getWatcher(), zkController.getAcquiredBarrier()); if (runningProcedures == null) { LOG.debug("No running procedures."); @@ -166,7 +166,7 @@ public class ZKProcedureMemberRpcs implements ProcedureMemberRpcs { } for (String procName : runningProcedures) { // then read in the procedure information - String path = ZKUtil.joinZNode(zkController.getAcquiredBarrier(), procName); + String path = ZooKeeperUtil.joinZNode(zkController.getAcquiredBarrier(), procName); startNewSubprocedure(path); } } @@ -180,11 +180,11 @@ public class ZKProcedureMemberRpcs implements ProcedureMemberRpcs { */ private synchronized void startNewSubprocedure(String path) { LOG.debug("Found procedure znode: " + path); - String opName = ZKUtil.getNodeName(path); + String opName = ZooKeeperUtil.getNodeName(path); // start watching for an abort notification for the procedure String abortZNode = zkController.getAbortZNode(opName); try { - if (ZKUtil.watchAndCheckExists(zkController.getWatcher(), abortZNode)) { + if (ZooKeeperUtil.watchAndCheckExists(zkController.getWatcher(), abortZNode)) { LOG.debug("Not starting:" + opName + " because we already have an abort notification."); return; } @@ -197,7 +197,7 @@ public class ZKProcedureMemberRpcs implements ProcedureMemberRpcs { // get the data for the procedure Subprocedure subproc = null; try { - byte[] data = ZKUtil.getData(zkController.getWatcher(), path); + byte[] data = ZooKeeperUtil.getData(zkController.getWatcher(), path); if (!ProtobufUtil.isPBMagicPrefix(data)) { String msg = "Data in for starting procedure " + opName + " is illegally formatted (no pb magic). 
" + @@ -238,14 +238,14 @@ public class ZKProcedureMemberRpcs implements ProcedureMemberRpcs { try { LOG.debug("Member: '" + memberName + "' joining acquired barrier for procedure (" + procName + ") in zk"); - String acquiredZNode = ZKUtil.joinZNode(ZKProcedureUtil.getAcquireBarrierNode( + String acquiredZNode = ZooKeeperUtil.joinZNode(ZKProcedureUtil.getAcquireBarrierNode( zkController, procName), memberName); - ZKUtil.createAndFailSilent(zkController.getWatcher(), acquiredZNode); + ZooKeeperUtil.createAndFailSilent(zkController.getWatcher(), acquiredZNode); // watch for the complete node for this snapshot String reachedBarrier = zkController.getReachedBarrierNode(procName); LOG.debug("Watch for global barrier reached:" + reachedBarrier); - if (ZKUtil.watchAndCheckExists(zkController.getWatcher(), reachedBarrier)) { + if (ZooKeeperUtil.watchAndCheckExists(zkController.getWatcher(), reachedBarrier)) { receivedReachedGlobalBarrier(reachedBarrier); } } catch (KeeperException e) { @@ -262,13 +262,14 @@ public class ZKProcedureMemberRpcs implements ProcedureMemberRpcs { String procName = sub.getName(); LOG.debug("Marking procedure '" + procName + "' completed for member '" + memberName + "' in zk"); - String joinPath = ZKUtil.joinZNode(zkController.getReachedBarrierNode(procName), memberName); + String joinPath = ZooKeeperUtil + .joinZNode(zkController.getReachedBarrierNode(procName), memberName); // ProtobufUtil.prependPBMagic does not take care of null if (data == null) { data = new byte[0]; } try { - ZKUtil.createAndFailSilent(zkController.getWatcher(), joinPath, + ZooKeeperUtil.createAndFailSilent(zkController.getWatcher(), joinPath, ProtobufUtil.prependPBMagic(data)); } catch (KeeperException e) { member.controllerConnectionFailure("Failed to post zk node:" + joinPath @@ -292,7 +293,7 @@ public class ZKProcedureMemberRpcs implements ProcedureMemberRpcs { try { String source = (ee.getSource() == null) ? 
memberName: ee.getSource(); byte[] errorInfo = ProtobufUtil.prependPBMagic(ForeignException.serialize(source, ee)); - ZKUtil.createAndFailSilent(zkController.getWatcher(), procAbortZNode, errorInfo); + ZooKeeperUtil.createAndFailSilent(zkController.getWatcher(), procAbortZNode, errorInfo); LOG.debug("Finished creating abort znode:" + procAbortZNode); } catch (KeeperException e) { // possible that we get this error for the procedure if we already reset the zk state, but in @@ -309,9 +310,9 @@ public class ZKProcedureMemberRpcs implements ProcedureMemberRpcs { */ protected void abort(String abortZNode) { LOG.debug("Aborting procedure member for znode " + abortZNode); - String opName = ZKUtil.getNodeName(abortZNode); + String opName = ZooKeeperUtil.getNodeName(abortZNode); try { - byte[] data = ZKUtil.getData(zkController.getWatcher(), abortZNode); + byte[] data = ZooKeeperUtil.getData(zkController.getWatcher(), abortZNode); // figure out the data we need to pass ForeignException ee; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureUtil.java index a19ecb2254..c0705099e5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureUtil.java @@ -23,10 +23,10 @@ import java.util.List; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.zookeeper.ZKListener; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperUtil; import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperListener; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.apache.zookeeper.KeeperException; /** @@ -49,7 +49,7 @@ import 
org.apache.zookeeper.KeeperException; */ @InterfaceAudience.Private public abstract class ZKProcedureUtil - extends ZooKeeperListener implements Closeable { + extends ZKListener implements Closeable { private static final Log LOG = LogFactory.getLog(ZKProcedureUtil.class); @@ -72,23 +72,23 @@ public abstract class ZKProcedureUtil * @param procDescription name of the znode describing the procedure to run * @throws KeeperException when the procedure znodes cannot be created */ - public ZKProcedureUtil(ZooKeeperWatcher watcher, String procDescription) + public ZKProcedureUtil(ZKWatcher watcher, String procDescription) throws KeeperException { super(watcher); // make sure we are listening for events watcher.registerListener(this); // setup paths for the zknodes used in procedures - this.baseZNode = ZKUtil.joinZNode(watcher.znodePaths.baseZNode, procDescription); - acquiredZnode = ZKUtil.joinZNode(baseZNode, ACQUIRED_BARRIER_ZNODE_DEFAULT); - reachedZnode = ZKUtil.joinZNode(baseZNode, REACHED_BARRIER_ZNODE_DEFAULT); - abortZnode = ZKUtil.joinZNode(baseZNode, ABORT_ZNODE_DEFAULT); + this.baseZNode = ZooKeeperUtil.joinZNode(watcher.znodePaths.baseZNode, procDescription); + acquiredZnode = ZooKeeperUtil.joinZNode(baseZNode, ACQUIRED_BARRIER_ZNODE_DEFAULT); + reachedZnode = ZooKeeperUtil.joinZNode(baseZNode, REACHED_BARRIER_ZNODE_DEFAULT); + abortZnode = ZooKeeperUtil.joinZNode(baseZNode, ABORT_ZNODE_DEFAULT); // first make sure all the ZK nodes exist // make sure all the parents exist (sometimes not the case in tests) - ZKUtil.createWithParents(watcher, acquiredZnode); + ZooKeeperUtil.createWithParents(watcher, acquiredZnode); // regular create because all the parents exist - ZKUtil.createAndFailSilent(watcher, reachedZnode); - ZKUtil.createAndFailSilent(watcher, abortZnode); + ZooKeeperUtil.createAndFailSilent(watcher, reachedZnode); + ZooKeeperUtil.createAndFailSilent(watcher, abortZnode); } @Override @@ -130,7 +130,7 @@ public abstract class ZKProcedureUtil */ public 
static String getAcquireBarrierNode(ZKProcedureUtil controller, String opInstanceName) { - return ZKUtil.joinZNode(controller.acquiredZnode, opInstanceName); + return ZooKeeperUtil.joinZNode(controller.acquiredZnode, opInstanceName); } /** @@ -142,7 +142,7 @@ public abstract class ZKProcedureUtil */ public static String getReachedBarrierNode(ZKProcedureUtil controller, String opInstanceName) { - return ZKUtil.joinZNode(controller.reachedZnode, opInstanceName); + return ZooKeeperUtil.joinZNode(controller.reachedZnode, opInstanceName); } /** @@ -153,10 +153,10 @@ public abstract class ZKProcedureUtil * @return full znode path to the abort znode */ public static String getAbortNode(ZKProcedureUtil controller, String opInstanceName) { - return ZKUtil.joinZNode(controller.abortZnode, opInstanceName); + return ZooKeeperUtil.joinZNode(controller.abortZnode, opInstanceName); } - public ZooKeeperWatcher getWatcher() { + public ZKWatcher getWatcher() { return watcher; } @@ -212,7 +212,7 @@ public abstract class ZKProcedureUtil private boolean isMemberNode(final String path, final String statePath) { int count = 0; for (int i = statePath.length(); i < path.length(); ++i) { - count += (path.charAt(i) == ZKUtil.ZNODE_PATH_SEPARATOR) ? 1 : 0; + count += (path.charAt(i) == ZooKeeperUtil.ZNODE_PATH_SEPARATOR) ? 1 : 0; } return count == 2; } @@ -257,11 +257,11 @@ public abstract class ZKProcedureUtil * @throws KeeperException if an unexpected exception occurs */ protected void logZKTree(String root, String prefix) throws KeeperException { - List children = ZKUtil.listChildrenNoWatch(watcher, root); + List children = ZooKeeperUtil.listChildrenNoWatch(watcher, root); if (children == null) return; for (String child : children) { LOG.debug(prefix + child); - String node = ZKUtil.joinZNode(root.equals("/") ? "" : root, child); + String node = ZooKeeperUtil.joinZNode(root.equals("/") ? 
"" : root, child); logZKTree(node, prefix + "---"); } } @@ -273,7 +273,8 @@ public abstract class ZKProcedureUtil // If the coordinator was shutdown mid-procedure, then we are going to lose // an procedure that was previously started by cleaning out all the previous state. Its much // harder to figure out how to keep an procedure going and the subject of HBASE-5487. - ZKUtil.deleteChildrenRecursivelyMultiOrSequential(watcher, true, acquiredZnode, reachedZnode, + ZooKeeperUtil + .deleteChildrenRecursivelyMultiOrSequential(watcher, true, acquiredZnode, reachedZnode, abortZnode); if (LOG.isTraceEnabled()) { @@ -290,10 +291,10 @@ public abstract class ZKProcedureUtil String reachedBarrierNode = getReachedBarrierNode(procedureName); String abortZNode = getAbortZNode(procedureName); - ZKUtil.createAndFailSilent(watcher, acquiredBarrierNode); - ZKUtil.createAndFailSilent(watcher, abortZNode); + ZooKeeperUtil.createAndFailSilent(watcher, acquiredBarrierNode); + ZooKeeperUtil.createAndFailSilent(watcher, abortZNode); - ZKUtil.deleteNodeRecursivelyMultiOrSequential(watcher, true, acquiredBarrierNode, + ZooKeeperUtil.deleteNodeRecursivelyMultiOrSequential(watcher, true, acquiredBarrierNode, reachedBarrierNode, abortZNode); if (LOG.isTraceEnabled()) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java index ea34714a1e..e2d3a07b94 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java @@ -31,6 +31,7 @@ import java.util.concurrent.TimeUnit; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import 
org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Abortable; @@ -48,9 +49,7 @@ import org.apache.hadoop.hbase.procedure.SubprocedureFactory; import org.apache.hadoop.hbase.procedure.ZKProcedureMemberRpcs; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionServer; -import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.regionserver.RegionServerServices; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.apache.zookeeper.KeeperException; /** @@ -320,7 +319,7 @@ public class RegionServerFlushTableProcedureManager extends RegionServerProcedur @Override public void initialize(RegionServerServices rss) throws KeeperException { this.rss = rss; - ZooKeeperWatcher zkw = rss.getZooKeeper(); + ZKWatcher zkw = rss.getZooKeeper(); this.memberRpcs = new ZKProcedureMemberRpcs(zkw, MasterFlushTableProcedureManager.FLUSH_TABLE_PROCEDURE_SIGNATURE); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index f384c1f5aa..868eed5132 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -160,12 +160,11 @@ import org.apache.hadoop.hbase.wal.WALFactory; import org.apache.hadoop.hbase.zookeeper.ClusterStatusTracker; import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker; import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; -import org.apache.hadoop.hbase.zookeeper.RecoveringRegionWatcher; import org.apache.hadoop.hbase.zookeeper.ZKClusterId; -import org.apache.hadoop.hbase.zookeeper.ZKSplitLog; import org.apache.hadoop.hbase.zookeeper.ZKUtil; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperNodeTracker; -import 
org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKNodeTracker; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperUtil; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.metrics2.util.MBeans; import org.apache.hadoop.util.ReflectionUtils; @@ -393,7 +392,7 @@ public class HRegionServer extends HasThread implements final AtomicBoolean online = new AtomicBoolean(false); // zookeeper connection and watcher - protected ZooKeeperWatcher zooKeeper; + protected ZKWatcher zooKeeper; // master address tracker private MasterAddressTracker masterAddressTracker; @@ -601,7 +600,7 @@ public class HRegionServer extends HasThread implements rpcRetryingCallerFactory = RpcRetryingCallerFactory.instantiate(this.conf); // login the zookeeper client principal (if using security) - ZKUtil.loginClient(this.conf, HConstants.ZK_CLIENT_KEYTAB_FILE, + ZooKeeperUtil.loginClient(this.conf, HConstants.ZK_CLIENT_KEYTAB_FILE, HConstants.ZK_CLIENT_KERBEROS_PRINCIPAL, hostName); // login the server principal (if using secure Hadoop) login(userProvider, hostName); @@ -628,7 +627,7 @@ public class HRegionServer extends HasThread implements // Some unit tests don't need a cluster, so no zookeeper at all if (!conf.getBoolean("hbase.testing.nocluster", false)) { // Open connection to zookeeper and set primary watcher - zooKeeper = new ZooKeeperWatcher(conf, getProcessName() + ":" + + zooKeeper = new ZKWatcher(conf, getProcessName() + ":" + rpcServices.isa.getPort(), this, canCreateBaseZNode()); // If no master in cluster, skip trying to track one or look for a cluster status. 
@@ -919,7 +918,7 @@ public class HRegionServer extends HasThread implements * @throws IOException any IO exception, plus if the RS is stopped * @throws InterruptedException */ - private void blockAndCheckIfStopped(ZooKeeperNodeTracker tracker) + private void blockAndCheckIfStopped(ZKNodeTracker tracker) throws IOException, InterruptedException { while (tracker.blockUntilAvailable(this.msgInterval, false) == null) { if (this.stopped) { @@ -1548,7 +1547,7 @@ public class HRegionServer extends HasThread implements // Set up ZK LOG.info("Serving as " + this.serverName + ", RpcServer on " + rpcServices.isa + ", sessionid=0x" + - Long.toHexString(this.zooKeeper.getRecoverableZooKeeper().getSessionId())); + Long.toHexString(this.zooKeeper.getRecoverableZK().getSessionId())); // Wake up anyone waiting for this server to online synchronized (online) { @@ -1598,11 +1597,11 @@ public class HRegionServer extends HasThread implements rsInfo.setInfoPort(infoServer != null ? infoServer.getPort() : -1); rsInfo.setVersionInfo(ProtobufUtil.getVersionInfo()); byte[] data = ProtobufUtil.prependPBMagic(rsInfo.build().toByteArray()); - ZKUtil.createEphemeralNodeAndWatch(this.zooKeeper, getMyEphemeralNodePath(), data); + ZooKeeperUtil.createEphemeralNodeAndWatch(this.zooKeeper, getMyEphemeralNodePath(), data); } private void deleteMyEphemeralNode() throws KeeperException { - ZKUtil.deleteNode(this.zooKeeper, getMyEphemeralNodePath()); + ZooKeeperUtil.deleteNode(this.zooKeeper, getMyEphemeralNodePath()); } @Override @@ -2908,7 +2907,7 @@ public class HRegionServer extends HasThread implements } @Override - public ZooKeeperWatcher getZooKeeper() { + public ZKWatcher getZooKeeper() { return zooKeeper; } @@ -3548,7 +3547,7 @@ public class HRegionServer extends HasThread implements } private String getMyEphemeralNodePath() { - return ZKUtil.joinZNode(this.zooKeeper.znodePaths.rsZNode, getServerName().toString()); + return ZooKeeperUtil.joinZNode(this.zooKeeper.znodePaths.rsZNode, 
getServerName().toString()); } private boolean isHealthCheckerConfigured() { @@ -3578,7 +3577,7 @@ public class HRegionServer extends HasThread implements } RegionInfo regionInfo = r.getRegionInfo(); - ZooKeeperWatcher zkw = getZooKeeper(); + ZKWatcher zkw = getZooKeeper(); String previousRSName = this.getLastFailedRSFromZK(regionInfo.getEncodedName()); Map maxSeqIdInStores = r.getMaxStoreSeqId(); long minSeqIdForLogReplay = -1; @@ -3590,25 +3589,25 @@ public class HRegionServer extends HasThread implements try { long lastRecordedFlushedSequenceId = -1; - String nodePath = ZKUtil.joinZNode(this.zooKeeper.znodePaths.recoveringRegionsZNode, + String nodePath = ZooKeeperUtil.joinZNode(this.zooKeeper.znodePaths.recoveringRegionsZNode, regionInfo.getEncodedName()); // recovering-region level byte[] data; try { - data = ZKUtil.getData(zkw, nodePath); + data = ZooKeeperUtil.getData(zkw, nodePath); } catch (InterruptedException e) { throw new InterruptedIOException(); } if (data != null) { - lastRecordedFlushedSequenceId = ZKSplitLog.parseLastFlushedSequenceIdFrom(data); + lastRecordedFlushedSequenceId = ZKUtil.parseLastFlushedSequenceIdFrom(data); } if (data == null || lastRecordedFlushedSequenceId < minSeqIdForLogReplay) { - ZKUtil.setData(zkw, nodePath, ZKUtil.positionToByteArray(minSeqIdForLogReplay)); + ZooKeeperUtil.setData(zkw, nodePath, ZKUtil.positionToByteArray(minSeqIdForLogReplay)); } if (previousRSName != null) { // one level deeper for the failed RS - nodePath = ZKUtil.joinZNode(nodePath, previousRSName); - ZKUtil.setData(zkw, nodePath, + nodePath = ZooKeeperUtil.joinZNode(nodePath, previousRSName); + ZooKeeperUtil.setData(zkw, nodePath, ZKUtil.regionSequenceIdsToByteArray(minSeqIdForLogReplay, maxSeqIdInStores)); LOG.debug("Update last flushed sequence id of region " + regionInfo.getEncodedName() + " for " + previousRSName); @@ -3630,16 +3629,17 @@ public class HRegionServer extends HasThread implements private String getLastFailedRSFromZK(String 
encodedRegionName) throws KeeperException { String result = null; long maxZxid = 0; - ZooKeeperWatcher zkw = this.getZooKeeper(); - String nodePath = ZKUtil.joinZNode(zkw.znodePaths.recoveringRegionsZNode, encodedRegionName); - List failedServers = ZKUtil.listChildrenNoWatch(zkw, nodePath); + ZKWatcher zkw = this.getZooKeeper(); + String nodePath = ZooKeeperUtil + .joinZNode(zkw.znodePaths.recoveringRegionsZNode, encodedRegionName); + List failedServers = ZooKeeperUtil.listChildrenNoWatch(zkw, nodePath); if (failedServers == null || failedServers.isEmpty()) { return result; } for (String failedServer : failedServers) { - String rsPath = ZKUtil.joinZNode(nodePath, failedServer); + String rsPath = ZooKeeperUtil.joinZNode(nodePath, failedServer); Stat stat = new Stat(); - ZKUtil.getDataNoWatch(zkw, rsPath, stat); + ZooKeeperUtil.getDataNoWatch(zkw, rsPath, stat); if (maxZxid < stat.getCzxid()) { maxZxid = stat.getCzxid(); result = failedServer; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java index 515b1eae0c..0ac21c4385 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java @@ -42,7 +42,7 @@ import org.apache.hadoop.hbase.regionserver.wal.MetricsWALSource; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.wal.WALProvider; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.hadoop.hdfs.DFSHedgedReadMetrics; import org.apache.hadoop.metrics2.MetricsExecutor; import org.apache.yetus.audience.InterfaceAudience; @@ -187,7 +187,7 @@ class MetricsRegionServerWrapperImpl @Override public 
String getZookeeperQuorum() { - ZooKeeperWatcher zk = regionServer.getZooKeeper(); + ZKWatcher zk = regionServer.getZooKeeper(); if (zk == null) { return ""; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoveringRegionWatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RecoveringRegionWatcher.java similarity index 86% rename from hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoveringRegionWatcher.java rename to hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RecoveringRegionWatcher.java index 16485ee476..6876ed0f53 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoveringRegionWatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RecoveringRegionWatcher.java @@ -16,12 +16,15 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.hbase.zookeeper; +package org.apache.hadoop.hbase.regionserver; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.zookeeper.ZKListener; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperUtil; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.regionserver.handler.FinishRegionRecoveringHandler; import org.apache.zookeeper.KeeperException; @@ -29,7 +32,7 @@ import org.apache.zookeeper.KeeperException; * Watcher used to be notified of the recovering region coming out of recovering state */ @InterfaceAudience.Private -public class RecoveringRegionWatcher extends ZooKeeperListener { +public class RecoveringRegionWatcher extends ZKListener { private static final Log LOG = LogFactory.getLog(RecoveringRegionWatcher.class); private HRegionServer server; @@ -37,7 
+40,7 @@ public class RecoveringRegionWatcher extends ZooKeeperListener { /** * Construct a ZooKeeper event listener. */ - public RecoveringRegionWatcher(ZooKeeperWatcher watcher, HRegionServer server) { + public RecoveringRegionWatcher(ZKWatcher watcher, HRegionServer server) { super(watcher); watcher.registerListener(this); this.server = server; @@ -84,7 +87,7 @@ public class RecoveringRegionWatcher extends ZooKeeperListener { } try { - ZKUtil.getDataAndWatch(watcher, path); + ZooKeeperUtil.getDataAndWatch(watcher, path); } catch (KeeperException e) { LOG.warn("Can't register watcher on znode " + path, e); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java index a4b4387997..ff01925277 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java @@ -38,6 +38,7 @@ import org.apache.hadoop.hbase.DaemonThreadFactory; import org.apache.hadoop.hbase.DroppedSnapshotException; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; import org.apache.hadoop.hbase.client.RegionReplicaUtil; @@ -57,7 +58,7 @@ import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.regionserver.RegionServerServices; import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription; import org.apache.hadoop.hbase.snapshot.SnapshotCreationException; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.zookeeper.KeeperException; 
/** @@ -394,7 +395,7 @@ public class RegionServerSnapshotManager extends RegionServerProcedureManager { @Override public void initialize(RegionServerServices rss) throws KeeperException { this.rss = rss; - ZooKeeperWatcher zkw = rss.getZooKeeper(); + ZKWatcher zkw = rss.getZooKeeper(); this.memberRpcs = new ZKProcedureMemberRpcs(zkw, SnapshotManager.ONLINE_SNAPSHOT_CONTROLLER_DESCRIPTION); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java index a34bedd5ba..d8208ca3a4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java @@ -26,13 +26,14 @@ import java.util.UUID; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperUtil; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.zookeeper.ZKClusterId; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperListener; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKListener; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.zookeeper.KeeperException; import org.apache.zookeeper.KeeperException.AuthFailedException; import org.apache.zookeeper.KeeperException.ConnectionLossException; @@ -50,7 +51,7 @@ public abstract class HBaseReplicationEndpoint extends BaseReplicationEndpoint private static final Log LOG = LogFactory.getLog(HBaseReplicationEndpoint.class); - private ZooKeeperWatcher zkw = null; // FindBugs: MT_CORRECTNESS + private ZKWatcher zkw = null; // 
FindBugs: MT_CORRECTNESS private List regionServers = new ArrayList<>(0); private long lastRegionServerUpdate; @@ -123,7 +124,7 @@ public abstract class HBaseReplicationEndpoint extends BaseReplicationEndpoint * Get the ZK connection to this peer * @return zk connection */ - protected ZooKeeperWatcher getZkw() { + protected ZKWatcher getZkw() { return zkw; } @@ -133,7 +134,7 @@ public abstract class HBaseReplicationEndpoint extends BaseReplicationEndpoint */ void reloadZkWatcher() throws IOException { if (zkw != null) zkw.close(); - zkw = new ZooKeeperWatcher(ctx.getConfiguration(), + zkw = new ZKWatcher(ctx.getConfiguration(), "connection to cluster: " + ctx.getPeerId(), this); getZkw().registerListener(new PeerRegionServerListener(this)); } @@ -155,9 +156,9 @@ public abstract class HBaseReplicationEndpoint extends BaseReplicationEndpoint * @param zkw zk connection to use * @return list of region server addresses or an empty list if the slave is unavailable */ - protected static List fetchSlavesAddresses(ZooKeeperWatcher zkw) + protected static List fetchSlavesAddresses(ZKWatcher zkw) throws KeeperException { - List children = ZKUtil.listChildrenAndWatchForNewChildren(zkw, zkw.znodePaths.rsZNode); + List children = ZooKeeperUtil.listChildrenAndWatchForNewChildren(zkw, zkw.znodePaths.rsZNode); if (children == null) { return Collections.emptyList(); } @@ -210,7 +211,7 @@ public abstract class HBaseReplicationEndpoint extends BaseReplicationEndpoint /** * Tracks changes to the list of region servers in a peer's cluster. 
*/ - public static class PeerRegionServerListener extends ZooKeeperListener { + public static class PeerRegionServerListener extends ZKListener { private final HBaseReplicationEndpoint replicationEndpoint; private final String regionServerListNode; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationHFileCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationHFileCleaner.java index a14bd0101d..c944204f8b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationHFileCleaner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationHFileCleaner.java @@ -28,14 +28,13 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.ZooKeeperConnectionException; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.master.cleaner.BaseHFileCleanerDelegate; import org.apache.hadoop.hbase.master.cleaner.HFileCleaner; import org.apache.hadoop.hbase.replication.ReplicationFactory; import org.apache.hadoop.hbase.replication.ReplicationQueuesClient; import org.apache.hadoop.hbase.replication.ReplicationQueuesClientArguments; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.apache.zookeeper.KeeperException; /** @@ -45,7 +44,7 @@ import org.apache.zookeeper.KeeperException; @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) public class ReplicationHFileCleaner extends BaseHFileCleanerDelegate { private static final Log LOG = LogFactory.getLog(ReplicationHFileCleaner.class); - private ZooKeeperWatcher zkw; + private ZKWatcher zkw; private ReplicationQueuesClient rqc; private boolean stopped = false; @@ -130,14 +129,14 @@ public class ReplicationHFileCleaner extends 
BaseHFileCleanerDelegate { // I can close myself when time comes. Configuration conf = new Configuration(config); try { - setConf(conf, new ZooKeeperWatcher(conf, "replicationHFileCleaner", null)); + setConf(conf, new ZKWatcher(conf, "replicationHFileCleaner", null)); } catch (IOException e) { LOG.error("Error while configuring " + this.getClass().getName(), e); } } @VisibleForTesting - public void setConf(Configuration conf, ZooKeeperWatcher zk) { + public void setConf(Configuration conf, ZKWatcher zk) { super.setConf(conf); try { initReplicationQueuesClient(conf, zk); @@ -146,7 +145,7 @@ public class ReplicationHFileCleaner extends BaseHFileCleanerDelegate { } } - private void initReplicationQueuesClient(Configuration conf, ZooKeeperWatcher zk) + private void initReplicationQueuesClient(Configuration conf, ZKWatcher zk) throws Exception { this.zkw = zk; this.rqc = ReplicationFactory.getReplicationQueuesClient(new ReplicationQueuesClientArguments( diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java index 3dcb332b1f..b9a3920498 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java @@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.replication.master; import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; @@ -30,7 +31,6 @@ import org.apache.hadoop.hbase.master.cleaner.BaseLogCleanerDelegate; import org.apache.hadoop.hbase.replication.ReplicationFactory;
import org.apache.hadoop.hbase.replication.ReplicationQueuesClient; import org.apache.hadoop.hbase.replication.ReplicationQueuesClientArguments; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import java.io.IOException; import java.util.Collections; @@ -48,7 +48,7 @@ import org.apache.zookeeper.KeeperException; @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) public class ReplicationLogCleaner extends BaseLogCleanerDelegate { private static final Log LOG = LogFactory.getLog(ReplicationLogCleaner.class); - private ZooKeeperWatcher zkw; + private ZKWatcher zkw; private ReplicationQueuesClient replicationQueues; private boolean stopped = false; private Set wals; @@ -101,14 +101,14 @@ public class ReplicationLogCleaner extends BaseLogCleanerDelegate { // I can close myself when comes time. Configuration conf = new Configuration(config); try { - setConf(conf, new ZooKeeperWatcher(conf, "replicationLogCleaner", null)); + setConf(conf, new ZKWatcher(conf, "replicationLogCleaner", null)); } catch (IOException e) { LOG.error("Error while configuring " + this.getClass().getName(), e); } } @VisibleForTesting - public void setConf(Configuration conf, ZooKeeperWatcher zk) { + public void setConf(Configuration conf, ZKWatcher zk) { super.setConf(conf); try { this.zkw = zk; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/TableCFsUpdater.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/TableCFsUpdater.java index 0585c97c3f..499eefde47 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/TableCFsUpdater.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/TableCFsUpdater.java @@ -23,6 +23,8 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.HBaseConfiguration; +import
org.apache.hadoop.hbase.zookeeper.ZKWatcher; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperUtil; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; import org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper; @@ -30,8 +32,6 @@ import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos; import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; import org.apache.hadoop.hbase.replication.ReplicationStateZKBase; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.apache.zookeeper.KeeperException; import java.io.IOException; @@ -47,7 +47,7 @@ public class TableCFsUpdater extends ReplicationStateZKBase { private static final Log LOG = LogFactory.getLog(TableCFsUpdater.class); - public TableCFsUpdater(ZooKeeperWatcher zookeeper, + public TableCFsUpdater(ZKWatcher zookeeper, Configuration conf, Abortable abortable) { super(zookeeper, conf, abortable); } @@ -55,7 +55,7 @@ public class TableCFsUpdater extends ReplicationStateZKBase { public void update() { List znodes = null; try { - znodes = ZKUtil.listChildrenNoWatch(this.zookeeper, this.peersZNode); + znodes = ZooKeeperUtil.listChildrenNoWatch(this.zookeeper, this.peersZNode); } catch (KeeperException e) { LOG.error("Failed to get peers znode", e); } @@ -71,7 +71,7 @@ public class TableCFsUpdater extends ReplicationStateZKBase { public boolean update(String peerId) { String tableCFsNode = getTableCFsNode(peerId); try { - if (ZKUtil.checkExists(zookeeper, tableCFsNode) != -1) { + if (ZooKeeperUtil.checkExists(zookeeper, tableCFsNode) != -1) { String peerNode = getPeerNode(peerId); ReplicationPeerConfig rpc = getReplicationPeerConig(peerNode); // We only need to copy data from tableCFs node to rpc Node the first time hmaster start. 
@@ -80,10 +80,10 @@ public class TableCFsUpdater extends ReplicationStateZKBase { LOG.info("copy tableCFs into peerNode:" + peerId); ReplicationProtos.TableCF[] tableCFs = ReplicationSerDeHelper.parseTableCFs( - ZKUtil.getData(this.zookeeper, tableCFsNode)); + ZooKeeperUtil.getData(this.zookeeper, tableCFsNode)); if (tableCFs != null && tableCFs.length > 0) { rpc.setTableCFsMap(ReplicationSerDeHelper.convert2Map(tableCFs)); - ZKUtil.setData(this.zookeeper, peerNode, + ZooKeeperUtil.setData(this.zookeeper, peerNode, ReplicationSerDeHelper.toByteArray(rpc)); } } else { @@ -106,7 +106,7 @@ public class TableCFsUpdater extends ReplicationStateZKBase { private ReplicationPeerConfig getReplicationPeerConig(String peerNode) throws KeeperException, InterruptedException { byte[] data = null; - data = ZKUtil.getData(this.zookeeper, peerNode); + data = ZooKeeperUtil.getData(this.zookeeper, peerNode); if (data == null) { LOG.error("Could not get configuration for " + "peer because it doesn't exist. 
peer=" + peerNode); @@ -137,7 +137,7 @@ public class TableCFsUpdater extends ReplicationStateZKBase { printUsageAndExit(); } else if (args[0].equals("update")) { Configuration conf = HBaseConfiguration.create(); - ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "TableCFsUpdater", null); + ZKWatcher zkw = new ZKWatcher(conf, "TableCFsUpdater", null); try { TableCFsUpdater tableCFsUpdater = new TableCFsUpdater(zkw, conf, null); tableCFsUpdater.update(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java index 9d38026592..a6ee84127c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java @@ -55,7 +55,7 @@ import org.apache.hadoop.hbase.replication.ReplicationQueuesClient; import org.apache.hadoop.hbase.replication.ReplicationQueuesClientArguments; import org.apache.hadoop.hbase.replication.ReplicationTracker; import org.apache.hadoop.hbase.zookeeper.ZKUtil; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; import org.apache.zookeeper.KeeperException; @@ -211,7 +211,7 @@ public class DumpReplicationQueues extends Configured implements Tool { ClusterConnection connection = (ClusterConnection) ConnectionFactory.createConnection(conf); Admin admin = connection.getAdmin(); - ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "DumpReplicationQueues" + System.currentTimeMillis(), + ZKWatcher zkw = new ZKWatcher(conf, "DumpReplicationQueues" + System.currentTimeMillis(), new WarnOnlyAbortable(), true); try { @@ -302,7 +302,7 @@ public class DumpReplicationQueues extends Configured implements Tool { 
return sb.toString(); } - public String dumpQueues(ClusterConnection connection, ZooKeeperWatcher zkw, Set peerIds, + public String dumpQueues(ClusterConnection connection, ZKWatcher zkw, Set peerIds, boolean hdfs) throws Exception { ReplicationQueuesClient queuesClient; ReplicationPeers replicationPeers; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java index 280289c4fa..45be9d3c40 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java @@ -33,7 +33,7 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; @@ -77,7 +77,7 @@ public class ReplicationSyncUp extends Configured implements Tool { ReplicationSourceManager manager; FileSystem fs; Path oldLogDir, logDir, walRootDir; - ZooKeeperWatcher zkw; + ZKWatcher zkw; Abortable abortable = new Abortable() { @Override @@ -91,7 +91,7 @@ }; zkw = - new ZooKeeperWatcher(conf, "syncupReplication" + System.currentTimeMillis(), abortable, + new ZKWatcher(conf, "syncupReplication" + System.currentTimeMillis(), abortable, true); walRootDir = FSUtils.getWALRootDir(conf); @@ -123,9 +123,9 @@ public class ReplicationSyncUp extends Configured implements Tool { static class DummyServer implements Server { String hostname; - ZooKeeperWatcher zkw; + ZKWatcher
zkw; - DummyServer(ZooKeeperWatcher zkw) { + DummyServer(ZKWatcher zkw) { // an unique name in case the first run fails hostname = System.currentTimeMillis() + ".SyncUpTool.replication.org"; this.zkw = zkw; @@ -141,7 +141,7 @@ public class ReplicationSyncUp extends Configured implements Tool { } @Override - public ZooKeeperWatcher getZooKeeper() { + public ZKWatcher getZooKeeper() { return zkw; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java index 4e67f6ea41..f9298d352f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java @@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.security.access; import org.apache.hadoop.hbase.CompareOperator; import org.apache.hadoop.hbase.PrivateCellUtil; -import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.shaded.com.google.common.collect.ArrayListMultimap; import org.apache.hadoop.hbase.shaded.com.google.common.collect.ListMultimap; import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists; @@ -747,7 +746,7 @@ public class AccessControlLists { // Deserialize the table permissions from the KV // TODO: This can be improved.
Don't build UsersAndPermissions just to unpack it again, // use the builder - AccessControlProtos.UsersAndPermissions.Builder builder = + AccessControlProtos.UsersAndPermissions.Builder builder = AccessControlProtos.UsersAndPermissions.newBuilder(); if (tag.hasArray()) { ProtobufUtil.mergeFrom(builder, tag.getValueArray(), tag.getValueOffset(), tag.getValueLength()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java index 7e43c9d868..0647747969 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java @@ -130,7 +130,7 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.SimpleMutableByteRange; import org.apache.hadoop.hbase.wal.WALEdit; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.shaded.com.google.common.collect.ArrayListMultimap; @@ -953,7 +953,7 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, + " accordingly."); } - ZooKeeperWatcher zk = null; + ZKWatcher zk = null; if (env instanceof MasterCoprocessorEnvironment) { // if running on HMaster MasterCoprocessorEnvironment mEnv = (MasterCoprocessorEnvironment)env; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/TableAuthManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/TableAuthManager.java index 0db05c2d8b..e62baaa1e2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/TableAuthManager.java +++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/TableAuthManager.java @@ -39,13 +39,13 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.AuthUtil; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.security.Superusers; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.security.UserProvider; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.apache.zookeeper.KeeperException; /** @@ -112,7 +112,7 @@ public class TableAuthManager implements Closeable { private ZKPermissionWatcher zkperms; private final AtomicLong mtime = new AtomicLong(0L); - private TableAuthManager(ZooKeeperWatcher watcher, Configuration conf) + private TableAuthManager(ZKWatcher watcher, Configuration conf) throws IOException { this.conf = conf; @@ -734,14 +734,14 @@ public class TableAuthManager implements Closeable { return mtime.get(); } - private static Map managerMap = new HashMap<>(); + private static Map managerMap = new HashMap<>(); private static Map refCount = new HashMap<>(); /** Returns a TableAuthManager from the cache. If not cached, constructs a new one. Returned * instance should be released back by calling {@link #release(TableAuthManager)}. 
*/ public synchronized static TableAuthManager getOrCreate( - ZooKeeperWatcher watcher, Configuration conf) throws IOException { + ZKWatcher watcher, Configuration conf) throws IOException { TableAuthManager instance = managerMap.get(watcher); if (instance == null) { instance = new TableAuthManager(watcher, conf); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ZKPermissionWatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ZKPermissionWatcher.java index 09a1771eff..11811c12eb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ZKPermissionWatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ZKPermissionWatcher.java @@ -20,14 +20,14 @@ package org.apache.hadoop.hbase.security.access; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.zookeeper.ZKListener; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperUtil; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.DaemonThreadFactory; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperListener; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.apache.zookeeper.KeeperException; import java.io.Closeable; @@ -40,7 +40,6 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; import java.util.concurrent.RejectedExecutionException; -import java.util.concurrent.atomic.AtomicReference; /** * Handles synchronization of access control list entries and updates @@ -52,7 +51,7 @@ import java.util.concurrent.atomic.AtomicReference; * trigger updates in the {@link TableAuthManager} permission cache. 
*/ @InterfaceAudience.Private -public class ZKPermissionWatcher extends ZooKeeperListener implements Closeable { +public class ZKPermissionWatcher extends ZKListener implements Closeable { private static final Log LOG = LogFactory.getLog(ZKPermissionWatcher.class); // parent node for permissions lists static final String ACL_NODE = "acl"; @@ -62,12 +61,12 @@ public class ZKPermissionWatcher extends ZooKeeperListener implements Closeable private final ExecutorService executor; private Future childrenChangedFuture; - public ZKPermissionWatcher(ZooKeeperWatcher watcher, + public ZKPermissionWatcher(ZKWatcher watcher, TableAuthManager authManager, Configuration conf) { super(watcher); this.authManager = authManager; String aclZnodeParent = conf.get("zookeeper.znode.acl.parent", ACL_NODE); - this.aclZNode = ZKUtil.joinZNode(watcher.znodePaths.baseZNode, aclZnodeParent); + this.aclZNode = ZooKeeperUtil.joinZNode(watcher.znodePaths.baseZNode, aclZnodeParent); executor = Executors.newSingleThreadExecutor( new DaemonThreadFactory("zk-permission-watcher")); } @@ -75,13 +74,13 @@ public class ZKPermissionWatcher extends ZooKeeperListener implements Closeable public void start() throws KeeperException { try { watcher.registerListener(this); - if (ZKUtil.watchAndCheckExists(watcher, aclZNode)) { + if (ZooKeeperUtil.watchAndCheckExists(watcher, aclZNode)) { try { executor.submit(new Callable() { @Override public Void call() throws KeeperException { - List existing = - ZKUtil.getChildDataAndWatchForNewChildren(watcher, aclZNode); + List existing = + ZooKeeperUtil.getChildDataAndWatchForNewChildren(watcher, aclZNode); if (existing != null) { refreshNodes(existing); } @@ -125,8 +124,8 @@ public class ZKPermissionWatcher extends ZooKeeperListener implements Closeable @Override public void run() { try { - List nodes = - ZKUtil.getChildDataAndWatchForNewChildren(watcher, aclZNode); + List nodes = + ZooKeeperUtil.getChildDataAndWatchForNewChildren(watcher, aclZNode); 
refreshNodes(nodes); } catch (KeeperException ke) { LOG.error("Error reading data from zookeeper", ke); @@ -141,11 +140,11 @@ public class ZKPermissionWatcher extends ZooKeeperListener implements Closeable @Override public void nodeDeleted(final String path) { waitUntilStarted(); - if (aclZNode.equals(ZKUtil.getParent(path))) { + if (aclZNode.equals(ZooKeeperUtil.getParent(path))) { asyncProcessNodeUpdate(new Runnable() { @Override public void run() { - String table = ZKUtil.getNodeName(path); + String table = ZooKeeperUtil.getNodeName(path); if(AccessControlLists.isNamespaceEntry(table)) { authManager.removeNamespace(Bytes.toBytes(table)); } else { @@ -159,14 +158,14 @@ public class ZKPermissionWatcher extends ZooKeeperListener implements Closeable @Override public void nodeDataChanged(final String path) { waitUntilStarted(); - if (aclZNode.equals(ZKUtil.getParent(path))) { + if (aclZNode.equals(ZooKeeperUtil.getParent(path))) { asyncProcessNodeUpdate(new Runnable() { @Override public void run() { // update cache on an existing table node - String entry = ZKUtil.getNodeName(path); + String entry = ZooKeeperUtil.getNodeName(path); try { - byte[] data = ZKUtil.getDataAndWatch(watcher, path); + byte[] data = ZooKeeperUtil.getDataAndWatch(watcher, path); refreshAuthManager(entry, data); } catch (KeeperException ke) { LOG.error("Error reading data from zookeeper for node " + entry, ke); @@ -186,8 +185,8 @@ public class ZKPermissionWatcher extends ZooKeeperListener implements Closeable waitUntilStarted(); if (path.equals(aclZNode)) { try { - final List nodeList = - ZKUtil.getChildDataAndWatchForNewChildren(watcher, aclZNode); + final List nodeList = + ZooKeeperUtil.getChildDataAndWatchForNewChildren(watcher, aclZNode); // preempt any existing nodeChildrenChanged event processing if (childrenChangedFuture != null && !childrenChangedFuture.isDone()) { boolean cancelled = childrenChangedFuture.cancel(true); @@ -222,15 +221,15 @@ public class ZKPermissionWatcher extends 
ZooKeeperListener implements Closeable return null; // No task launched so there will be nothing to cancel later } - private void refreshNodes(List nodes) { - for (ZKUtil.NodeAndData n : nodes) { + private void refreshNodes(List nodes) { + for (ZooKeeperUtil.NodeAndData n : nodes) { if (Thread.interrupted()) { // Use Thread.interrupted so that we clear interrupt status break; } if (n.isEmpty()) continue; String path = n.getNode(); - String entry = (ZKUtil.getNodeName(path)); + String entry = (ZooKeeperUtil.getNodeName(path)); try { refreshAuthManager(entry, n.getData()); } catch (IOException ioe) { @@ -260,12 +259,12 @@ public class ZKPermissionWatcher extends ZooKeeperListener implements Closeable */ public void writeToZookeeper(byte[] entry, byte[] permsData) { String entryName = Bytes.toString(entry); - String zkNode = ZKUtil.joinZNode(watcher.znodePaths.baseZNode, ACL_NODE); - zkNode = ZKUtil.joinZNode(zkNode, entryName); + String zkNode = ZooKeeperUtil.joinZNode(watcher.znodePaths.baseZNode, ACL_NODE); + zkNode = ZooKeeperUtil.joinZNode(zkNode, entryName); try { - ZKUtil.createWithParents(watcher, zkNode); - ZKUtil.updateExistingNodeData(watcher, zkNode, permsData, -1); + ZooKeeperUtil.createWithParents(watcher, zkNode); + ZooKeeperUtil.updateExistingNodeData(watcher, zkNode, permsData, -1); } catch (KeeperException e) { LOG.error("Failed updating permissions for entry '" + entryName + "'", e); @@ -278,11 +277,11 @@ public class ZKPermissionWatcher extends ZooKeeperListener implements Closeable * @param tableName */ public void deleteTableACLNode(final TableName tableName) { - String zkNode = ZKUtil.joinZNode(watcher.znodePaths.baseZNode, ACL_NODE); - zkNode = ZKUtil.joinZNode(zkNode, tableName.getNameAsString()); + String zkNode = ZooKeeperUtil.joinZNode(watcher.znodePaths.baseZNode, ACL_NODE); + zkNode = ZooKeeperUtil.joinZNode(zkNode, tableName.getNameAsString()); try { - ZKUtil.deleteNode(watcher, zkNode); + ZooKeeperUtil.deleteNode(watcher, zkNode); } 
catch (KeeperException.NoNodeException e) { LOG.warn("No acl notify node of table '" + tableName + "'"); } catch (KeeperException e) { @@ -295,11 +294,11 @@ public class ZKPermissionWatcher extends ZooKeeperListener implements Closeable * Delete the acl notify node of namespace */ public void deleteNamespaceACLNode(final String namespace) { - String zkNode = ZKUtil.joinZNode(watcher.znodePaths.baseZNode, ACL_NODE); - zkNode = ZKUtil.joinZNode(zkNode, AccessControlLists.NAMESPACE_PREFIX + namespace); + String zkNode = ZooKeeperUtil.joinZNode(watcher.znodePaths.baseZNode, ACL_NODE); + zkNode = ZooKeeperUtil.joinZNode(zkNode, AccessControlLists.NAMESPACE_PREFIX + namespace); try { - ZKUtil.deleteNode(watcher, zkNode); + ZooKeeperUtil.deleteNode(watcher, zkNode); } catch (KeeperException.NoNodeException e) { LOG.warn("No acl notify node of namespace '" + namespace + "'"); } catch (KeeperException e) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSecretManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSecretManager.java index 718e8e0dc1..559580c9a4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSecretManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSecretManager.java @@ -27,15 +27,15 @@ import java.util.concurrent.atomic.AtomicLong; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.zookeeper.ZKLeaderManager; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperUtil; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Stoppable; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.zookeeper.ZKClusterId; -import 
org.apache.hadoop.hbase.zookeeper.ZKLeaderManager; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.hadoop.io.Text; import org.apache.hadoop.security.token.SecretManager; import org.apache.hadoop.security.token.Token; @@ -89,7 +89,7 @@ public class AuthenticationTokenSecretManager * org.apache.hadoop.hbase.ipc.SecureServer so public access is needed. */ public AuthenticationTokenSecretManager(Configuration conf, - ZooKeeperWatcher zk, String serverName, + ZKWatcher zk, String serverName, long keyUpdateInterval, long tokenMaxLifetime) { this.zkWatcher = new ZKSecretWatcher(conf, zk, this); this.keyUpdateInterval = keyUpdateInterval; @@ -144,9 +144,9 @@ public class AuthenticationTokenSecretManager AuthenticationKey masterKey = allKeys.get(identifier.getKeyId()); if(masterKey == null) { if(zkWatcher.getWatcher().isAborted()) { - LOG.error("ZooKeeperWatcher is abort"); + LOG.error("ZKWatcher is abort"); throw new InvalidToken("Token keys could not be sync from zookeeper" - + " because of ZooKeeperWatcher abort"); + + " because of ZKWatcher abort"); } synchronized (this) { if (!leaderElector.isAlive() || leaderElector.isStopped()) { @@ -254,7 +254,7 @@ public class AuthenticationTokenSecretManager } } } - + synchronized boolean isCurrentKeyRolled() { return currentKey != null; } @@ -297,11 +297,11 @@ public class AuthenticationTokenSecretManager private boolean isMaster = false; private ZKLeaderManager zkLeader; - public LeaderElector(ZooKeeperWatcher watcher, String serverName) { + public LeaderElector(ZKWatcher watcher, String serverName) { setDaemon(true); setName("ZKSecretWatcher-leaderElector"); zkLeader = new ZKLeaderManager(watcher, - ZKUtil.joinZNode(zkWatcher.getRootKeyZNode(), "keymaster"), + ZooKeeperUtil.joinZNode(zkWatcher.getRootKeyZNode(), "keymaster"), Bytes.toBytes(serverName), this); } diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenUtil.java index 814fe073f6..e820e8e229 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenUtil.java @@ -37,7 +37,7 @@ import org.apache.hadoop.hbase.protobuf.generated.AuthenticationProtos; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.zookeeper.ZKClusterId; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.hadoop.io.Text; import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapreduce.Job; @@ -289,7 +289,7 @@ public class TokenUtil { */ private static Token getAuthToken(Configuration conf, User user) throws IOException, InterruptedException { - ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "TokenUtil-getAuthToken", null); + ZKWatcher zkw = new ZKWatcher(conf, "TokenUtil-getAuthToken", null); try { String clusterId = ZKClusterId.readClusterIdZNode(zkw); if (clusterId == null) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/ZKSecretWatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/ZKSecretWatcher.java index 42dc3a9643..f7bf136a34 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/ZKSecretWatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/ZKSecretWatcher.java @@ -25,19 +25,19 @@ import java.util.List; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperUtil; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.conf.Configuration; 
import org.apache.hadoop.hbase.util.Writables; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperListener; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKListener; import org.apache.zookeeper.KeeperException; /** * Synchronizes token encryption keys across cluster nodes. */ @InterfaceAudience.Private -public class ZKSecretWatcher extends ZooKeeperListener { +public class ZKSecretWatcher extends ZKListener { private static final String DEFAULT_ROOT_NODE = "tokenauth"; private static final String DEFAULT_KEYS_PARENT = "keys"; private static final Log LOG = LogFactory.getLog(ZKSecretWatcher.class); @@ -47,23 +47,23 @@ public class ZKSecretWatcher extends ZooKeeperListener { private String keysParentZNode; public ZKSecretWatcher(Configuration conf, - ZooKeeperWatcher watcher, + ZKWatcher watcher, AuthenticationTokenSecretManager secretManager) { super(watcher); this.secretManager = secretManager; String keyZNodeParent = conf.get("zookeeper.znode.tokenauth.parent", DEFAULT_ROOT_NODE); - this.baseKeyZNode = ZKUtil.joinZNode(watcher.znodePaths.baseZNode, keyZNodeParent); - this.keysParentZNode = ZKUtil.joinZNode(baseKeyZNode, DEFAULT_KEYS_PARENT); + this.baseKeyZNode = ZooKeeperUtil.joinZNode(watcher.znodePaths.baseZNode, keyZNodeParent); + this.keysParentZNode = ZooKeeperUtil.joinZNode(baseKeyZNode, DEFAULT_KEYS_PARENT); } public void start() throws KeeperException { watcher.registerListener(this); // make sure the base node exists - ZKUtil.createWithParents(watcher, keysParentZNode); + ZooKeeperUtil.createWithParents(watcher, keysParentZNode); - if (ZKUtil.watchAndCheckExists(watcher, keysParentZNode)) { - List nodes = - ZKUtil.getChildDataAndWatchForNewChildren(watcher, keysParentZNode); + if (ZooKeeperUtil.watchAndCheckExists(watcher, keysParentZNode)) { + List nodes = + ZooKeeperUtil.getChildDataAndWatchForNewChildren(watcher, keysParentZNode); 
refreshNodes(nodes); } } @@ -72,8 +72,8 @@ public class ZKSecretWatcher extends ZooKeeperListener { public void nodeCreated(String path) { if (path.equals(keysParentZNode)) { try { - List nodes = - ZKUtil.getChildDataAndWatchForNewChildren(watcher, keysParentZNode); + List nodes = + ZooKeeperUtil.getChildDataAndWatchForNewChildren(watcher, keysParentZNode); refreshNodes(nodes); } catch (KeeperException ke) { LOG.fatal("Error reading data from zookeeper", ke); @@ -84,8 +84,8 @@ public class ZKSecretWatcher extends ZooKeeperListener { @Override public void nodeDeleted(String path) { - if (keysParentZNode.equals(ZKUtil.getParent(path))) { - String keyId = ZKUtil.getNodeName(path); + if (keysParentZNode.equals(ZooKeeperUtil.getParent(path))) { + String keyId = ZooKeeperUtil.getNodeName(path); try { Integer id = Integer.valueOf(keyId); secretManager.removeKey(id); @@ -97,9 +97,9 @@ public class ZKSecretWatcher extends ZooKeeperListener { @Override public void nodeDataChanged(String path) { - if (keysParentZNode.equals(ZKUtil.getParent(path))) { + if (keysParentZNode.equals(ZooKeeperUtil.getParent(path))) { try { - byte[] data = ZKUtil.getDataAndWatch(watcher, path); + byte[] data = ZooKeeperUtil.getDataAndWatch(watcher, path); if (data == null || data.length == 0) { LOG.debug("Ignoring empty node "+path); return; @@ -123,8 +123,8 @@ public class ZKSecretWatcher extends ZooKeeperListener { if (path.equals(keysParentZNode)) { // keys changed try { - List nodes = - ZKUtil.getChildDataAndWatchForNewChildren(watcher, keysParentZNode); + List nodes = + ZooKeeperUtil.getChildDataAndWatchForNewChildren(watcher, keysParentZNode); refreshNodes(nodes); } catch (KeeperException ke) { LOG.fatal("Error reading data from zookeeper", ke); @@ -137,10 +137,10 @@ public class ZKSecretWatcher extends ZooKeeperListener { return baseKeyZNode; } - private void refreshNodes(List nodes) { - for (ZKUtil.NodeAndData n : nodes) { + private void refreshNodes(List nodes) { + for 
(ZooKeeperUtil.NodeAndData n : nodes) { String path = n.getNode(); - String keyId = ZKUtil.getNodeName(path); + String keyId = ZooKeeperUtil.getNodeName(path); try { byte[] data = n.getData(); if (data == null || data.length == 0) { @@ -159,13 +159,13 @@ public class ZKSecretWatcher extends ZooKeeperListener { } private String getKeyNode(int keyId) { - return ZKUtil.joinZNode(keysParentZNode, Integer.toString(keyId)); + return ZooKeeperUtil.joinZNode(keysParentZNode, Integer.toString(keyId)); } public void removeKeyFromZK(AuthenticationKey key) { String keyZNode = getKeyNode(key.getKeyId()); try { - ZKUtil.deleteNode(watcher, keyZNode); + ZooKeeperUtil.deleteNode(watcher, keyZNode); } catch (KeeperException.NoNodeException nne) { LOG.error("Non-existent znode "+keyZNode+" for key "+key.getKeyId(), nne); } catch (KeeperException ke) { @@ -181,7 +181,7 @@ public class ZKSecretWatcher extends ZooKeeperListener { try { byte[] keyData = Writables.getBytes(key); // TODO: is there any point in retrying beyond what ZK client does? 
- ZKUtil.createSetData(watcher, keyZNode, keyData); + ZooKeeperUtil.createSetData(watcher, keyZNode, keyData); } catch (KeeperException ke) { LOG.fatal("Unable to synchronize master key "+key.getKeyId()+ " to znode "+keyZNode, ke); @@ -198,10 +198,10 @@ public class ZKSecretWatcher extends ZooKeeperListener { try { byte[] keyData = Writables.getBytes(key); try { - ZKUtil.updateExistingNodeData(watcher, keyZNode, keyData, -1); + ZooKeeperUtil.updateExistingNodeData(watcher, keyZNode, keyData, -1); } catch (KeeperException.NoNodeException ne) { // node was somehow removed, try adding it back - ZKUtil.createSetData(watcher, keyZNode, keyData); + ZooKeeperUtil.createSetData(watcher, keyZNode, keyData); } } catch (KeeperException ke) { LOG.fatal("Unable to update master key "+key.getKeyId()+ @@ -213,21 +213,21 @@ public class ZKSecretWatcher extends ZooKeeperListener { watcher.abort("Failed serializing key "+key.getKeyId(), ioe); } } - + /** * refresh keys */ synchronized void refreshKeys() { try { - List nodes = - ZKUtil.getChildDataAndWatchForNewChildren(watcher, keysParentZNode); + List nodes = + ZooKeeperUtil.getChildDataAndWatchForNewChildren(watcher, keysParentZNode); refreshNodes(nodes); } catch (KeeperException ke) { LOG.fatal("Error reading data from zookeeper", ke); watcher.abort("Error reading changed keys from zookeeper", ke); } } - + /** * get token keys parent node * @return token keys parent node diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/DefaultVisibilityLabelServiceImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/DefaultVisibilityLabelServiceImpl.java index e913b21fb1..8272da6948 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/DefaultVisibilityLabelServiceImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/DefaultVisibilityLabelServiceImpl.java @@ -52,6 +52,7 @@ import org.apache.hadoop.hbase.Tag; import 
org.apache.hadoop.hbase.TagType; import org.apache.hadoop.hbase.TagUtil; import org.apache.hadoop.hbase.coprocessor.HasRegionServerServices; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Mutation; @@ -67,7 +68,6 @@ import org.apache.hadoop.hbase.security.Superusers; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Pair; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; @InterfaceAudience.Private public class DefaultVisibilityLabelServiceImpl implements VisibilityLabelService { @@ -120,7 +120,7 @@ public class DefaultVisibilityLabelServiceImpl implements VisibilityLabelService // This is a CoreCoprocessor. On creation, we should have gotten an environment that // implements HasRegionServerServices so we can get at RSS. FIX!!!! Integrate this CP as // native service. 
- ZooKeeperWatcher zk = ((HasRegionServerServices)e).getRegionServerServices().getZooKeeper(); + ZKWatcher zk = ((HasRegionServerServices)e).getRegionServerServices().getZooKeeper(); try { labelsCache = VisibilityLabelsCache.createAndGet(zk, this.conf); } catch (IOException ioe) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelsCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelsCache.java index 2edf6365ad..f2ec329e5c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelsCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelsCache.java @@ -31,13 +31,13 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.AuthUtil; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.MultiUserAuthorizations; import org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.UserAuthorizations; import org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.VisibilityLabel; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.apache.zookeeper.KeeperException; /** @@ -64,7 +64,7 @@ public class VisibilityLabelsCache implements VisibilityLabelOrdinalProvider { */ private ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); - private VisibilityLabelsCache(ZooKeeperWatcher watcher, Configuration conf) throws IOException { + private VisibilityLabelsCache(ZKWatcher watcher, Configuration conf) throws IOException { zkVisibilityWatcher = new ZKVisibilityLabelWatcher(watcher, this, conf); try { 
zkVisibilityWatcher.start(); @@ -81,7 +81,7 @@ public class VisibilityLabelsCache implements VisibilityLabelOrdinalProvider { * @return Singleton instance of VisibilityLabelsCache * @throws IOException */ - public synchronized static VisibilityLabelsCache createAndGet(ZooKeeperWatcher watcher, + public synchronized static VisibilityLabelsCache createAndGet(ZKWatcher watcher, Configuration conf) throws IOException { // VisibilityLabelService#init() for different regions (in same RS) passes same instance of // watcher as all get the instance from RS. @@ -99,7 +99,7 @@ public class VisibilityLabelsCache implements VisibilityLabelOrdinalProvider { * @return Singleton instance of VisibilityLabelsCache * @throws IllegalStateException * when this is called before calling - * {@link #createAndGet(ZooKeeperWatcher, Configuration)} + * {@link #createAndGet(ZKWatcher, Configuration)} */ public static VisibilityLabelsCache get() { // By the time this method is called, the singleton instance of VisibilityLabelsCache should diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityUtils.java index 3fb66b87a9..b46886cf40 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityUtils.java @@ -123,7 +123,7 @@ public class VisibilityUtils { /** * Reads back from the zookeeper. The data read here is of the form written by * writeToZooKeeper(Map<byte[], Integer> entries). 
- * + * * @param data * @return Labels and their ordinal details * @throws DeserializationException @@ -149,7 +149,7 @@ public class VisibilityUtils { * @return User auth details * @throws DeserializationException */ - public static MultiUserAuthorizations readUserAuthsFromZKData(byte[] data) + public static MultiUserAuthorizations readUserAuthsFromZKData(byte[] data) throws DeserializationException { if (ProtobufUtil.isPBMagicPrefix(data)) { int pblen = ProtobufUtil.lengthOfPBMagic(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ZKVisibilityLabelWatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ZKVisibilityLabelWatcher.java index 9b5a44a67d..5ba01e6c29 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ZKVisibilityLabelWatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/ZKVisibilityLabelWatcher.java @@ -21,11 +21,11 @@ import java.io.IOException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.zookeeper.ZKListener; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperUtil; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperListener; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.apache.zookeeper.KeeperException; /** @@ -33,12 +33,12 @@ import org.apache.zookeeper.KeeperException; * /hbase/visibility_labels and will have a serialized form of a set of labels in the system. 
*/ @InterfaceAudience.Private -public class ZKVisibilityLabelWatcher extends ZooKeeperListener { +public class ZKVisibilityLabelWatcher extends ZKListener { private static final Log LOG = LogFactory.getLog(ZKVisibilityLabelWatcher.class); private static final String VISIBILITY_LABEL_ZK_PATH = "zookeeper.znode.visibility.label.parent"; private static final String DEFAULT_VISIBILITY_LABEL_NODE = "visibility/labels"; - private static final String VISIBILITY_USER_AUTHS_ZK_PATH = + private static final String VISIBILITY_USER_AUTHS_ZK_PATH = "zookeeper.znode.visibility.user.auths.parent"; private static final String DEFAULT_VISIBILITY_USER_AUTHS_NODE = "visibility/user_auths"; @@ -46,26 +46,26 @@ public class ZKVisibilityLabelWatcher extends ZooKeeperListener { private String labelZnode; private String userAuthsZnode; - public ZKVisibilityLabelWatcher(ZooKeeperWatcher watcher, VisibilityLabelsCache labelsCache, + public ZKVisibilityLabelWatcher(ZKWatcher watcher, VisibilityLabelsCache labelsCache, Configuration conf) { super(watcher); this.labelsCache = labelsCache; String labelZnodeParent = conf.get(VISIBILITY_LABEL_ZK_PATH, DEFAULT_VISIBILITY_LABEL_NODE); String userAuthsZnodeParent = conf.get(VISIBILITY_USER_AUTHS_ZK_PATH, DEFAULT_VISIBILITY_USER_AUTHS_NODE); - this.labelZnode = ZKUtil.joinZNode(watcher.znodePaths.baseZNode, labelZnodeParent); - this.userAuthsZnode = ZKUtil.joinZNode(watcher.znodePaths.baseZNode, userAuthsZnodeParent); + this.labelZnode = ZooKeeperUtil.joinZNode(watcher.znodePaths.baseZNode, labelZnodeParent); + this.userAuthsZnode = ZooKeeperUtil.joinZNode(watcher.znodePaths.baseZNode, userAuthsZnodeParent); } public void start() throws KeeperException { watcher.registerListener(this); - ZKUtil.createWithParents(watcher, labelZnode); - ZKUtil.createWithParents(watcher, userAuthsZnode); - byte[] data = ZKUtil.getDataAndWatch(watcher, labelZnode); + ZooKeeperUtil.createWithParents(watcher, labelZnode); + ZooKeeperUtil.createWithParents(watcher, 
userAuthsZnode); + byte[] data = ZooKeeperUtil.getDataAndWatch(watcher, labelZnode); if (data != null && data.length > 0) { refreshVisibilityLabelsCache(data); } - data = ZKUtil.getDataAndWatch(watcher, userAuthsZnode); + data = ZooKeeperUtil.getDataAndWatch(watcher, userAuthsZnode); if (data != null && data.length > 0) { refreshUserAuthsCache(data); } @@ -91,7 +91,7 @@ public class ZKVisibilityLabelWatcher extends ZooKeeperListener { public void nodeCreated(String path) { if (path.equals(labelZnode) || path.equals(userAuthsZnode)) { try { - ZKUtil.watchAndCheckExists(watcher, path); + ZooKeeperUtil.watchAndCheckExists(watcher, path); } catch (KeeperException ke) { LOG.error("Error setting watcher on node " + path, ke); // only option is to abort @@ -110,7 +110,7 @@ public class ZKVisibilityLabelWatcher extends ZooKeeperListener { if (path.equals(labelZnode) || path.equals(userAuthsZnode)) { try { watcher.sync(path); - byte[] data = ZKUtil.getDataAndWatch(watcher, path); + byte[] data = ZooKeeperUtil.getDataAndWatch(watcher, path); if (path.equals(labelZnode)) { refreshVisibilityLabelsCache(data); } else { @@ -131,7 +131,7 @@ public class ZKVisibilityLabelWatcher extends ZooKeeperListener { /** * Write a labels mirror or user auths mirror into zookeeper - * + * * @param data * @param labelsOrUserAuths true for writing labels and false for user auths. 
*/ @@ -141,7 +141,7 @@ public class ZKVisibilityLabelWatcher extends ZooKeeperListener { znode = this.userAuthsZnode; } try { - ZKUtil.updateExistingNodeData(watcher, znode, data, -1); + ZooKeeperUtil.updateExistingNodeData(watcher, znode, data, -1); } catch (KeeperException e) { LOG.error("Failed writing to " + znode, e); watcher.abort("Failed writing node " + znode + " to zookeeper", e); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java index 934a630a67..e0dbcf7977 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java @@ -87,7 +87,7 @@ import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.RegionLocations; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.ZooKeeperConnectionException; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperConnectionException; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; @@ -129,8 +129,8 @@ import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WALFactory; import org.apache.hadoop.hbase.wal.WALSplitter; import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperUtil; import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException; import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.security.UserGroupInformation; @@ -315,7 +315,7 @@ public class HBaseFsck extends Configured implements Closeable { private Map> skippedRegions = new HashMap<>(); - private 
ZooKeeperWatcher zkw = null; + private ZKWatcher zkw = null; private String hbckEphemeralNodePath = null; private boolean hbckZodeCreated = false; @@ -703,12 +703,12 @@ public class HBaseFsck extends Configured implements Closeable { */ private boolean setMasterInMaintenanceMode() throws IOException { RetryCounter retryCounter = createZNodeRetryCounterFactory.create(); - hbckEphemeralNodePath = ZKUtil.joinZNode( + hbckEphemeralNodePath = ZooKeeperUtil.joinZNode( zkw.znodePaths.masterMaintZNode, "hbck-" + Long.toString(EnvironmentEdgeManager.currentTime())); do { try { - hbckZodeCreated = ZKUtil.createEphemeralNodeAndWatch(zkw, hbckEphemeralNodePath, null); + hbckZodeCreated = ZooKeeperUtil.createEphemeralNodeAndWatch(zkw, hbckEphemeralNodePath, null); if (hbckZodeCreated) { break; } @@ -735,7 +735,7 @@ public class HBaseFsck extends Configured implements Closeable { private void cleanupHbckZnode() { try { if (zkw != null && hbckZodeCreated) { - ZKUtil.deleteNode(zkw, hbckEphemeralNodePath); + ZooKeeperUtil.deleteNode(zkw, hbckEphemeralNodePath); hbckZodeCreated = false; } } catch (KeeperException e) { @@ -1917,8 +1917,8 @@ public class HBaseFsck extends Configured implements Closeable { return true; } - private ZooKeeperWatcher createZooKeeperWatcher() throws IOException { - return new ZooKeeperWatcher(getConf(), "hbase Fsck", new Abortable() { + private ZKWatcher createZooKeeperWatcher() throws IOException { + return new ZKWatcher(getConf(), "hbase Fsck", new Abortable() { @Override public void abort(String why, Throwable e) { LOG.error(why, e); @@ -3645,7 +3645,7 @@ public class HBaseFsck extends Configured implements Closeable { private void unassignMetaReplica(HbckInfo hi) throws IOException, InterruptedException, KeeperException { undeployRegions(hi); - ZKUtil.deleteNode(zkw, zkw.znodePaths.getZNodeForReplica(hi.metaEntry.getReplicaId())); + ZooKeeperUtil.deleteNode(zkw, zkw.znodePaths.getZNodeForReplica(hi.metaEntry.getReplicaId())); } private void 
assignMetaReplica(int replicaId) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java index afb6c5b0e1..7cdd6bc7f0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java @@ -32,7 +32,7 @@ import org.apache.hadoop.hbase.ClusterStatus.Option; import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.ZooKeeperConnectionException; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperConnectionException; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.Connection; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java index 0f36a7bc41..b1c1f07f03 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java @@ -62,7 +62,7 @@ import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter; import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.yetus.audience.InterfaceAudience; /** @@ -900,7 +900,7 @@ public class RegionMover extends AbstractHBaseTool { return null; } if (region.isMetaRegion()) { - ZooKeeperWatcher zkw = new ZooKeeperWatcher(admin.getConfiguration(), "region_mover", null); + ZKWatcher zkw = new ZKWatcher(admin.getConfiguration(), "region_mover", null); MetaTableLocator locator = new MetaTableLocator(); int 
maxWaitInSeconds = admin.getConfiguration().getInt(MOVE_WAIT_MAX_KEY, DEFAULT_MOVE_WAIT_MAX); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ZKDataMigrator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ZKDataMigrator.java index 43c3598755..84ed729370 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ZKDataMigrator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ZKDataMigrator.java @@ -25,13 +25,14 @@ import java.util.Map; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperUtil; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.zookeeper.KeeperException; /** @@ -51,10 +52,10 @@ public class ZKDataMigrator { * table descriptor based states. */ @Deprecated - public static Map queryForTableStates(ZooKeeperWatcher zkw) + public static Map queryForTableStates(ZKWatcher zkw) throws KeeperException, InterruptedException { Map rv = new HashMap<>(); - List children = ZKUtil.listChildrenNoWatch(zkw, zkw.znodePaths.tableZNode); + List children = ZooKeeperUtil.listChildrenNoWatch(zkw, zkw.znodePaths.tableZNode); if (children == null) return rv; for (String child: children) { @@ -85,17 +86,17 @@ public class ZKDataMigrator { /** * Gets table state from ZK. 
- * @param zkw ZooKeeperWatcher instance to use + * @param zkw ZKWatcher instance to use * @param tableName table we're checking * @return Null or {@link ZooKeeperProtos.DeprecatedTableState.State} found in znode. * @throws KeeperException */ @Deprecated private static ZooKeeperProtos.DeprecatedTableState.State getTableState( - final ZooKeeperWatcher zkw, final TableName tableName) + final ZKWatcher zkw, final TableName tableName) throws KeeperException, InterruptedException { - String znode = ZKUtil.joinZNode(zkw.znodePaths.tableZNode, tableName.getNameAsString()); - byte [] data = ZKUtil.getData(zkw, znode); + String znode = ZooKeeperUtil.joinZNode(zkw.znodePaths.tableZNode, tableName.getNameAsString()); + byte [] data = ZooKeeperUtil.getData(zkw, znode); if (data == null || data.length <= 0) return null; try { ProtobufUtil.expectPBMagicPrefix(data); @@ -109,7 +110,7 @@ public class ZKDataMigrator { ke.initCause(e); throw ke; } catch (DeserializationException e) { - throw ZKUtil.convert(e); + throw ZooKeeperUtil.convert(e); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/ReplicationChecker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/ReplicationChecker.java index 9fb8459fc1..ca82c47a80 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/ReplicationChecker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/ReplicationChecker.java @@ -33,7 +33,7 @@ import org.apache.hadoop.hbase.master.cleaner.ReplicationZKNodeCleaner; import org.apache.hadoop.hbase.replication.ReplicationQueueInfo; import org.apache.hadoop.hbase.util.HBaseFsck; import org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; /* * Check and fix undeleted replication queues for removed peerId. 
@@ -47,7 +47,7 @@ public class ReplicationChecker { private Set undeletedHFileRefsQueueIds = new HashSet<>(); private final ReplicationZKNodeCleaner cleaner; - public ReplicationChecker(Configuration conf, ZooKeeperWatcher zkw, ClusterConnection connection, + public ReplicationChecker(Configuration conf, ZKWatcher zkw, ClusterConnection connection, ErrorReporter errorReporter) throws IOException { this.cleaner = new ReplicationZKNodeCleaner(conf, zkw, connection); this.errorReporter = errorReporter; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java index 215d2ed58e..38e0dec29a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java @@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.wal; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.coordination.SplitLogWorkerCoordination; -import org.apache.hadoop.hbase.regionserver.SplitLogWorker; import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.hbase.shaded.com.google.common.base.Preconditions; import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists; @@ -67,7 +66,6 @@ import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.CellUtil; -import org.apache.hadoop.hbase.CoordinatedStateManager; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; @@ -76,6 +74,7 @@ import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotFoundException; +import org.apache.hadoop.hbase.zookeeper.ZKSplitLog; import 
org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.ConnectionFactory; @@ -119,7 +118,6 @@ import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.wal.WAL.Entry; import org.apache.hadoop.hbase.wal.WAL.Reader; import org.apache.hadoop.hbase.wal.WALProvider.Writer; -import org.apache.hadoop.hbase.zookeeper.ZKSplitLog; import org.apache.hadoop.io.MultipleIOException; import org.apache.hadoop.ipc.RemoteException; @@ -209,7 +207,7 @@ public class WALSplitter { outputSink = new LogReplayOutputSink(controller, entryBuffers, numWriterThreads); } else { if (this.distributedLogReplay) { - LOG.info("ZooKeeperWatcher is passed in as NULL so disable distrubitedLogRepaly."); + LOG.info("ZKWatcher is passed in as NULL so disable distrubitedLogRepaly."); } this.distributedLogReplay = false; outputSink = new LogRecoveredEditsOutputSink(controller, entryBuffers, numWriterThreads); diff --git a/hbase-server/src/main/resources/hbase-webapps/master/zk.jsp b/hbase-server/src/main/resources/hbase-webapps/master/zk.jsp index 4465162e95..1eb59dab3d 100644 --- a/hbase-server/src/main/resources/hbase-webapps/master/zk.jsp +++ b/hbase-server/src/main/resources/hbase-webapps/master/zk.jsp @@ -20,11 +20,12 @@ <%@ page contentType="text/html;charset=UTF-8" import="org.apache.commons.lang3.StringEscapeUtils" import="org.apache.hadoop.hbase.zookeeper.ZKUtil" - import="org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher" + import="org.apache.hadoop.hbase.zookeeper.ZKWatcher" import="org.apache.hadoop.hbase.HBaseConfiguration" - import="org.apache.hadoop.hbase.master.HMaster"%><% + import="org.apache.hadoop.hbase.master.HMaster"%> +<% HMaster master = (HMaster)getServletContext().getAttribute(HMaster.MASTER); - ZooKeeperWatcher watcher = master.getZooKeeper(); + ZKWatcher watcher = master.getZooKeeper(); %> diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java index dce04bdb17..2d0700f70c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java @@ -67,6 +67,7 @@ import org.apache.hadoop.hbase.Waiter.Predicate; import org.apache.hadoop.hbase.client.ImmutableHRegionInfo; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionInfoBuilder; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperConnectionException; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.BufferedMutator; @@ -132,7 +133,7 @@ import org.apache.hadoop.hbase.wal.WALFactory; import org.apache.hadoop.hbase.zookeeper.EmptyWatcher; import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster; import org.apache.hadoop.hbase.zookeeper.ZKConfig; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.hadoop.hdfs.DFSClient; import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.MiniDFSCluster; @@ -2749,7 +2750,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { } } - public void expireSession(ZooKeeperWatcher nodeZK) throws Exception { + public void expireSession(ZKWatcher nodeZK) throws Exception { expireSession(nodeZK, false); } @@ -2764,11 +2765,11 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { * @param checkStatus - true to check if we can create a Table with the * current configuration. 
*/ - public void expireSession(ZooKeeperWatcher nodeZK, boolean checkStatus) + public void expireSession(ZKWatcher nodeZK, boolean checkStatus) throws Exception { Configuration c = new Configuration(this.conf); String quorumServers = ZKConfig.getZKQuorumServersString(c); - ZooKeeper zk = nodeZK.getRecoverableZooKeeper().getZooKeeper(); + ZooKeeper zk = nodeZK.getRecoverableZK().getZooKeeper(); byte[] password = zk.getSessionPasswd(); long sessionID = zk.getSessionId(); @@ -2879,18 +2880,18 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { private HBaseAdmin hbaseAdmin = null; /** - * Returns a ZooKeeperWatcher instance. + * Returns a ZKWatcher instance. * This instance is shared between HBaseTestingUtility instance users. * Don't close it, it will be closed automatically when the * cluster shutdowns * - * @return The ZooKeeperWatcher instance. + * @return The ZKWatcher instance. * @throws IOException */ - public synchronized ZooKeeperWatcher getZooKeeperWatcher() + public synchronized ZKWatcher getZooKeeperWatcher() throws IOException { if (zooKeeperWatcher == null) { - zooKeeperWatcher = new ZooKeeperWatcher(conf, "testing utility", + zooKeeperWatcher = new ZKWatcher(conf, "testing utility", new Abortable() { @Override public void abort(String why, Throwable e) { throw new RuntimeException("Unexpected abort in HBaseTestingUtility:"+why, e); @@ -2900,7 +2901,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { } return zooKeeperWatcher; } - private ZooKeeperWatcher zooKeeperWatcher; + private ZKWatcher zooKeeperWatcher; @@ -3505,13 +3506,13 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { } /** - * Gets a ZooKeeperWatcher. + * Gets a ZKWatcher. 
* @param TEST_UTIL */ - public static ZooKeeperWatcher getZooKeeperWatcher( + public static ZKWatcher getZooKeeperWatcher( HBaseTestingUtility TEST_UTIL) throws ZooKeeperConnectionException, IOException { - ZooKeeperWatcher zkw = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(), + ZKWatcher zkw = new ZKWatcher(TEST_UTIL.getConfiguration(), "unittest", new Abortable() { boolean aborted = false; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java index 58a0055ecf..93a6de6d43 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java @@ -54,7 +54,8 @@ import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.zookeeper.KeeperException; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; @@ -71,18 +72,18 @@ public class MockRegionServerServices implements RegionServerServices { new ConcurrentSkipListMap<>(Bytes.BYTES_COMPARATOR); private HFileSystem hfs = null; private final Configuration conf; - private ZooKeeperWatcher zkw = null; + private ZKWatcher zkw = null; private ServerName serverName = null; private RpcServerInterface rpcServer = null; private volatile boolean abortRequested; private volatile boolean stopping = false; private final AtomicBoolean running = new AtomicBoolean(true); - MockRegionServerServices(ZooKeeperWatcher zkw) { + MockRegionServerServices(ZKWatcher zkw) { this(zkw, null); } - MockRegionServerServices(ZooKeeperWatcher zkw, ServerName serverName) 
{ + MockRegionServerServices(ZKWatcher zkw, ServerName serverName) { this.zkw = zkw; this.serverName = serverName; this.conf = (zkw == null ? new Configuration() : zkw.getConfiguration()); @@ -167,7 +168,7 @@ public class MockRegionServerServices implements RegionServerServices { } @Override - public ZooKeeperWatcher getZooKeeper() { + public ZKWatcher getZooKeeper() { return zkw; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessorNoCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessorNoCluster.java index cec2c20758..8a13142d6f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessorNoCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessorNoCluster.java @@ -41,7 +41,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanRespon import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -124,7 +125,7 @@ public class TestMetaTableAccessorNoCluster { public void testRideOverServerNotRunning() throws IOException, InterruptedException, ServiceException { // Need a zk watcher. - ZooKeeperWatcher zkw = new ZooKeeperWatcher(UTIL.getConfiguration(), + ZKWatcher zkw = new ZKWatcher(UTIL.getConfiguration(), this.getClass().getSimpleName(), ABORTABLE, true); // This is a servername we use in a few places below. 
ServerName sn = ServerName.valueOf("example.com", 1234, System.currentTimeMillis()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableLocator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableLocator.java index 8bebd8d7d7..9d8bd4a968 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableLocator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableLocator.java @@ -44,7 +44,7 @@ import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.zookeeper.KeeperException; import org.junit.After; import org.junit.AfterClass; @@ -66,7 +66,7 @@ public class TestMetaTableLocator { private static final HBaseTestingUtility UTIL = new HBaseTestingUtility(); private static final ServerName SN = ServerName.valueOf("example.org", 1234, System.currentTimeMillis()); - private ZooKeeperWatcher watcher; + private ZKWatcher watcher; private Abortable abortable; @BeforeClass public static void beforeClass() throws Exception { @@ -91,7 +91,7 @@ public class TestMetaTableLocator { return false; } }; - this.watcher = new ZooKeeperWatcher(UTIL.getConfiguration(), + this.watcher = new ZKWatcher(UTIL.getConfiguration(), this.getClass().getSimpleName(), this.abortable, true); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMultiVersions.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMultiVersions.java index 12605519f5..27fb566e0a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMultiVersions.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMultiVersions.java @@ -42,6 +42,7 @@ import org.apache.hadoop.hbase.testclassification.MediumTests; 
import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Pair; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperConnectionException; import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; @@ -59,7 +60,7 @@ public class TestMultiVersions { private static final Log LOG = LogFactory.getLog(TestMultiVersions.class); private static final HBaseTestingUtility UTIL = new HBaseTestingUtility(); private Admin admin; - + private static final int NUM_SLAVES = 3; @Rule @@ -85,7 +86,7 @@ public class TestMultiVersions { * Tests user specifiable time stamps putting, getting and scanning. Also * tests same in presence of deletes. Test cores are written so can be * run against an HRegion and against an HTable: i.e. both local and remote. - * + * *

Port of old TestTimestamp test to here so can better utilize the spun * up cluster running more than a single test per spin up. Keep old tests' * crazyness. @@ -184,7 +185,7 @@ public class TestMultiVersions { * Port of old TestScanMultipleVersions test here so can better utilize the * spun up cluster running more than just a single test. Keep old tests * crazyness. - * + * *

Tests five cases of scans and timestamps. * @throws Exception */ diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestZooKeeper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestZooKeeper.java index 4ed8d9173b..4947a43e44 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestZooKeeper.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestZooKeeper.java @@ -55,8 +55,8 @@ import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.zookeeper.EmptyWatcher; import org.apache.hadoop.hbase.zookeeper.ZKConfig; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperUtil; import org.apache.zookeeper.CreateMode; import org.apache.zookeeper.KeeperException; import org.apache.zookeeper.ZooDefs; @@ -126,15 +126,15 @@ public class TestZooKeeper { TEST_UTIL.shutdownMiniHBaseCluster(); } finally { TEST_UTIL.getTestFileSystem().delete(FSUtils.getRootDir(TEST_UTIL.getConfiguration()), true); - ZKUtil.deleteNodeRecursively(TEST_UTIL.getZooKeeperWatcher(), "/hbase"); + ZooKeeperUtil.deleteNodeRecursively(TEST_UTIL.getZooKeeperWatcher(), "/hbase"); } } - private ZooKeeperWatcher getZooKeeperWatcher(Connection c) + private ZKWatcher getZooKeeperWatcher(Connection c) throws NoSuchMethodException, InvocationTargetException, IllegalAccessException { Method getterZK = c.getClass().getDeclaredMethod("getKeepAliveZooKeeperWatcher"); getterZK.setAccessible(true); - return (ZooKeeperWatcher) getterZK.invoke(c); + return (ZKWatcher) getterZK.invoke(c); } @@ -153,40 +153,40 @@ public class TestZooKeeper { Connection connection = ConnectionFactory.createConnection(c); - ZooKeeperWatcher connectionZK = getZooKeeperWatcher(connection); - LOG.info("ZooKeeperWatcher= 0x"+ Integer.toHexString( + ZKWatcher connectionZK = 
getZooKeeperWatcher(connection); + LOG.info("ZKWatcher= 0x"+ Integer.toHexString( connectionZK.hashCode())); - LOG.info("getRecoverableZooKeeper= 0x"+ Integer.toHexString( - connectionZK.getRecoverableZooKeeper().hashCode())); + LOG.info("getRecoverableZK= 0x"+ Integer.toHexString( + connectionZK.getRecoverableZK().hashCode())); LOG.info("session="+Long.toHexString( - connectionZK.getRecoverableZooKeeper().getSessionId())); + connectionZK.getRecoverableZK().getSessionId())); TEST_UTIL.expireSession(connectionZK); LOG.info("Before using zkw state=" + - connectionZK.getRecoverableZooKeeper().getState()); + connectionZK.getRecoverableZK().getState()); // provoke session expiration by doing something with ZK try { - connectionZK.getRecoverableZooKeeper().getZooKeeper().exists( + connectionZK.getRecoverableZK().getZooKeeper().exists( "/1/1", false); } catch (KeeperException ignored) { } // Check that the old ZK connection is closed, means we did expire - States state = connectionZK.getRecoverableZooKeeper().getState(); + States state = connectionZK.getRecoverableZK().getState(); LOG.info("After using zkw state=" + state); LOG.info("session="+Long.toHexString( - connectionZK.getRecoverableZooKeeper().getSessionId())); + connectionZK.getRecoverableZK().getSessionId())); // It's asynchronous, so we may have to wait a little... final long limit1 = System.currentTimeMillis() + 3000; while (System.currentTimeMillis() < limit1 && state != States.CLOSED){ - state = connectionZK.getRecoverableZooKeeper().getState(); + state = connectionZK.getRecoverableZK().getState(); } LOG.info("After using zkw loop=" + state); LOG.info("ZooKeeper should have timed out"); LOG.info("session="+Long.toHexString( - connectionZK.getRecoverableZooKeeper().getSessionId())); + connectionZK.getRecoverableZK().getSessionId())); // It's surprising but sometimes we can still be in connected state. 
// As it's known (even if not understood) we don't make the the test fail @@ -194,9 +194,9 @@ public class TestZooKeeper { // Assert.assertTrue("state=" + state, state == States.CLOSED); // Check that the client recovered - ZooKeeperWatcher newConnectionZK = getZooKeeperWatcher(connection); + ZKWatcher newConnectionZK = getZooKeeperWatcher(connection); - States state2 = newConnectionZK.getRecoverableZooKeeper().getState(); + States state2 = newConnectionZK.getRecoverableZK().getState(); LOG.info("After new get state=" +state2); // As it's an asynchronous event we may got the same ZKW, if it's not @@ -206,7 +206,7 @@ public class TestZooKeeper { state2 != States.CONNECTED && state2 != States.CONNECTING) { newConnectionZK = getZooKeeperWatcher(connection); - state2 = newConnectionZK.getRecoverableZooKeeper().getState(); + state2 = newConnectionZK.getRecoverableZK().getState(); } LOG.info("After new get state loop=" + state2); @@ -286,9 +286,9 @@ public class TestZooKeeper { ipMeta.exists(new Get(row)); // make sure they aren't the same - ZooKeeperWatcher z1 = + ZKWatcher z1 = getZooKeeperWatcher(ConnectionFactory.createConnection(localMeta.getConfiguration())); - ZooKeeperWatcher z2 = + ZKWatcher z2 = getZooKeeperWatcher(ConnectionFactory.createConnection(otherConf)); assertFalse(z1 == z2); assertFalse(z1.getQuorum().equals(z2.getQuorum())); @@ -304,19 +304,19 @@ public class TestZooKeeper { */ @Test public void testCreateWithParents() throws Exception { - ZooKeeperWatcher zkw = - new ZooKeeperWatcher(new Configuration(TEST_UTIL.getConfiguration()), + ZKWatcher zkw = + new ZKWatcher(new Configuration(TEST_UTIL.getConfiguration()), TestZooKeeper.class.getName(), null); byte[] expectedData = new byte[] { 1, 2, 3 }; - ZKUtil.createWithParents(zkw, "/l1/l2/l3/l4/testCreateWithParents", expectedData); - byte[] data = ZKUtil.getData(zkw, "/l1/l2/l3/l4/testCreateWithParents"); + ZooKeeperUtil.createWithParents(zkw, "/l1/l2/l3/l4/testCreateWithParents", expectedData); + 
byte[] data = ZooKeeperUtil.getData(zkw, "/l1/l2/l3/l4/testCreateWithParents"); assertTrue(Bytes.equals(expectedData, data)); - ZKUtil.deleteNodeRecursively(zkw, "/l1"); + ZooKeeperUtil.deleteNodeRecursively(zkw, "/l1"); - ZKUtil.createWithParents(zkw, "/testCreateWithParents", expectedData); - data = ZKUtil.getData(zkw, "/testCreateWithParents"); + ZooKeeperUtil.createWithParents(zkw, "/testCreateWithParents", expectedData); + data = ZooKeeperUtil.getData(zkw, "/testCreateWithParents"); assertTrue(Bytes.equals(expectedData, data)); - ZKUtil.deleteNodeRecursively(zkw, "/testCreateWithParents"); + ZooKeeperUtil.deleteNodeRecursively(zkw, "/testCreateWithParents"); } /** @@ -326,25 +326,25 @@ public class TestZooKeeper { */ @Test public void testZNodeDeletes() throws Exception { - ZooKeeperWatcher zkw = new ZooKeeperWatcher( + ZKWatcher zkw = new ZKWatcher( new Configuration(TEST_UTIL.getConfiguration()), TestZooKeeper.class.getName(), null); - ZKUtil.createWithParents(zkw, "/l1/l2/l3/l4"); + ZooKeeperUtil.createWithParents(zkw, "/l1/l2/l3/l4"); try { - ZKUtil.deleteNode(zkw, "/l1/l2"); + ZooKeeperUtil.deleteNode(zkw, "/l1/l2"); fail("We should not be able to delete if znode has childs"); } catch (KeeperException ex) { - assertNotNull(ZKUtil.getDataNoWatch(zkw, "/l1/l2/l3/l4", null)); + assertNotNull(ZooKeeperUtil.getDataNoWatch(zkw, "/l1/l2/l3/l4", null)); } - ZKUtil.deleteNodeRecursively(zkw, "/l1/l2"); + ZooKeeperUtil.deleteNodeRecursively(zkw, "/l1/l2"); // make sure it really is deleted - assertNull(ZKUtil.getDataNoWatch(zkw, "/l1/l2/l3/l4", null)); + assertNull(ZooKeeperUtil.getDataNoWatch(zkw, "/l1/l2/l3/l4", null)); // do the same delete again and make sure it doesn't crash - ZKUtil.deleteNodeRecursively(zkw, "/l1/l2"); + ZooKeeperUtil.deleteNodeRecursively(zkw, "/l1/l2"); - ZKUtil.deleteNode(zkw, "/l1"); - assertNull(ZKUtil.getDataNoWatch(zkw, "/l1/l2", null)); + ZooKeeperUtil.deleteNode(zkw, "/l1"); + assertNull(ZooKeeperUtil.getDataNoWatch(zkw, "/l1/l2", 
null)); } /** @@ -367,7 +367,7 @@ public class TestZooKeeper { // Assumes the root of the ZooKeeper space is writable as it creates a node // wherever the cluster home is defined. - ZooKeeperWatcher zk2 = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(), + ZKWatcher zk2 = new ZKWatcher(TEST_UTIL.getConfiguration(), "testCreateSilentIsReallySilent", null); // Save the previous ACL @@ -430,7 +430,7 @@ public class TestZooKeeper { } } zk.close(); - ZKUtil.createAndFailSilent(zk2, aclZnode); + ZooKeeperUtil.createAndFailSilent(zk2, aclZnode); // Restore the ACL ZooKeeper zk3 = new ZooKeeper(quorumServers, sessionTimeout, EmptyWatcher.instance); @@ -450,8 +450,8 @@ public class TestZooKeeper { @SuppressWarnings("deprecation") public void testGetChildDataAndWatchForNewChildrenShouldNotThrowNPE() throws Exception { - ZooKeeperWatcher zkw = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(), name.getMethodName(), null); - ZKUtil.getChildDataAndWatchForNewChildren(zkw, "/wrongNode"); + ZKWatcher zkw = new ZKWatcher(TEST_UTIL.getConfiguration(), name.getMethodName(), null); + ZooKeeperUtil.getChildDataAndWatchForNewChildren(zkw, "/wrongNode"); } /** @@ -465,7 +465,7 @@ public class TestZooKeeper { cluster.startRegionServer(); cluster.waitForActiveAndReadyMaster(10000); HMaster m = cluster.getMaster(); - final ZooKeeperWatcher zkw = m.getZooKeeper(); + final ZKWatcher zkw = m.getZooKeeper(); // now the cluster is up. So assign some regions. try (Admin admin = TEST_UTIL.getAdmin()) { byte[][] SPLIT_KEYS = new byte[][] { Bytes.toBytes("a"), Bytes.toBytes("b"), @@ -496,14 +496,14 @@ public class TestZooKeeper { * Count listeners in zkw excluding listeners, that belongs to workers or other * temporary processes. 
*/ - private int countPermanentListeners(ZooKeeperWatcher watcher) { + private int countPermanentListeners(ZKWatcher watcher) { return countListeners(watcher, ZkSplitLogWorkerCoordination.class); } /** * Count listeners in zkw excluding provided classes */ - private int countListeners(ZooKeeperWatcher watcher, Class... exclude) { + private int countListeners(ZKWatcher watcher, Class... exclude) { int cnt = 0; for (Object o : watcher.getListeners()) { boolean skip = false; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java index 20cb5133bf..6dadcbad3f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java @@ -54,8 +54,8 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.HFileArchiveUtil; import org.apache.hadoop.hbase.util.StoppableImplementation; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperUtil; import org.apache.zookeeper.KeeperException; import org.junit.After; import org.junit.AfterClass; @@ -93,9 +93,9 @@ public class TestZooKeeperTableArchiveClient { CONNECTION = (ClusterConnection)ConnectionFactory.createConnection(UTIL.getConfiguration()); archivingClient = new ZKTableArchiveClient(UTIL.getConfiguration(), CONNECTION); // make hfile archiving node so we can archive files - ZooKeeperWatcher watcher = UTIL.getZooKeeperWatcher(); + ZKWatcher watcher = UTIL.getZooKeeperWatcher(); String archivingZNode = ZKTableArchiveClient.getArchiveZNode(UTIL.getConfiguration(), watcher); - 
ZKUtil.createWithParents(watcher, archivingZNode); + ZooKeeperUtil.createWithParents(watcher, archivingZNode); rss = mock(RegionServerServices.class); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java index d7d1b3ac6e..7a6a7241cf 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java @@ -24,10 +24,9 @@ import java.util.concurrent.atomic.AtomicLong; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.RegionLocations; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.ZooKeeperConnectionException; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperConnectionException; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; import org.apache.hadoop.hbase.coprocessor.RegionObserver; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java index 953fae0876..b02d7c943e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java @@ -25,15 +25,10 @@ import static org.junit.Assert.fail; import java.io.IOException; import java.util.ArrayList; -import java.util.Collection; import java.util.EnumSet; import java.util.HashMap; import java.util.List; import java.util.Random; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.TimeUnit; -import 
java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Collectors; @@ -54,13 +49,12 @@ import org.apache.hadoop.hbase.TableNotDisabledException; import org.apache.hadoop.hbase.TableNotEnabledException; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.UnknownRegionException; -import org.apache.hadoop.hbase.ZooKeeperConnectionException; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperConnectionException; import org.apache.hadoop.hbase.constraint.ConstraintException; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.assignment.AssignmentManager; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionServer; -import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Bytes; @@ -70,7 +64,6 @@ import org.junit.After; import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; -import org.junit.Ignore; import org.junit.Rule; import org.junit.Test; import org.junit.experimental.categories.Category; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHBaseAdminNoCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHBaseAdminNoCluster.java index fb9fb37a90..f7436fe035 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHBaseAdminNoCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHBaseAdminNoCluster.java @@ -34,7 +34,7 @@ import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.MasterNotRunningException; import org.apache.hadoop.hbase.PleaseHoldException; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.ZooKeeperConnectionException; +import 
org.apache.hadoop.hbase.zookeeper.ZooKeeperConnectionException; import org.apache.hadoop.hbase.ipc.HBaseRpcController; import org.apache.hadoop.hbase.ipc.RpcControllerFactory; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest; @@ -77,7 +77,7 @@ public class TestHBaseAdminNoCluster { * @throws ZooKeeperConnectionException * @throws MasterNotRunningException * @throws ServiceException - * @throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException + * @throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException */ //TODO: Clean up, with Procedure V2 and nonce to prevent the same procedure to call mulitple // time, this test is invalid anymore. Just keep the test around for some time before @@ -85,7 +85,7 @@ public class TestHBaseAdminNoCluster { @Ignore @Test public void testMasterMonitorCallableRetries() - throws MasterNotRunningException, ZooKeeperConnectionException, IOException, + throws MasterNotRunningException, ZooKeeperConnectionException, IOException, org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { Configuration configuration = HBaseConfiguration.create(); // Set the pause and retry count way down. 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java index 98d864bd96..10d80b84d8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java @@ -55,8 +55,8 @@ import org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter.ERROR_CODE; import org.apache.hadoop.hbase.util.HBaseFsckRepair; import org.apache.hadoop.hbase.util.hbck.HbckTestingUtil; import org.apache.hadoop.hbase.zookeeper.LoadBalancerTracker; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperUtil; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.zookeeper.KeeperException; import org.junit.After; import org.junit.Before; @@ -131,22 +131,22 @@ public class TestMetaWithReplicas { @Test public void testZookeeperNodesForReplicas() throws Exception { // Checks all the znodes exist when meta's replicas are enabled - ZooKeeperWatcher zkw = TEST_UTIL.getZooKeeperWatcher(); + ZKWatcher zkw = TEST_UTIL.getZooKeeperWatcher(); Configuration conf = TEST_UTIL.getConfiguration(); String baseZNode = conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT, HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT); - String primaryMetaZnode = ZKUtil.joinZNode(baseZNode, + String primaryMetaZnode = ZooKeeperUtil.joinZNode(baseZNode, conf.get("zookeeper.znode.metaserver", "meta-region-server")); // check that the data in the znode is parseable (this would also mean the znode exists) - byte[] data = ZKUtil.getData(zkw, primaryMetaZnode); + byte[] data = ZooKeeperUtil.getData(zkw, primaryMetaZnode); ProtobufUtil.toServerName(data); for (int i = 1; i < 3; i++) { - String secZnode = ZKUtil.joinZNode(baseZNode, + String secZnode = ZooKeeperUtil.joinZNode(baseZNode, 
conf.get("zookeeper.znode.metaserver", "meta-region-server") + "-" + i); String str = zkw.znodePaths.getZNodeForReplica(i); assertTrue(str.equals(secZnode)); // check that the data in the znode is parseable (this would also mean the znode exists) - data = ZKUtil.getData(zkw, secZnode); + data = ZooKeeperUtil.getData(zkw, secZnode); ProtobufUtil.toServerName(data); } } @@ -165,15 +165,15 @@ public class TestMetaWithReplicas { // server holding the primary meta replica. Then it does a put/get into/from // the test table. The put/get operations would use the replicas to locate the // location of the test table's region - ZooKeeperWatcher zkw = util.getZooKeeperWatcher(); + ZKWatcher zkw = util.getZooKeeperWatcher(); Configuration conf = util.getConfiguration(); conf.setBoolean(HConstants.USE_META_REPLICAS, true); String baseZNode = conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT, HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT); - String primaryMetaZnode = ZKUtil.joinZNode(baseZNode, + String primaryMetaZnode = ZooKeeperUtil.joinZNode(baseZNode, conf.get("zookeeper.znode.metaserver", "meta-region-server")); - byte[] data = ZKUtil.getData(zkw, primaryMetaZnode); + byte[] data = ZooKeeperUtil.getData(zkw, primaryMetaZnode); ServerName primary = ProtobufUtil.toServerName(data); TableName TABLE = TableName.valueOf("testShutdownHandling"); @@ -367,8 +367,8 @@ public class TestMetaWithReplicas { false, false); HBaseFsckRepair.closeRegionSilentlyAndWait(c, rl.getRegionLocation(2).getServerName(), rl.getRegionLocation(2).getRegionInfo()); - ZooKeeperWatcher zkw = TEST_UTIL.getZooKeeperWatcher(); - ZKUtil.deleteNode(zkw, zkw.znodePaths.getZNodeForReplica(2)); + ZKWatcher zkw = TEST_UTIL.getZooKeeperWatcher(); + ZooKeeperUtil.deleteNode(zkw, zkw.znodePaths.getZNodeForReplica(2)); // check that problem exists HBaseFsck hbck = doFsck(TEST_UTIL.getConfiguration(), false); assertErrors(hbck, new ERROR_CODE[]{ERROR_CODE.UNKNOWN,ERROR_CODE.NO_META_REGION}); @@ -399,13 +399,13 @@ public class 
TestMetaWithReplicas { // caches update themselves. Uses the master operations to test // this Configuration conf = TEST_UTIL.getConfiguration(); - ZooKeeperWatcher zkw = TEST_UTIL.getZooKeeperWatcher(); + ZKWatcher zkw = TEST_UTIL.getZooKeeperWatcher(); String baseZNode = conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT, HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT); - String primaryMetaZnode = ZKUtil.joinZNode(baseZNode, + String primaryMetaZnode = ZooKeeperUtil.joinZNode(baseZNode, conf.get("zookeeper.znode.metaserver", "meta-region-server")); // check that the data in the znode is parseable (this would also mean the znode exists) - byte[] data = ZKUtil.getData(zkw, primaryMetaZnode); + byte[] data = ZooKeeperUtil.getData(zkw, primaryMetaZnode); ServerName currentServer = ProtobufUtil.toServerName(data); Collection liveServers = TEST_UTIL.getAdmin() .getClusterStatus(EnumSet.of(Option.LIVE_SERVERS)).getServers(); @@ -427,7 +427,7 @@ public class TestMetaWithReplicas { final int max = 10000; do { Thread.sleep(10); - data = ZKUtil.getData(zkw, primaryMetaZnode); + data = ZooKeeperUtil.getData(zkw, primaryMetaZnode); currentServer = ProtobufUtil.toServerName(data); i++; } while (!moveToServer.equals(currentServer) && i < max); //wait for 10 seconds overall diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java index a23b76ae0f..e0d74c91aa 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java @@ -34,17 +34,15 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.ReplicationPeerNotFoundException; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; -import org.apache.hadoop.hbase.client.RetriesExhaustedException; 
import org.apache.hadoop.hbase.replication.ReplicationException; import org.apache.hadoop.hbase.replication.ReplicationFactory; -import org.apache.hadoop.hbase.replication.ReplicationPeer; import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; import org.apache.hadoop.hbase.replication.ReplicationPeerDescription; import org.apache.hadoop.hbase.replication.ReplicationQueues; import org.apache.hadoop.hbase.replication.ReplicationQueuesArguments; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.MediumTests; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Rule; @@ -172,7 +170,7 @@ public class TestReplicationAdmin { ReplicationPeerConfig rpc2 = new ReplicationPeerConfig(); rpc2.setClusterKey(KEY_SECOND); Configuration conf = TEST_UTIL.getConfiguration(); - ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "Test HBaseAdmin", null); + ZKWatcher zkw = new ZKWatcher(conf, "Test HBaseAdmin", null); ReplicationQueues repQueues = ReplicationFactory.getReplicationQueues(new ReplicationQueuesArguments(conf, null, zkw)); repQueues.init("server1"); @@ -187,7 +185,7 @@ public class TestReplicationAdmin { } repQueues.removeQueue(ID_ONE); assertEquals(0, repQueues.getAllQueues().size()); - + // add recovered queue for ID_ONE repQueues.addLog(ID_ONE + "-server2", "file1"); try { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterCoprocessorExceptionWithAbort.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterCoprocessorExceptionWithAbort.java index 0595a67711..e549346555 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterCoprocessorExceptionWithAbort.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterCoprocessorExceptionWithAbort.java @@ 
-39,8 +39,8 @@ import org.apache.hadoop.hbase.master.MasterCoprocessorHost; import org.apache.hadoop.hbase.testclassification.CoprocessorTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperNodeTracker; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKNodeTracker; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; @@ -60,10 +60,10 @@ import static org.junit.Assert.fail; @Category({CoprocessorTests.class, MediumTests.class}) public class TestMasterCoprocessorExceptionWithAbort { - public static class MasterTracker extends ZooKeeperNodeTracker { + public static class MasterTracker extends ZKNodeTracker { public boolean masterZKNodeWasDeleted = false; - public MasterTracker(ZooKeeperWatcher zkw, String masterNode, Abortable abortable) { + public MasterTracker(ZKWatcher zkw, String masterNode, Abortable abortable) { super(zkw, masterNode, abortable); } @@ -174,7 +174,7 @@ public class TestMasterCoprocessorExceptionWithAbort { // set a watch on the zookeeper /hbase/master node. If the master dies, // the node will be deleted.
- ZooKeeperWatcher zkw = new ZooKeeperWatcher(UTIL.getConfiguration(), + ZKWatcher zkw = new ZKWatcher(UTIL.getConfiguration(), "unittest", new Abortable() { @Override public void abort(String why, Throwable e) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterCoprocessorExceptionWithRemove.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterCoprocessorExceptionWithRemove.java index d4c6e4f8b5..c4defa2d74 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterCoprocessorExceptionWithRemove.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterCoprocessorExceptionWithRemove.java @@ -38,8 +38,8 @@ import org.apache.hadoop.hbase.master.MasterCoprocessorHost; import org.apache.hadoop.hbase.testclassification.CoprocessorTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperNodeTracker; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKNodeTracker; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; @@ -59,10 +59,10 @@ import static org.junit.Assert.fail; @Category({CoprocessorTests.class, MediumTests.class}) public class TestMasterCoprocessorExceptionWithRemove { - public static class MasterTracker extends ZooKeeperNodeTracker { + public static class MasterTracker extends ZKNodeTracker { public boolean masterZKNodeWasDeleted = false; - public MasterTracker(ZooKeeperWatcher zkw, String masterNode, Abortable abortable) { + public MasterTracker(ZKWatcher zkw, String masterNode, Abortable abortable) { super(zkw, masterNode, abortable); } @@ -159,7 +159,7 @@ public class TestMasterCoprocessorExceptionWithRemove { // we are testing that the default setting of hbase.coprocessor.abortonerror // =false // is 
respected. - ZooKeeperWatcher zkw = new ZooKeeperWatcher(UTIL.getConfiguration(), + ZKWatcher zkw = new ZKWatcher(UTIL.getConfiguration(), "unittest", new Abortable() { @Override public void abort(String why, Throwable e) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/FilterTestingCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/FilterTestingCluster.java index 7041c92da8..4f967f33df 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/FilterTestingCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/FilterTestingCluster.java @@ -36,7 +36,7 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.MasterNotRunningException; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.ZooKeeperConnectionException; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperConnectionException; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.ScannerCallable; import org.apache.hadoop.hbase.client.Table; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterWrapper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterWrapper.java index e779706ebb..919fbfa164 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterWrapper.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterWrapper.java @@ -36,7 +36,7 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.MasterNotRunningException; -import org.apache.hadoop.hbase.ZooKeeperConnectionException; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperConnectionException; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; diff 
--git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java index 85d2b0ba7c..dadec1fbe4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java @@ -52,7 +52,7 @@ import org.apache.hadoop.hbase.replication.ReplicationException; import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; import org.apache.hadoop.hbase.replication.ReplicationPeerDescription; import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import com.google.protobuf.Service; @@ -150,7 +150,7 @@ public class MockNoopMasterServices implements MasterServices, Server { } @Override - public ZooKeeperWatcher getZooKeeper() { + public ZKWatcher getZooKeeper() { return null; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java index 119c22547f..15cfcc7d74 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java @@ -25,7 +25,6 @@ import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Random; -import java.util.Set; import java.util.TreeMap; import java.util.concurrent.ConcurrentSkipListMap; @@ -38,7 +37,7 @@ import org.apache.hadoop.hbase.ChoreService; import org.apache.hadoop.hbase.CoordinatedStateManager; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.ZooKeeperConnectionException; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperConnectionException; import 
org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionInfoBuilder; @@ -65,7 +64,7 @@ import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.zookeeper.KeeperException; import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController; @@ -136,7 +135,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuo class MockRegionServer implements AdminProtos.AdminService.BlockingInterface, ClientProtos.ClientService.BlockingInterface, RegionServerServices { private final ServerName sn; - private final ZooKeeperWatcher zkw; + private final ZKWatcher zkw; private final Configuration conf; private final Random random = new Random(); @@ -183,13 +182,13 @@ ClientProtos.ClientService.BlockingInterface, RegionServerServices { /** * @param sn Name of this mock regionserver * @throws IOException - * @throws org.apache.hadoop.hbase.ZooKeeperConnectionException + * @throws ZooKeeperConnectionException */ MockRegionServer(final Configuration conf, final ServerName sn) throws ZooKeeperConnectionException, IOException { this.sn = sn; this.conf = conf; - this.zkw = new ZooKeeperWatcher(conf, sn.toString(), this, true); + this.zkw = new ZKWatcher(conf, sn.toString(), this, true); } /** @@ -282,7 +281,7 @@ ClientProtos.ClientService.BlockingInterface, RegionServerServices { } @Override - public ZooKeeperWatcher getZooKeeper() { + public ZKWatcher getZooKeeper() { return this.zkw; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java index 
f1feef55d5..26b11cce3b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java @@ -41,9 +41,9 @@ import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.zookeeper.ClusterStatusTracker; import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker; import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperListener; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKListener; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperUtil; import org.apache.zookeeper.KeeperException; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -70,11 +70,11 @@ public class TestActiveMasterManager { } @Test public void testRestartMaster() throws IOException, KeeperException { - ZooKeeperWatcher zk = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(), + ZKWatcher zk = new ZKWatcher(TEST_UTIL.getConfiguration(), "testActiveMasterManagerFromZK", null, true); try { - ZKUtil.deleteNode(zk, zk.znodePaths.masterAddressZNode); - ZKUtil.deleteNode(zk, zk.znodePaths.clusterStateZNode); + ZooKeeperUtil.deleteNode(zk, zk.znodePaths.masterAddressZNode); + ZooKeeperUtil.deleteNode(zk, zk.znodePaths.clusterStateZNode); } catch(KeeperException.NoNodeException nne) {} // Create the master node with a dummy address @@ -112,11 +112,11 @@ public class TestActiveMasterManager { */ @Test public void testActiveMasterManagerFromZK() throws Exception { - ZooKeeperWatcher zk = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(), + ZKWatcher zk = new ZKWatcher(TEST_UTIL.getConfiguration(), "testActiveMasterManagerFromZK", null, true); try { - ZKUtil.deleteNode(zk, 
zk.znodePaths.masterAddressZNode); - ZKUtil.deleteNode(zk, zk.znodePaths.clusterStateZNode); + ZooKeeperUtil.deleteNode(zk, zk.znodePaths.masterAddressZNode); + ZooKeeperUtil.deleteNode(zk, zk.znodePaths.clusterStateZNode); } catch(KeeperException.NoNodeException nne) {} // Create the master node with a dummy address @@ -135,7 +136,7 @@ public class TestActiveMasterManager { ClusterStatusTracker clusterStatusTracker = ms1.getClusterStatusTracker(); clusterStatusTracker.setClusterUp(); - activeMasterManager.blockUntilBecomingActiveMaster(100, + activeMasterManager.blockUntilBecomingActiveMaster(100, Mockito.mock(MonitoredTask.class)); assertTrue(activeMasterManager.clusterHasActiveMaster.get()); assertMaster(zk, firstMasterAddress); @@ -165,7 +166,7 @@ public class TestActiveMasterManager { zk.registerListener(listener); LOG.info("Deleting master node"); - ZKUtil.deleteNode(zk, zk.znodePaths.masterAddressZNode); + ZooKeeperUtil.deleteNode(zk, zk.znodePaths.masterAddressZNode); // Wait for the node to be deleted LOG.info("Waiting for active master manager to be notified"); @@ -185,7 +186,7 @@ public class TestActiveMasterManager { assertTrue(t.isActiveMaster); LOG.info("Deleting master node"); - ZKUtil.deleteNode(zk, zk.znodePaths.masterAddressZNode); + ZooKeeperUtil.deleteNode(zk, zk.znodePaths.masterAddressZNode); } /** @@ -193,9 +194,9 @@ public class TestActiveMasterManager { * @param zk * @param thisMasterAddress * @throws KeeperException - * @throws IOException + * @throws IOException */ - private void assertMaster(ZooKeeperWatcher zk, + private void assertMaster(ZKWatcher zk, ServerName expectedAddress) throws KeeperException, IOException { ServerName readAddress = MasterAddressTracker.getMasterAddress(zk); @@ -209,7 +210,7 @@ public class TestActiveMasterManager { DummyMaster dummyMaster; boolean isActiveMaster; - public WaitToBeMasterThread(ZooKeeperWatcher zk, ServerName address) { + public WaitToBeMasterThread(ZKWatcher zk, ServerName address) { 
this.dummyMaster = new DummyMaster(zk,address); this.manager = this.dummyMaster.getActiveMasterManager(); isActiveMaster = false; @@ -224,13 +225,13 @@ public class TestActiveMasterManager { } } - public static class NodeDeletionListener extends ZooKeeperListener { + public static class NodeDeletionListener extends ZKListener { private static final Log LOG = LogFactory.getLog(NodeDeletionListener.class); private Semaphore lock; private String node; - public NodeDeletionListener(ZooKeeperWatcher watcher, String node) { + public NodeDeletionListener(ZKWatcher watcher, String node) { super(watcher); lock = new Semaphore(0); this.node = node; @@ -257,7 +258,7 @@ public class TestActiveMasterManager { private ClusterStatusTracker clusterStatusTracker; private ActiveMasterManager activeMasterManager; - public DummyMaster(ZooKeeperWatcher zk, ServerName master) { + public DummyMaster(ZKWatcher zk, ServerName master) { this.clusterStatusTracker = new ClusterStatusTracker(zk, this); clusterStatusTracker.start(); @@ -269,7 +270,7 @@ public class TestActiveMasterManager { @Override public void abort(final String msg, final Throwable t) {} - + @Override public boolean isAborted() { return false; @@ -281,7 +282,7 @@ public class TestActiveMasterManager { } @Override - public ZooKeeperWatcher getZooKeeper() { + public ZKWatcher getZooKeeper() { return null; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentListener.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentListener.java index 4c4a8edf84..9e252e8b45 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentListener.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentListener.java @@ -47,10 +47,8 @@ import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; import 
org.apache.hadoop.hbase.util.JVMClusterUtil; -import org.apache.hadoop.hbase.zookeeper.DrainingServerTracker; -import org.apache.hadoop.hbase.zookeeper.RegionServerTracker; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperUtil; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.junit.AfterClass; import org.junit.Assert; import org.junit.BeforeClass; @@ -318,11 +316,11 @@ public class TestAssignmentListener { // are properly added to the ServerManager.drainingServers when they // register with the ServerManager under these circumstances. Configuration conf = TEST_UTIL.getConfiguration(); - ZooKeeperWatcher zooKeeper = new ZooKeeperWatcher(conf, + ZKWatcher zooKeeper = new ZKWatcher(conf, "zkWatcher-NewServerDrainTest", abortable, true); String baseZNode = conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT, HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT); - String drainingZNode = ZKUtil.joinZNode(baseZNode, + String drainingZNode = ZooKeeperUtil.joinZNode(baseZNode, conf.get("zookeeper.znode.draining.rs", "draining")); HMaster master = Mockito.mock(HMaster.class); @@ -348,8 +346,8 @@ public class TestAssignmentListener { // Create draining znodes for the draining servers, which would have been // performed when the previous HMaster was running. 
for (ServerName sn : drainingServers) { - String znode = ZKUtil.joinZNode(drainingZNode, sn.getServerName()); - ZKUtil.createAndFailSilent(zooKeeper, znode); + String znode = ZooKeeperUtil.joinZNode(drainingZNode, sn.getServerName()); + ZooKeeperUtil.createAndFailSilent(zooKeeper, znode); } // Now, we follow the same order of steps that the HMaster does to setup diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClockSkewDetection.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClockSkewDetection.java index 852b139398..bd7c5073f9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClockSkewDetection.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClockSkewDetection.java @@ -27,19 +27,13 @@ import java.net.InetAddress; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.ChoreService; import org.apache.hadoop.hbase.ClockOutOfSyncException; -import org.apache.hadoop.hbase.CoordinatedStateManager; import org.apache.hadoop.hbase.HBaseConfiguration; -import org.apache.hadoop.hbase.Server; -import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.ipc.RpcControllerFactory; import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.SmallTests; -import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.junit.Test; import org.junit.experimental.categories.Category; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java index 
eafc41295f..04994bf429 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java @@ -105,8 +105,8 @@ import org.apache.hadoop.hbase.wal.WALFactory; import org.apache.hadoop.hbase.wal.WALKey; import org.apache.hadoop.hbase.wal.WALSplitter; import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperUtil; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.junit.After; import org.junit.AfterClass; @@ -207,7 +207,7 @@ public class TestDistributedLogSplitting { TEST_UTIL.shutdownMiniHBaseCluster(); } finally { TEST_UTIL.getTestFileSystem().delete(FSUtils.getRootDir(TEST_UTIL.getConfiguration()), true); - ZKUtil.deleteNodeRecursively(TEST_UTIL.getZooKeeperWatcher(), "/hbase"); + ZooKeeperUtil.deleteNodeRecursively(TEST_UTIL.getZooKeeperWatcher(), "/hbase"); } } @@ -229,7 +229,7 @@ public class TestDistributedLogSplitting { Path rootdir = FSUtils.getRootDir(conf); - Table t = installTable(new ZooKeeperWatcher(conf, "table-creation", null), + Table t = installTable(new ZKWatcher(conf, "table-creation", null), "table", "family", 40); try { TableName table = t.getName(); @@ -312,7 +312,7 @@ public class TestDistributedLogSplitting { // they will consume recovered.edits master.balanceSwitch(false); - final ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "table-creation", null); + final ZKWatcher zkw = new ZKWatcher(conf, "table-creation", null); Table ht = installTable(zkw, "table", "family", NUM_REGIONS_TO_CREATE); try { HRegionServer hrs = findRSToKill(false, "table"); @@ -364,7 +364,7 @@ public class TestDistributedLogSplitting { startCluster(NUM_RS); master.balanceSwitch(false); - final ZooKeeperWatcher zkw = 
new ZooKeeperWatcher(conf, "table-creation", null); + final ZKWatcher zkw = new ZKWatcher(conf, "table-creation", null); Table ht = installTable(zkw, TABLE_NAME, FAMILY_NAME, NUM_REGIONS_TO_CREATE); NonceGeneratorWithDups ng = new NonceGeneratorWithDups(); NonceGenerator oldNg = @@ -421,7 +421,7 @@ public class TestDistributedLogSplitting { // they will consume recovered.edits master.balanceSwitch(false); - final ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "table-creation", null); + final ZKWatcher zkw = new ZKWatcher(conf, "table-creation", null); Table ht = installTable(zkw, "table", "family", NUM_REGIONS_TO_CREATE); try { HRegionServer hrs = findRSToKill(true, "table"); @@ -435,14 +435,14 @@ public class TestDistributedLogSplitting { } } - private void abortRSAndVerifyRecovery(HRegionServer hrs, Table ht, final ZooKeeperWatcher zkw, + private void abortRSAndVerifyRecovery(HRegionServer hrs, Table ht, final ZKWatcher zkw, final int numRegions, final int numofLines) throws Exception { abortRSAndWaitForRecovery(hrs, zkw, numRegions); assertEquals(numofLines, TEST_UTIL.countRows(ht)); } - private void abortRSAndWaitForRecovery(HRegionServer hrs, final ZooKeeperWatcher zkw, + private void abortRSAndWaitForRecovery(HRegionServer hrs, final ZKWatcher zkw, final int numRegions) throws Exception { final MiniHBaseCluster tmpCluster = this.cluster; @@ -471,7 +471,7 @@ public class TestDistributedLogSplitting { TEST_UTIL.waitFor(180000, 200, new Waiter.Predicate() { @Override public boolean evaluate() throws Exception { - List recoveringRegions = zkw.getRecoverableZooKeeper().getChildren( + List recoveringRegions = zkw.getRecoverableZK().getChildren( zkw.znodePaths.recoveringRegionsZNode, false); return (recoveringRegions != null && recoveringRegions.isEmpty()); } @@ -491,7 +491,7 @@ public class TestDistributedLogSplitting { // they will consume recovered.edits master.balanceSwitch(false); - final ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "table-creation", 
null); + final ZKWatcher zkw = new ZKWatcher(conf, "table-creation", null); Table ht = installTable(zkw, "table", "family", NUM_REGIONS_TO_CREATE); try { HRegionServer hrs = findRSToKill(false, "table"); @@ -549,7 +549,7 @@ public class TestDistributedLogSplitting { // they will consume recovered.edits master.balanceSwitch(false); - final ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "table-creation", null); + final ZKWatcher zkw = new ZKWatcher(conf, "table-creation", null); Table ht = installTable(zkw, "table", "family", NUM_REGIONS_TO_CREATE); try { HRegionServer hrs = findRSToKill(false, "table"); @@ -578,7 +578,7 @@ public class TestDistributedLogSplitting { TEST_UTIL.waitFor(180000, 200, new Waiter.Predicate() { @Override public boolean evaluate() throws Exception { - List recoveringRegions = zkw.getRecoverableZooKeeper().getChildren( + List recoveringRegions = zkw.getRecoverableZK().getChildren( zkw.znodePaths.recoveringRegionsZNode, false); boolean done = recoveringRegions != null && recoveringRegions.isEmpty(); if (!done) { @@ -611,7 +611,7 @@ public class TestDistributedLogSplitting { master.balanceSwitch(false); List rsts = cluster.getLiveRegionServerThreads(); - final ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "table-creation", null); + final ZKWatcher zkw = new ZKWatcher(conf, "table-creation", null); Table ht = installTable(zkw, "table", "family", NUM_REGIONS_TO_CREATE); try { List regions = null; @@ -670,7 +670,7 @@ public class TestDistributedLogSplitting { TEST_UTIL.waitFor(180000, 200, new Waiter.Predicate() { @Override public boolean evaluate() throws Exception { - List recoveringRegions = zkw.getRecoverableZooKeeper().getChildren( + List recoveringRegions = zkw.getRecoverableZK().getChildren( zkw.znodePaths.recoveringRegionsZNode, false); return (recoveringRegions != null && recoveringRegions.isEmpty()); } @@ -690,7 +690,7 @@ public class TestDistributedLogSplitting { startCluster(NUM_RS); master.balanceSwitch(false); List rsts = 
cluster.getLiveRegionServerThreads(); - final ZooKeeperWatcher zkw = master.getZooKeeper(); + final ZKWatcher zkw = master.getZooKeeper(); Table ht = installTable(zkw, "table", "family", 40); try { final SplitLogManager slm = master.getMasterWalManager().getSplitLogManager(); @@ -714,8 +714,8 @@ public class TestDistributedLogSplitting { slm.markRegionsRecovering(firstFailedServer, regionSet); slm.markRegionsRecovering(secondFailedServer, regionSet); - List recoveringRegions = ZKUtil.listChildrenNoWatch(zkw, - ZKUtil.joinZNode(zkw.znodePaths.recoveringRegionsZNode, region.getEncodedName())); + List recoveringRegions = ZooKeeperUtil.listChildrenNoWatch(zkw, + ZooKeeperUtil.joinZNode(zkw.znodePaths.recoveringRegionsZNode, region.getEncodedName())); assertEquals(recoveringRegions.size(), 2); @@ -744,7 +744,7 @@ public class TestDistributedLogSplitting { master.balanceSwitch(false); List rsts = cluster.getLiveRegionServerThreads(); - final ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "table-creation", null); + final ZKWatcher zkw = new ZKWatcher(conf, "table-creation", null); Table ht = installTable(zkw, "table", "family", NUM_REGIONS_TO_CREATE); try { List regions = null; @@ -788,7 +788,7 @@ public class TestDistributedLogSplitting { final int NUM_LOG_LINES = 1000; List rsts = cluster.getLiveRegionServerThreads(); - final ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "table-creation", null); + final ZKWatcher zkw = new ZKWatcher(conf, "table-creation", null); Table disablingHT = installTable(zkw, "disableTable", "family", NUM_REGIONS_TO_CREATE); Table ht = installTable(zkw, "table", "family", NUM_REGIONS_TO_CREATE, NUM_REGIONS_TO_CREATE); try { @@ -871,7 +871,7 @@ public class TestDistributedLogSplitting { TEST_UTIL.waitFor(180000, 200, new Waiter.Predicate() { @Override public boolean evaluate() throws Exception { - List recoveringRegions = zkw.getRecoverableZooKeeper().getChildren( + List recoveringRegions = zkw.getRecoverableZK().getChildren( 
zkw.znodePaths.recoveringRegionsZNode, false); ServerManager serverManager = master.getServerManager(); return (!serverManager.areDeadServersInProgress() && @@ -939,7 +939,7 @@ public class TestDistributedLogSplitting { master.balanceSwitch(false); List rsts = cluster.getLiveRegionServerThreads(); - final ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "table-creation", null); + final ZKWatcher zkw = new ZKWatcher(conf, "table-creation", null); Table ht = installTable(zkw, "table", "family", NUM_REGIONS_TO_CREATE); try { final SplitLogManager slm = master.getMasterWalManager().getSplitLogManager(); @@ -1026,7 +1026,7 @@ public class TestDistributedLogSplitting { final Path logDir = new Path(rootdir, AbstractFSWALProvider.getWALDirectoryName(hrs.getServerName().toString())); - Table t = installTable(new ZooKeeperWatcher(conf, "table-creation", null), + Table t = installTable(new ZKWatcher(conf, "table-creation", null), "table", "family", 40); try { makeWAL(hrs, ProtobufUtil.getOnlineRegions(hrs.getRSRpcServices()), @@ -1081,7 +1081,7 @@ public class TestDistributedLogSplitting { startCluster(NUM_RS); // NUM_RS=6. 
- final ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, + final ZKWatcher zkw = new ZKWatcher(conf, "distributed log splitting test", null); Table ht = installTable(zkw, "table", "family", NUM_REGIONS_TO_CREATE); @@ -1116,7 +1116,7 @@ public class TestDistributedLogSplitting { TEST_UTIL.waitFor(180000, 200, new Waiter.Predicate() { @Override public boolean evaluate() throws Exception { - List recoveringRegions = zkw.getRecoverableZooKeeper().getChildren( + List recoveringRegions = zkw.getRecoverableZK().getChildren( zkw.znodePaths.recoveringRegionsZNode, false); return (recoveringRegions != null && recoveringRegions.isEmpty()); } @@ -1209,7 +1209,7 @@ public class TestDistributedLogSplitting { // turn off load balancing to prevent regions from moving around otherwise // they will consume recovered.edits master.balanceSwitch(false); - final ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "table-creation", null); + final ZKWatcher zkw = new ZKWatcher(conf, "table-creation", null); // only testing meta recovery in ZK operation HRegionServer hrs = findRSToKill(true, null); @@ -1224,7 +1224,7 @@ public class TestDistributedLogSplitting { master.getMasterWalManager().prepareLogReplay(hrs.getServerName(), userRegionSet); boolean isMetaRegionInRecovery = false; List recoveringRegions = - zkw.getRecoverableZooKeeper().getChildren(zkw.znodePaths.recoveringRegionsZNode, false); + zkw.getRecoverableZK().getChildren(zkw.znodePaths.recoveringRegionsZNode, false); for (String curEncodedRegionName : recoveringRegions) { if (curEncodedRegionName.equals(RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedName())) { isMetaRegionInRecovery = true; @@ -1237,7 +1237,7 @@ public class TestDistributedLogSplitting { isMetaRegionInRecovery = false; recoveringRegions = - zkw.getRecoverableZooKeeper().getChildren(zkw.znodePaths.recoveringRegionsZNode, false); + zkw.getRecoverableZK().getChildren(zkw.znodePaths.recoveringRegionsZNode, false); for (String curEncodedRegionName : 
recoveringRegions) { if (curEncodedRegionName.equals(RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedName())) { isMetaRegionInRecovery = true; @@ -1263,7 +1263,7 @@ public class TestDistributedLogSplitting { master.balanceSwitch(false); List rsts = cluster.getLiveRegionServerThreads(); - final ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "table-creation", null); + final ZKWatcher zkw = new ZKWatcher(conf, "table-creation", null); Table ht = installTable(zkw, name.getMethodName(), "family", NUM_REGIONS_TO_CREATE); try { List regions = null; @@ -1359,7 +1359,7 @@ public class TestDistributedLogSplitting { master.balanceSwitch(false); List rsts = cluster.getLiveRegionServerThreads(); - final ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "table-creation", null); + final ZKWatcher zkw = new ZKWatcher(conf, "table-creation", null); Table ht = installTable(zkw, "table", "family", NUM_REGIONS_TO_CREATE); try { List regions = null; @@ -1453,7 +1453,7 @@ public class TestDistributedLogSplitting { public void testReadWriteSeqIdFiles() throws Exception { LOG.info("testReadWriteSeqIdFiles"); startCluster(2); - final ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "table-creation", null); + final ZKWatcher zkw = new ZKWatcher(conf, "table-creation", null); Table ht = installTable(zkw, name.getMethodName(), "family", 10); try { FileSystem fs = master.getMasterFileSystem().getFileSystem(); @@ -1484,11 +1484,11 @@ public class TestDistributedLogSplitting { } } - Table installTable(ZooKeeperWatcher zkw, String tname, String fname, int nrs) throws Exception { + Table installTable(ZKWatcher zkw, String tname, String fname, int nrs) throws Exception { return installTable(zkw, tname, fname, nrs, 0); } - Table installTable(ZooKeeperWatcher zkw, String tname, String fname, int nrs, + Table installTable(ZKWatcher zkw, String tname, String fname, int nrs, int existingRegions) throws Exception { // Create a table with regions TableName table = TableName.valueOf(tname); @@ 
-1672,7 +1672,7 @@ public class TestDistributedLogSplitting { return count; } - private void blockUntilNoRIT(ZooKeeperWatcher zkw, HMaster master) throws Exception { + private void blockUntilNoRIT(ZKWatcher zkw, HMaster master) throws Exception { TEST_UTIL.waitUntilNoRegionsInTransition(60000); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestHMasterRPCException.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestHMasterRPCException.java index 64d5a0292e..a43f4b9c89 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestHMasterRPCException.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestHMasterRPCException.java @@ -38,8 +38,8 @@ import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperUtil; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.zookeeper.KeeperException; import org.junit.After; import org.junit.Before; @@ -66,8 +66,8 @@ public class TestHMasterRPCException { conf.setInt(HConstants.ZK_SESSION_TIMEOUT, 2000); testUtil.startMiniZKCluster(); - ZooKeeperWatcher watcher = testUtil.getZooKeeperWatcher(); - ZKUtil.createWithParents(watcher, watcher.znodePaths.masterAddressZNode, Bytes.toBytes("fake:123")); + ZKWatcher watcher = testUtil.getZooKeeperWatcher(); + ZooKeeperUtil.createWithParents(watcher, watcher.znodePaths.masterAddressZNode, Bytes.toBytes("fake:123")); master = new HMaster(conf); rpcClient = RpcClientFactory.createClient(conf, HConstants.CLUSTER_ID_DEFAULT); } @@ -103,7 +103,7 @@ public class TestHMasterRPCException { "org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet")); LOG.info("Expected 
exception: ", ie); if (!fakeZNodeDelete) { - testUtil.getZooKeeperWatcher().getRecoverableZooKeeper() + testUtil.getZooKeeperWatcher().getRecoverableZK() .delete(testUtil.getZooKeeperWatcher().znodePaths.masterAddressZNode, -1); fakeZNodeDelete = true; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java index 25671fcce0..d58a316a27 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java @@ -40,7 +40,7 @@ import org.apache.hadoop.hbase.MetaMockingUtil; import org.apache.hadoop.hbase.ServerLoad; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.ZooKeeperConnectionException; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperConnectionException; import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.HConnectionTestingUtility; import org.apache.hadoop.hbase.client.Result; @@ -52,8 +52,8 @@ import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperUtil; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.zookeeper.KeeperException; import org.junit.After; @@ -105,7 +105,7 @@ public class TestMasterNoCluster { public void tearDown() throws KeeperException, ZooKeeperConnectionException, IOException { // Make sure zk is clean before we run the next test. 
- ZooKeeperWatcher zkw = new ZooKeeperWatcher(TESTUTIL.getConfiguration(), + ZKWatcher zkw = new ZKWatcher(TESTUTIL.getConfiguration(), "@Before", new Abortable() { @Override public void abort(String why, Throwable e) { @@ -117,7 +117,7 @@ public class TestMasterNoCluster { return false; } }); - ZKUtil.deleteNodeRecursively(zkw, zkw.znodePaths.baseZNode); + ZooKeeperUtil.deleteNodeRecursively(zkw, zkw.znodePaths.baseZNode); zkw.close(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterStatusServlet.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterStatusServlet.java index ea57e1504d..91f2fa6b94 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterStatusServlet.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterStatusServlet.java @@ -41,8 +41,9 @@ import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.tmpl.master.MasterStatusTmpl; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker; -import org.apache.hadoop.hbase.zookeeper.ZNodePaths; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKNodePaths; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.junit.Before; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -95,8 +96,8 @@ public class TestMasterStatusServlet { Mockito.doReturn(serverManager).when(master).getServerManager(); // Fake ZKW - ZooKeeperWatcher zkw = Mockito.mock(ZooKeeperWatcher.class); - Mockito.doReturn(new ZNodePaths(conf)).when(zkw).getZNodePaths(); + ZKWatcher zkw = Mockito.mock(ZKWatcher.class); + Mockito.doReturn(new ZKNodePaths(conf)).when(zkw).getZNodePaths(); Mockito.doReturn("fakequorum").when(zkw).getQuorum(); Mockito.doReturn(zkw).when(master).getZooKeeper(); diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterWalManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterWalManager.java index 8641b20e4b..89eda98df3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterWalManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterWalManager.java @@ -26,18 +26,15 @@ import java.util.Set; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.SplitLogTask; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.MediumTests; -import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.zookeeper.ZKSplitLog; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperUtil; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.zookeeper.CreateMode; import org.apache.zookeeper.ZooDefs.Ids; import org.junit.AfterClass; @@ -79,27 +76,28 @@ public class TestMasterWalManager { String walPath = "/hbase/data/.logs/" + inRecoveryServerName.getServerName() + "-splitting/test"; // Create a ZKW to use in the test - ZooKeeperWatcher zkw = HBaseTestingUtility.getZooKeeperWatcher(UTIL); - zkw.getRecoverableZooKeeper().create(ZKSplitLog.getEncodedNodeName(zkw, walPath), + ZKWatcher zkw = HBaseTestingUtility.getZooKeeperWatcher(UTIL); + zkw.getRecoverableZK().create(ZKSplitLog.getEncodedNodeName(zkw, walPath), new SplitLogTask.Owned(inRecoveryServerName).toByteArray(), Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT); - String staleRegionPath = 
ZKUtil.joinZNode(zkw.znodePaths.recoveringRegionsZNode, staleRegion); - ZKUtil.createWithParents(zkw, staleRegionPath); - String inRecoveringRegionPath = ZKUtil.joinZNode(zkw.znodePaths.recoveringRegionsZNode, + String staleRegionPath = ZooKeeperUtil + .joinZNode(zkw.znodePaths.recoveringRegionsZNode, staleRegion); + ZooKeeperUtil.createWithParents(zkw, staleRegionPath); + String inRecoveringRegionPath = ZooKeeperUtil.joinZNode(zkw.znodePaths.recoveringRegionsZNode, failedRegion); - inRecoveringRegionPath = ZKUtil.joinZNode(inRecoveringRegionPath, + inRecoveringRegionPath = ZooKeeperUtil.joinZNode(inRecoveringRegionPath, inRecoveryServerName.getServerName()); - ZKUtil.createWithParents(zkw, inRecoveringRegionPath); + ZooKeeperUtil.createWithParents(zkw, inRecoveringRegionPath); Set servers = new HashSet<>(); servers.add(previouselyFaildServerName); mwm.removeStaleRecoveringRegionsFromZK(servers); // verification - assertFalse(ZKUtil.checkExists(zkw, staleRegionPath) != -1); - assertTrue(ZKUtil.checkExists(zkw, inRecoveringRegionPath) != -1); + assertFalse(ZooKeeperUtil.checkExists(zkw, staleRegionPath) != -1); + assertTrue(ZooKeeperUtil.checkExists(zkw, inRecoveringRegionPath) != -1); - ZKUtil.deleteChildrenRecursively(zkw, zkw.znodePaths.recoveringRegionsZNode); - ZKUtil.deleteChildrenRecursively(zkw, zkw.znodePaths.splitLogZNode); + ZooKeeperUtil.deleteChildrenRecursively(zkw, zkw.znodePaths.recoveringRegionsZNode); + ZooKeeperUtil.deleteChildrenRecursively(zkw, zkw.znodePaths.splitLogZNode); zkw.close(); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaShutdownHandler.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaShutdownHandler.java index d50e4a3876..20a7000b0b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaShutdownHandler.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaShutdownHandler.java @@ -33,7 +33,7 @@ import 
org.apache.hadoop.hbase.Waiter; import org.apache.hadoop.hbase.master.assignment.RegionStates; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperUtil; import org.apache.zookeeper.KeeperException; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -95,8 +95,8 @@ public class TestMetaShutdownHandler { // Delete the ephemeral node of the meta-carrying region server. // This is trigger the expire of this region server on the master. String rsEphemeralNodePath = - ZKUtil.joinZNode(master.getZooKeeper().znodePaths.rsZNode, metaServerName.toString()); - ZKUtil.deleteNode(master.getZooKeeper(), rsEphemeralNodePath); + ZooKeeperUtil.joinZNode(master.getZooKeeper().znodePaths.rsZNode, metaServerName.toString()); + ZooKeeperUtil.deleteNode(master.getZooKeeper(), rsEphemeralNodePath); // Wait for SSH to finish final ServerManager serverManager = master.getServerManager(); final ServerName priorMetaServerName = metaServerName; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitLogManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitLogManager.java index 62157907e0..1b6bbcb2f2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitLogManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitLogManager.java @@ -64,7 +64,8 @@ import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.zookeeper.ZKSplitLog; import org.apache.hadoop.hbase.zookeeper.ZKUtil; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperUtil; import org.apache.log4j.Level; import org.apache.log4j.Logger; import 
org.apache.zookeeper.CreateMode; @@ -88,7 +89,7 @@ public class TestSplitLogManager { Logger.getLogger("org.apache.hadoop.hbase").setLevel(Level.DEBUG); } - private ZooKeeperWatcher zkw; + private ZKWatcher zkw; private DummyMasterServices master; private SplitLogManager slm; private Configuration conf; @@ -98,17 +99,17 @@ public class TestSplitLogManager { private static HBaseTestingUtility TEST_UTIL; class DummyMasterServices extends MockNoopMasterServices { - private ZooKeeperWatcher zkw; + private ZKWatcher zkw; private CoordinatedStateManager cm; - public DummyMasterServices(ZooKeeperWatcher zkw, Configuration conf) { + public DummyMasterServices(ZKWatcher zkw, Configuration conf) { super(conf); this.zkw = zkw; cm = new ZkCoordinatedStateManager(this); } @Override - public ZooKeeperWatcher getZooKeeper() { + public ZKWatcher getZooKeeper() { return zkw; } @@ -130,15 +131,15 @@ public class TestSplitLogManager { conf = TEST_UTIL.getConfiguration(); // Use a different ZK wrapper instance for each tests. 
zkw = - new ZooKeeperWatcher(conf, "split-log-manager-tests" + UUID.randomUUID().toString(), null); + new ZKWatcher(conf, "split-log-manager-tests" + UUID.randomUUID().toString(), null); master = new DummyMasterServices(zkw, conf); - ZKUtil.deleteChildrenRecursively(zkw, zkw.znodePaths.baseZNode); - ZKUtil.createAndFailSilent(zkw, zkw.znodePaths.baseZNode); - assertTrue(ZKUtil.checkExists(zkw, zkw.znodePaths.baseZNode) != -1); + ZooKeeperUtil.deleteChildrenRecursively(zkw, zkw.znodePaths.baseZNode); + ZooKeeperUtil.createAndFailSilent(zkw, zkw.znodePaths.baseZNode); + assertTrue(ZooKeeperUtil.checkExists(zkw, zkw.znodePaths.baseZNode) != -1); LOG.debug(zkw.znodePaths.baseZNode + " created"); - ZKUtil.createAndFailSilent(zkw, zkw.znodePaths.splitLogZNode); - assertTrue(ZKUtil.checkExists(zkw, zkw.znodePaths.splitLogZNode) != -1); + ZooKeeperUtil.createAndFailSilent(zkw, zkw.znodePaths.splitLogZNode); + assertTrue(ZooKeeperUtil.checkExists(zkw, zkw.znodePaths.splitLogZNode) != -1); LOG.debug(zkw.znodePaths.splitLogZNode + " created"); resetCounters(); @@ -208,7 +209,7 @@ public class TestSplitLogManager { String tasknode = ZKSplitLog.getEncodedNodeName(zkw, name); NodeCreationListener listener = new NodeCreationListener(zkw, tasknode); zkw.registerListener(listener); - ZKUtil.watchAndCheckExists(zkw, tasknode); + ZooKeeperUtil.watchAndCheckExists(zkw, tasknode); slm.enqueueSplitTask(name, batch); assertEquals(1, batch.installed); @@ -234,7 +235,7 @@ public class TestSplitLogManager { String tasknode = submitTaskAndWait(batch, "foo/1"); - byte[] data = ZKUtil.getData(zkw, tasknode); + byte[] data = ZooKeeperUtil.getData(zkw, tasknode); SplitLogTask slt = SplitLogTask.parseFrom(data); LOG.info("Task node created " + slt.toString()); assertTrue(slt.isUnassigned(master.getServerName())); @@ -246,7 +247,7 @@ public class TestSplitLogManager { String tasknode = ZKSplitLog.getEncodedNodeName(zkw, "orphan/test/slash"); SplitLogTask slt = new 
SplitLogTask.Owned(master.getServerName(), this.mode); - zkw.getRecoverableZooKeeper().create(tasknode, slt.toByteArray(), Ids.OPEN_ACL_UNSAFE, + zkw.getRecoverableZK().create(tasknode, slt.toByteArray(), Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT); slm = new SplitLogManager(master, conf); @@ -271,9 +272,9 @@ public class TestSplitLogManager { String tasknode = ZKSplitLog.getEncodedNodeName(zkw, "orphan/test/slash"); //create an unassigned orphan task SplitLogTask slt = new SplitLogTask.Unassigned(master.getServerName(), this.mode); - zkw.getRecoverableZooKeeper().create(tasknode, slt.toByteArray(), Ids.OPEN_ACL_UNSAFE, + zkw.getRecoverableZK().create(tasknode, slt.toByteArray(), Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT); - int version = ZKUtil.checkExists(zkw, tasknode); + int version = ZooKeeperUtil.checkExists(zkw, tasknode); slm = new SplitLogManager(master, conf); waitForCounter(tot_mgr_orphan_task_acquired, 0, 1, to/2); @@ -290,7 +291,7 @@ public class TestSplitLogManager { assertEquals(0, task.unforcedResubmits.get()); assertTrue(task.isOrphan()); assertTrue(task.isUnassigned()); - assertTrue(ZKUtil.checkExists(zkw, tasknode) > version); + assertTrue(ZooKeeperUtil.checkExists(zkw, tasknode) > version); } @Test (timeout=180000) @@ -301,24 +302,24 @@ public class TestSplitLogManager { TaskBatch batch = new TaskBatch(); String tasknode = submitTaskAndWait(batch, "foo/1"); - int version = ZKUtil.checkExists(zkw, tasknode); + int version = ZooKeeperUtil.checkExists(zkw, tasknode); final ServerName worker1 = ServerName.valueOf("worker1,1,1"); final ServerName worker2 = ServerName.valueOf("worker2,1,1"); final ServerName worker3 = ServerName.valueOf("worker3,1,1"); SplitLogTask slt = new SplitLogTask.Owned(worker1, this.mode); - ZKUtil.setData(zkw, tasknode, slt.toByteArray()); + ZooKeeperUtil.setData(zkw, tasknode, slt.toByteArray()); waitForCounter(tot_mgr_heartbeat, 0, 1, to/2); waitForCounter(tot_mgr_resubmit, 0, 1, to + to/2); - int version1 = 
ZKUtil.checkExists(zkw, tasknode); + int version1 = ZooKeeperUtil.checkExists(zkw, tasknode); assertTrue(version1 > version); slt = new SplitLogTask.Owned(worker2, this.mode); - ZKUtil.setData(zkw, tasknode, slt.toByteArray()); + ZooKeeperUtil.setData(zkw, tasknode, slt.toByteArray()); waitForCounter(tot_mgr_heartbeat, 1, 2, to/2); waitForCounter(tot_mgr_resubmit, 1, 2, to + to/2); - int version2 = ZKUtil.checkExists(zkw, tasknode); + int version2 = ZooKeeperUtil.checkExists(zkw, tasknode); assertTrue(version2 > version1); slt = new SplitLogTask.Owned(worker3, this.mode); - ZKUtil.setData(zkw, tasknode, slt.toByteArray()); + ZooKeeperUtil.setData(zkw, tasknode, slt.toByteArray()); waitForCounter(tot_mgr_heartbeat, 2, 3, to/2); waitForCounter(tot_mgr_resubmit_threshold_reached, 0, 1, to + to/2); Thread.sleep(to + to/2); @@ -333,10 +334,10 @@ public class TestSplitLogManager { TaskBatch batch = new TaskBatch(); String tasknode = submitTaskAndWait(batch, "foo/1"); - int version = ZKUtil.checkExists(zkw, tasknode); + int version = ZooKeeperUtil.checkExists(zkw, tasknode); final ServerName worker1 = ServerName.valueOf("worker1,1,1"); SplitLogTask slt = new SplitLogTask.Owned(worker1, this.mode); - ZKUtil.setData(zkw, tasknode, slt.toByteArray()); + ZooKeeperUtil.setData(zkw, tasknode, slt.toByteArray()); waitForCounter(tot_mgr_heartbeat, 0, 1, to/2); waitForCounter(new Expr() { @Override @@ -345,9 +346,9 @@ public class TestSplitLogManager { } }, 0, 1, 5*60000); // wait long enough Assert.assertEquals("Could not run test. 
Lost ZK connection?", 0, tot_mgr_resubmit_failed.sum()); - int version1 = ZKUtil.checkExists(zkw, tasknode); + int version1 = ZooKeeperUtil.checkExists(zkw, tasknode); assertTrue(version1 > version); - byte[] taskstate = ZKUtil.getData(zkw, tasknode); + byte[] taskstate = ZooKeeperUtil.getData(zkw, tasknode); slt = SplitLogTask.parseFrom(taskstate); assertTrue(slt.isUnassigned(master.getServerName())); @@ -363,14 +364,14 @@ public class TestSplitLogManager { String tasknode = submitTaskAndWait(batch, "foo/1"); final ServerName worker1 = ServerName.valueOf("worker1,1,1"); SplitLogTask slt = new SplitLogTask.Done(worker1, this.mode); - ZKUtil.setData(zkw, tasknode, slt.toByteArray()); + ZooKeeperUtil.setData(zkw, tasknode, slt.toByteArray()); synchronized (batch) { while (batch.installed != batch.done) { batch.wait(); } } waitForCounter(tot_mgr_task_deleted, 0, 1, to/2); - assertTrue(ZKUtil.checkExists(zkw, tasknode) == -1); + assertTrue(ZooKeeperUtil.checkExists(zkw, tasknode) == -1); } @Test (timeout=180000) @@ -384,7 +385,7 @@ public class TestSplitLogManager { String tasknode = submitTaskAndWait(batch, "foo/1"); final ServerName worker1 = ServerName.valueOf("worker1,1,1"); SplitLogTask slt = new SplitLogTask.Err(worker1, this.mode); - ZKUtil.setData(zkw, tasknode, slt.toByteArray()); + ZooKeeperUtil.setData(zkw, tasknode, slt.toByteArray()); synchronized (batch) { while (batch.installed != batch.error) { @@ -392,7 +393,7 @@ public class TestSplitLogManager { } } waitForCounter(tot_mgr_task_deleted, 0, 1, to/2); - assertTrue(ZKUtil.checkExists(zkw, tasknode) == -1); + assertTrue(ZooKeeperUtil.checkExists(zkw, tasknode) == -1); conf.setInt("hbase.splitlog.max.resubmit", ZKSplitLogManagerCoordination.DEFAULT_MAX_RESUBMIT); } @@ -409,15 +410,15 @@ public class TestSplitLogManager { assertEquals(tot_mgr_resubmit.sum(), 0); SplitLogTask slt = new SplitLogTask.Resigned(worker1, this.mode); assertEquals(tot_mgr_resubmit.sum(), 0); - ZKUtil.setData(zkw, tasknode, 
slt.toByteArray()); - ZKUtil.checkExists(zkw, tasknode); + ZooKeeperUtil.setData(zkw, tasknode, slt.toByteArray()); + ZooKeeperUtil.checkExists(zkw, tasknode); // Could be small race here. if (tot_mgr_resubmit.sum() == 0) { waitForCounter(tot_mgr_resubmit, 0, 1, to/2); } assertEquals(tot_mgr_resubmit.sum(), 1); - byte[] taskstate = ZKUtil.getData(zkw, tasknode); + byte[] taskstate = ZooKeeperUtil.getData(zkw, tasknode); slt = SplitLogTask.parseFrom(taskstate); assertTrue(slt.isUnassigned(master.getServerName())); } @@ -431,7 +432,7 @@ public class TestSplitLogManager { String tasknode1 = ZKSplitLog.getEncodedNodeName(zkw, "orphan/1"); final ServerName worker1 = ServerName.valueOf("worker1,1,1"); SplitLogTask slt = new SplitLogTask.Owned(worker1, this.mode); - zkw.getRecoverableZooKeeper().create(tasknode1, slt.toByteArray(), Ids.OPEN_ACL_UNSAFE, + zkw.getRecoverableZK().create(tasknode1, slt.toByteArray(), Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT); slm = new SplitLogManager(master, conf); @@ -446,7 +447,7 @@ public class TestSplitLogManager { Thread.sleep(100); final ServerName worker2 = ServerName.valueOf("worker1,1,1"); slt = new SplitLogTask.Owned(worker2, this.mode); - ZKUtil.setData(zkw, tasknode1, slt.toByteArray()); + ZooKeeperUtil.setData(zkw, tasknode1, slt.toByteArray()); } // since we have stopped heartbeating the owned node therefore it should @@ -467,10 +468,10 @@ public class TestSplitLogManager { TaskBatch batch = new TaskBatch(); String tasknode = submitTaskAndWait(batch, "foo/1"); - int version = ZKUtil.checkExists(zkw, tasknode); + int version = ZooKeeperUtil.checkExists(zkw, tasknode); final ServerName worker1 = ServerName.valueOf("worker1,1,1"); SplitLogTask slt = new SplitLogTask.Owned(worker1, this.mode); - ZKUtil.setData(zkw, tasknode, slt.toByteArray()); + ZooKeeperUtil.setData(zkw, tasknode, slt.toByteArray()); if (tot_mgr_heartbeat.sum() == 0) waitForCounter(tot_mgr_heartbeat, 0, 1, to/2); slm.handleDeadWorker(worker1); if 
(tot_mgr_resubmit.sum() == 0) waitForCounter(tot_mgr_resubmit, 0, 1, to+to/2); @@ -478,9 +479,9 @@ public class TestSplitLogManager { waitForCounter(tot_mgr_resubmit_dead_server_task, 0, 1, to + to/2); } - int version1 = ZKUtil.checkExists(zkw, tasknode); + int version1 = ZooKeeperUtil.checkExists(zkw, tasknode); assertTrue(version1 > version); - byte[] taskstate = ZKUtil.getData(zkw, tasknode); + byte[] taskstate = ZooKeeperUtil.getData(zkw, tasknode); slt = SplitLogTask.parseFrom(taskstate); assertTrue(slt.isUnassigned(master.getServerName())); return; @@ -495,7 +496,7 @@ public class TestSplitLogManager { final ServerName worker1 = ServerName.valueOf("worker1,1,1"); SplitLogTask slt = new SplitLogTask.Owned(worker1, this.mode); - ZKUtil.setData(zkw, tasknode, slt.toByteArray()); + ZooKeeperUtil.setData(zkw, tasknode, slt.toByteArray()); if (tot_mgr_heartbeat.sum() == 0) waitForCounter(tot_mgr_heartbeat, 0, 1, to/2); // Not yet resubmitted. @@ -547,7 +548,7 @@ public class TestSplitLogManager { SplitLogTask slt = new SplitLogTask.Done(worker1, RecoveryMode.LOG_SPLITTING); boolean encounteredZKException = false; try { - ZKUtil.setData(zkw, entry.getKey(), slt.toByteArray()); + ZooKeeperUtil.setData(zkw, entry.getKey(), slt.toByteArray()); } catch (KeeperException e) { LOG.warn(e); encounteredZKException = true; @@ -575,15 +576,15 @@ public class TestSplitLogManager { LOG.info("testRecoveryRegionRemovedFromZK"); conf.setBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY, false); String nodePath = - ZKUtil.joinZNode(zkw.znodePaths.recoveringRegionsZNode, + ZooKeeperUtil.joinZNode(zkw.znodePaths.recoveringRegionsZNode, HRegionInfo.FIRST_META_REGIONINFO.getEncodedName()); - ZKUtil.createSetData(zkw, nodePath, ZKUtil.positionToByteArray(0L)); + ZooKeeperUtil.createSetData(zkw, nodePath, ZKUtil.positionToByteArray(0L)); slm = new SplitLogManager(master, conf); slm.removeStaleRecoveringRegions(null); List recoveringRegions = - 
zkw.getRecoverableZooKeeper().getChildren(zkw.znodePaths.recoveringRegionsZNode, false); + zkw.getRecoverableZK().getChildren(zkw.znodePaths.recoveringRegionsZNode, false); assertTrue("Recovery regions isn't cleaned", recoveringRegions.isEmpty()); } @@ -596,7 +597,7 @@ public class TestSplitLogManager { // The test is just manipulating ZK manually anyways. conf.setBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY, true); - zkw.getRecoverableZooKeeper().create(ZKSplitLog.getEncodedNodeName(zkw, "testRecovery"), + zkw.getRecoverableZK().create(ZKSplitLog.getEncodedNodeName(zkw, "testRecovery"), new SplitLogTask.Unassigned( ServerName.valueOf("mgr,1,1"), RecoveryMode.LOG_SPLITTING).toByteArray(), Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT); @@ -604,7 +605,7 @@ public class TestSplitLogManager { slm = new SplitLogManager(master, conf); LOG.info("Mode1=" + slm.getRecoveryMode()); assertTrue(slm.isLogSplitting()); - zkw.getRecoverableZooKeeper().delete(ZKSplitLog.getEncodedNodeName(zkw, "testRecovery"), -1); + zkw.getRecoverableZK().delete(ZKSplitLog.getEncodedNodeName(zkw, "testRecovery"), -1); LOG.info("Mode2=" + slm.getRecoveryMode()); slm.setRecoveryMode(false); LOG.info("Mode3=" + slm.getRecoveryMode()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableStateManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableStateManager.java index 37def1ba40..f6e4a6dc4b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableStateManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableStateManager.java @@ -28,8 +28,9 @@ import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.MasterTests; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; -import 
org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperUtil; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.zookeeper.KeeperException; import org.junit.After; import org.junit.Assert; @@ -59,7 +60,7 @@ public class TestTableStateManager { final TableName tableName = TableName.valueOf(name.getMethodName()); TEST_UTIL.startMiniCluster(2, 1); TEST_UTIL.shutdownMiniHBaseCluster(); - ZooKeeperWatcher watcher = TEST_UTIL.getZooKeeperWatcher(); + ZKWatcher watcher = TEST_UTIL.getZooKeeperWatcher(); setTableStateInZK(watcher, tableName, ZooKeeperProtos.DeprecatedTableState.State.DISABLED); TEST_UTIL.restartHBaseCluster(1); @@ -69,18 +70,18 @@ public class TestTableStateManager { TableState.State.DISABLED); } - private void setTableStateInZK(ZooKeeperWatcher watcher, final TableName tableName, + private void setTableStateInZK(ZKWatcher watcher, final TableName tableName, final ZooKeeperProtos.DeprecatedTableState.State state) throws KeeperException, IOException { - String znode = ZKUtil.joinZNode(watcher.znodePaths.tableZNode, tableName.getNameAsString()); - if (ZKUtil.checkExists(watcher, znode) == -1) { - ZKUtil.createAndFailSilent(watcher, znode); + String znode = ZooKeeperUtil.joinZNode(watcher.znodePaths.tableZNode, tableName.getNameAsString()); + if (ZooKeeperUtil.checkExists(watcher, znode) == -1) { + ZooKeeperUtil.createAndFailSilent(watcher, znode); } ZooKeeperProtos.DeprecatedTableState.Builder builder = ZooKeeperProtos.DeprecatedTableState.newBuilder(); builder.setState(state); byte[] data = ProtobufUtil.prependPBMagic(builder.build().toByteArray()); - ZKUtil.setData(watcher, znode, data); + ZooKeeperUtil.setData(watcher, znode, data); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java index 
572816d163..0491d942b3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java @@ -44,7 +44,8 @@ import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.EnvironmentEdge; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.junit.AfterClass; import org.junit.Assert; import org.junit.BeforeClass; @@ -205,9 +206,9 @@ public class TestHFileCleaner { } @Override - public ZooKeeperWatcher getZooKeeper() { + public ZKWatcher getZooKeeper() { try { - return new ZooKeeperWatcher(getConfiguration(), "dummy server", this); + return new ZKWatcher(getConfiguration(), "dummy server", this); } catch (IOException e) { e.printStackTrace(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java index 773d0fcfde..9820fd0937 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java @@ -42,7 +42,7 @@ import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.HFileArchiveUtil; import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.junit.Rule; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -146,9 +146,9 @@ public class TestHFileLinkCleaner { } 
@Override - public ZooKeeperWatcher getZooKeeper() { + public ZKWatcher getZooKeeper() { try { - return new ZooKeeperWatcher(getConfiguration(), "dummy server", this); + return new ZKWatcher(getConfiguration(), "dummy server", this); } catch (IOException e) { e.printStackTrace(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java index b5ca894b04..15ca6fbc53 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java @@ -45,7 +45,7 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.Waiter; -import org.apache.hadoop.hbase.ZooKeeperConnectionException; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperConnectionException; import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.replication.ReplicationFactory; import org.apache.hadoop.hbase.replication.ReplicationQueues; @@ -56,8 +56,8 @@ import org.apache.hadoop.hbase.replication.regionserver.Replication; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; -import org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.RecoverableZK; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.zookeeper.KeeperException; import org.apache.zookeeper.data.Stat; import org.junit.AfterClass; @@ -234,7 +234,7 @@ public class TestLogsCleaner { // when zk is working both files should be returned cleaner = new ReplicationLogCleaner(); - try (ZooKeeperWatcher zkw = new 
ZooKeeperWatcher(conf, "testZooKeeperAbort-normal", null)) { + try (ZKWatcher zkw = new ZKWatcher(conf, "testZooKeeperAbort-normal", null)) { cleaner.setConf(conf, zkw); cleaner.preClean(); Iterable filesToDelete = cleaner.getDeletableFiles(dummyFiles); @@ -255,9 +255,9 @@ public class TestLogsCleaner { } @Override - public ZooKeeperWatcher getZooKeeper() { + public ZKWatcher getZooKeeper() { try { - return new ZooKeeperWatcher(getConfiguration(), "dummy server", this); + return new ZKWatcher(getConfiguration(), "dummy server", this); } catch (IOException e) { e.printStackTrace(); } @@ -321,8 +321,8 @@ public class TestLogsCleaner { } } - static class FaultyZooKeeperWatcher extends ZooKeeperWatcher { - private RecoverableZooKeeper zk; + static class FaultyZooKeeperWatcher extends ZKWatcher { + private RecoverableZK zk; public FaultyZooKeeperWatcher(Configuration conf, String identifier, Abortable abortable) throws ZooKeeperConnectionException, IOException { @@ -330,12 +330,12 @@ public class TestLogsCleaner { } public void init() throws Exception { - this.zk = spy(super.getRecoverableZooKeeper()); + this.zk = spy(super.getRecoverableZK()); doThrow(new KeeperException.ConnectionLossException()) .when(zk).getData("/hbase/replication/rs", null, new Stat()); } - public RecoverableZooKeeper getRecoverableZooKeeper() { + public RecoverableZK getRecoverableZK() { return zk; } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java index be7f35ec18..b797d0ba45 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java @@ -38,7 +38,8 @@ import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import 
org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.ZooKeeperConnectionException; +import org.apache.hadoop.hbase.zookeeper.RecoverableZK; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperConnectionException; import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.replication.ReplicationException; import org.apache.hadoop.hbase.replication.ReplicationFactory; @@ -54,8 +55,7 @@ import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; -import org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.zookeeper.KeeperException; import org.apache.zookeeper.data.Stat; import org.junit.After; @@ -240,7 +240,7 @@ public class TestReplicationHFileCleaner { // when zk is working both files should be returned cleaner = new ReplicationHFileCleaner(); - ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "testZooKeeperAbort-normal", null); + ZKWatcher zkw = new ZKWatcher(conf, "testZooKeeperAbort-normal", null); try { cleaner.setConf(conf, zkw); Iterable filesToDelete = cleaner.getDeletableFiles(dummyFiles); @@ -263,9 +263,9 @@ public class TestReplicationHFileCleaner { } @Override - public ZooKeeperWatcher getZooKeeper() { + public ZKWatcher getZooKeeper() { try { - return new ZooKeeperWatcher(getConfiguration(), "dummy server", this); + return new ZKWatcher(getConfiguration(), "dummy server", this); } catch (IOException e) { e.printStackTrace(); } @@ -332,20 +332,20 @@ public class TestReplicationHFileCleaner { } } - static class FaultyZooKeeperWatcher extends ZooKeeperWatcher { - private RecoverableZooKeeper zk; + static class FaultyZooKeeperWatcher extends ZKWatcher { + private RecoverableZK zk; 
public FaultyZooKeeperWatcher(Configuration conf, String identifier, Abortable abortable) throws ZooKeeperConnectionException, IOException { super(conf, identifier, abortable); } public void init() throws Exception { - this.zk = spy(super.getRecoverableZooKeeper()); + this.zk = spy(super.getRecoverableZK()); doThrow(new KeeperException.ConnectionLossException()) .when(zk).getData("/hbase/replication/hfile-refs", null, new Stat()); } - public RecoverableZooKeeper getRecoverableZooKeeper() { + public RecoverableZK getRecoverableZK() { return zk; } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationZKNodeCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationZKNodeCleaner.java index e11143d18c..88d90381d7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationZKNodeCleaner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationZKNodeCleaner.java @@ -32,7 +32,7 @@ import org.apache.hadoop.hbase.replication.ReplicationQueuesArguments; import org.apache.hadoop.hbase.replication.ReplicationQueuesZKImpl; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.MediumTests; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; @@ -48,12 +48,12 @@ public class TestReplicationZKNodeCleaner { private final String SERVER_TWO = "server2"; private final Configuration conf; - private final ZooKeeperWatcher zkw; + private final ZKWatcher zkw; private final ReplicationQueues repQueues; public TestReplicationZKNodeCleaner() throws Exception { conf = TEST_UTIL.getConfiguration(); - zkw = new ZooKeeperWatcher(conf, "TestReplicationZKNodeCleaner", null); + zkw = new ZKWatcher(conf,
"TestReplicationZKNodeCleaner", null); repQueues = ReplicationFactory.getReplicationQueues(new ReplicationQueuesArguments(conf, null, zkw)); assertTrue(repQueues instanceof ReplicationQueuesZKImpl); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleRSProcedureManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleRSProcedureManager.java index 58efa87be1..0dc8d11eb1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleRSProcedureManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleRSProcedureManager.java @@ -35,7 +35,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.DaemonThreadFactory; import org.apache.hadoop.hbase.regionserver.RegionServerServices; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.hadoop.hbase.errorhandling.ForeignException; import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher; import org.apache.zookeeper.KeeperException; @@ -51,7 +51,7 @@ public class SimpleRSProcedureManager extends RegionServerProcedureManager { @Override public void initialize(RegionServerServices rss) throws KeeperException { this.rss = rss; - ZooKeeperWatcher zkw = rss.getZooKeeper(); + ZKWatcher zkw = rss.getZooKeeper(); this.memberRpcs = new ZKProcedureMemberRpcs(zkw, getProcedureSignature()); ThreadPoolExecutor pool = diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestZKProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestZKProcedure.java index 856e449dfd..aca304373c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestZKProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestZKProcedure.java @@ -45,7 +45,7 @@ import
org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher; import org.apache.hadoop.hbase.errorhandling.TimeoutException; import org.apache.hadoop.hbase.procedure.Subprocedure.SubprocedureImpl; import org.apache.hadoop.hbase.util.Pair; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; @@ -85,8 +85,8 @@ public class TestZKProcedure { UTIL.shutdownMiniZKCluster(); } - private static ZooKeeperWatcher newZooKeeperWatcher() throws IOException { - return new ZooKeeperWatcher(UTIL.getConfiguration(), "testing utility", new Abortable() { + private static ZKWatcher newZooKeeperWatcher() throws IOException { + return new ZKWatcher(UTIL.getConfiguration(), "testing utility", new Abortable() { @Override public void abort(String why, Throwable e) { throw new RuntimeException( @@ -123,7 +123,7 @@ public class TestZKProcedure { List expected = Arrays.asList(members); // setup the constants - ZooKeeperWatcher coordZkw = newZooKeeperWatcher(); + ZKWatcher coordZkw = newZooKeeperWatcher(); String opDescription = "coordination test - " + members.length + " cohort members"; // start running the controller @@ -144,7 +144,7 @@ public class TestZKProcedure { List> procMembers = new ArrayList<>(members.length); // start each member for (String member : members) { - ZooKeeperWatcher watcher = newZooKeeperWatcher(); + ZKWatcher watcher = newZooKeeperWatcher(); ZKProcedureMemberRpcs comms = new ZKProcedureMemberRpcs(watcher, opDescription); ThreadPoolExecutor pool2 = ProcedureMember.defaultPool(member, 1, KEEP_ALIVE); ProcedureMember procMember = new ProcedureMember(comms, pool2, subprocFactory); @@ -207,7 +207,7 @@ public class TestZKProcedure { final CountDownLatch coordinatorReceivedErrorLatch = new CountDownLatch(1); // start running the coordinator and its controller - ZooKeeperWatcher coordinatorWatcher = newZooKeeperWatcher(); + 
ZKWatcher coordinatorWatcher = newZooKeeperWatcher(); ZKProcedureCoordinator coordinatorController = new ZKProcedureCoordinator( coordinatorWatcher, opDescription, COORDINATOR_NODE_NAME); ThreadPoolExecutor pool = ProcedureCoordinator.defaultPool(COORDINATOR_NODE_NAME, POOL_SIZE, KEEP_ALIVE); @@ -217,7 +217,7 @@ public class TestZKProcedure { SubprocedureFactory subprocFactory = Mockito.mock(SubprocedureFactory.class); List> members = new ArrayList<>(expected.size()); for (String member : expected) { - ZooKeeperWatcher watcher = newZooKeeperWatcher(); + ZKWatcher watcher = newZooKeeperWatcher(); ZKProcedureMemberRpcs controller = new ZKProcedureMemberRpcs(watcher, opDescription); ThreadPoolExecutor pool2 = ProcedureMember.defaultPool(member, 1, KEEP_ALIVE); ProcedureMember mem = new ProcedureMember(controller, pool2, subprocFactory); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestZKProcedureControllers.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestZKProcedureControllers.java index 8b947ee234..d2e633c83d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestZKProcedureControllers.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestZKProcedureControllers.java @@ -37,8 +37,9 @@ import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Pair; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperUtil; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; @@ -80,7 +81,7 @@ public class TestZKProcedureControllers { */ @Test(timeout = 60000) public void 
testSimpleZKCohortMemberController() throws Exception { - ZooKeeperWatcher watcher = UTIL.getZooKeeperWatcher(); + ZKWatcher watcher = UTIL.getZooKeeperWatcher(); final String operationName = "instanceTest"; final Subprocedure sub = Mockito.mock(Subprocedure.class); @@ -120,15 +121,15 @@ public class TestZKProcedureControllers { // set a prepare node from a 'coordinator' String prepare = ZKProcedureUtil.getAcquireBarrierNode(controller.getZkController(), operationName); - ZKUtil.createSetData(watcher, prepare, ProtobufUtil.prependPBMagic(data)); + ZooKeeperUtil.createSetData(watcher, prepare, ProtobufUtil.prependPBMagic(data)); // wait for the operation to be prepared prepared.await(); // create the commit node so we update the operation to enter the commit phase String commit = ZKProcedureUtil.getReachedBarrierNode(controller.getZkController(), operationName); LOG.debug("Found prepared, posting commit node:" + commit); - ZKUtil.createAndFailSilent(watcher, commit); - LOG.debug("Commit node:" + commit + ", exists:" + ZKUtil.checkExists(watcher, commit)); + ZooKeeperUtil.createAndFailSilent(watcher, commit); + LOG.debug("Commit node:" + commit + ", exists:" + ZooKeeperUtil.checkExists(watcher, commit)); committed.await(); verify(monitor, never()).receive(Mockito.any(ForeignException.class)); @@ -136,9 +137,9 @@ public class TestZKProcedureControllers { // verify(member, never()).getManager().controllerConnectionFailure(Mockito.anyString(), // Mockito.any(IOException.class)); // cleanup after the test - ZKUtil.deleteNodeRecursively(watcher, controller.getZkController().getBaseZnode()); - assertEquals("Didn't delete prepare node", -1, ZKUtil.checkExists(watcher, prepare)); - assertEquals("Didn't delete commit node", -1, ZKUtil.checkExists(watcher, commit)); + ZooKeeperUtil.deleteNodeRecursively(watcher, controller.getZkController().getBaseZnode()); + assertEquals("Didn't delete prepare node", -1, ZooKeeperUtil.checkExists(watcher, prepare)); + assertEquals("Didn't 
delete commit node", -1, ZooKeeperUtil.checkExists(watcher, commit)); } @Test(timeout = 60000) @@ -172,7 +173,7 @@ public class TestZKProcedureControllers { private void runMockCommitWithOrchestratedControllers(StartControllers controllers, String operationName, byte[] data, String... cohort) throws Exception { - ZooKeeperWatcher watcher = UTIL.getZooKeeperWatcher(); + ZKWatcher watcher = UTIL.getZooKeeperWatcher(); List expected = Lists.newArrayList(cohort); final Subprocedure sub = Mockito.mock(Subprocedure.class); @@ -248,7 +249,7 @@ public class TestZKProcedureControllers { public void runEarlyPrepareNodes(StartControllers controllers, String operationName, byte[] data, String... cohort) throws Exception { - ZooKeeperWatcher watcher = UTIL.getZooKeeperWatcher(); + ZKWatcher watcher = UTIL.getZooKeeperWatcher(); List expected = Lists.newArrayList(cohort); final Subprocedure sub = Mockito.mock(Subprocedure.class); @@ -345,14 +346,14 @@ public class TestZKProcedureControllers { /** * Verify that the prepare, commit and abort nodes for the operation are removed from zookeeper */ - private void verifyZooKeeperClean(String operationName, ZooKeeperWatcher watcher, + private void verifyZooKeeperClean(String operationName, ZKWatcher watcher, ZKProcedureUtil controller) throws Exception { String prepare = ZKProcedureUtil.getAcquireBarrierNode(controller, operationName); String commit = ZKProcedureUtil.getReachedBarrierNode(controller, operationName); String abort = ZKProcedureUtil.getAbortNode(controller, operationName); - assertEquals("Didn't delete prepare node", -1, ZKUtil.checkExists(watcher, prepare)); - assertEquals("Didn't delete commit node", -1, ZKUtil.checkExists(watcher, commit)); - assertEquals("Didn't delete abort node", -1, ZKUtil.checkExists(watcher, abort)); + assertEquals("Didn't delete prepare node", -1, ZooKeeperUtil.checkExists(watcher, prepare)); + assertEquals("Didn't delete commit node", -1, ZooKeeperUtil.checkExists(watcher, commit)); + 
assertEquals("Didn't delete abort node", -1, ZooKeeperUtil.checkExists(watcher, abort)); } /** @@ -384,7 +385,7 @@ public class TestZKProcedureControllers { */ private abstract class StartControllers { public abstract Pair> start( - ZooKeeperWatcher watcher, String operationName, + ZKWatcher watcher, String operationName, ProcedureCoordinator coordinator, String controllerName, ProcedureMember member, List cohortNames) throws Exception; } @@ -393,7 +394,7 @@ public class TestZKProcedureControllers { @Override public Pair> start( - ZooKeeperWatcher watcher, String operationName, + ZKWatcher watcher, String operationName, ProcedureCoordinator coordinator, String controllerName, ProcedureMember member, List expected) throws Exception { // start the controller @@ -421,7 +422,7 @@ public class TestZKProcedureControllers { @Override public Pair> start( - ZooKeeperWatcher watcher, String operationName, + ZKWatcher watcher, String operationName, ProcedureCoordinator coordinator, String controllerName, ProcedureMember member, List expected) throws Exception { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionInDeadRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionInDeadRegionServer.java index 5e212f83c3..515a7c1ce1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionInDeadRegionServer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionInDeadRegionServer.java @@ -25,7 +25,6 @@ import java.util.Arrays; import java.util.List; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.CoordinatedStateManager; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; @@ -42,8 +41,8 @@ import org.apache.hadoop.hbase.wal.AsyncFSWALProvider; import org.apache.hadoop.hbase.wal.FSHLogProvider; import 
org.apache.hadoop.hbase.wal.WALFactory; import org.apache.hadoop.hbase.wal.WALProvider; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperUtil; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -122,9 +121,9 @@ public class TestCompactionInDeadRegionServer { public void test() throws Exception { HRegionServer rsToSuspend = UTIL.getRSForFirstRegionInTable(TABLE_NAME); HRegion region = (HRegion) rsToSuspend.getRegions(TABLE_NAME).get(0); - ZooKeeperWatcher watcher = UTIL.getZooKeeperWatcher(); - watcher.getRecoverableZooKeeper().delete( - ZKUtil.joinZNode(watcher.getZNodePaths().rsZNode, rsToSuspend.getServerName().toString()), + ZKWatcher watcher = UTIL.getZooKeeperWatcher(); + watcher.getRecoverableZK().delete( + ZooKeeperUtil.joinZNode(watcher.getZNodePaths().rsZNode, rsToSuspend.getServerName().toString()), -1); UTIL.waitFor(60000, 1000, new ExplainingPredicate() { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java index aae04dfe16..7f8bcf3519 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java @@ -49,7 +49,7 @@ import org.apache.hadoop.hbase.regionserver.HeapMemoryManager.TunerResult; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.junit.Test; import
org.junit.experimental.categories.Category; @@ -865,7 +865,7 @@ public class TestHeapMemoryManager { } @Override - public ZooKeeperWatcher getZooKeeper() { + public ZKWatcher getZooKeeper() { return null; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMasterAddressTracker.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMasterAddressTracker.java index 84865cc84c..b32cabe253 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMasterAddressTracker.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMasterAddressTracker.java @@ -31,9 +31,9 @@ import org.apache.hadoop.hbase.*; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperListener; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKListener; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperUtil; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; @@ -80,9 +80,9 @@ public class TestMasterAddressTracker { */ private MasterAddressTracker setupMasterTracker(final ServerName sn, final int infoPort) throws Exception { - ZooKeeperWatcher zk = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(), + ZKWatcher zk = new ZKWatcher(TEST_UTIL.getConfiguration(), name.getMethodName(), null); - ZKUtil.createAndFailSilent(zk, zk.znodePaths.baseZNode); + ZooKeeperUtil.createAndFailSilent(zk, zk.znodePaths.baseZNode); // Should not have a master yet MasterAddressTracker addressTracker = new MasterAddressTracker(zk, null); @@ -156,13 +156,13 @@ public class TestMasterAddressTracker { assertEquals("Should receive 0 for backup not found.", 0,
addressTracker.getMasterInfoPort()); } - public static class NodeCreationListener extends ZooKeeperListener { + public static class NodeCreationListener extends ZKListener { private static final Log LOG = LogFactory.getLog(NodeCreationListener.class); private Semaphore lock; private String node; - public NodeCreationListener(ZooKeeperWatcher watcher, String node) { + public NodeCreationListener(ZKWatcher watcher, String node) { super(watcher); lock = new Semaphore(0); this.node = node; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSStatusServlet.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSStatusServlet.java index cfecd9fe7b..c9cae7ee43 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSStatusServlet.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSStatusServlet.java @@ -40,7 +40,7 @@ import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.tmpl.regionserver.RSStatusTmpl; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.junit.Before; import org.junit.Rule; import org.junit.Test; @@ -92,7 +92,7 @@ public class TestRSStatusServlet { Mockito.doReturn(fakeResponse).when(rpcServices).getServerInfo( (RpcController)Mockito.any(), (GetServerInfoRequest)Mockito.any()); // Fake ZKW - ZooKeeperWatcher zkw = Mockito.mock(ZooKeeperWatcher.class); + ZKWatcher zkw = Mockito.mock(ZKWatcher.class); Mockito.doReturn("fakequorum").when(zkw).getQuorum(); Mockito.doReturn(zkw).when(rs).getZooKeeper(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerHostname.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerHostname.java index 872fec6722..1ff3212a8a 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerHostname.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerHostname.java @@ -35,8 +35,8 @@ import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.master.LoadBalancer; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperUtil; import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -99,8 +99,8 @@ public class TestRegionServerHostname { TEST_UTIL.getConfiguration().set(HRegionServer.RS_HOSTNAME_KEY, hostName); TEST_UTIL.startMiniCluster(NUM_MASTERS, NUM_RS); try { - ZooKeeperWatcher zkw = TEST_UTIL.getZooKeeperWatcher(); - List servers = ZKUtil.listChildrenNoWatch(zkw, zkw.znodePaths.rsZNode); + ZKWatcher zkw = TEST_UTIL.getZooKeeperWatcher(); + List servers = ZooKeeperUtil.listChildrenNoWatch(zkw, zkw.znodePaths.rsZNode); // there would be NUM_RS+1 children - one for the master assertTrue(servers.size() == NUM_RS + (LoadBalancer.isTablesOnMaster(TEST_UTIL.getConfiguration())? 1: 0)); @@ -160,8 +160,8 @@ TEST_UTIL.startMiniCluster(NUM_MASTERS, NUM_RS); boolean tablesOnMaster = LoadBalancer.isTablesOnMaster(TEST_UTIL.getConfiguration()); int expectedRS = NUM_RS + (tablesOnMaster?
1: 0); - try (ZooKeeperWatcher zkw = TEST_UTIL.getZooKeeperWatcher()) { - List servers = ZKUtil.listChildrenNoWatch(zkw, zkw.znodePaths.rsZNode); + try (ZKWatcher zkw = TEST_UTIL.getZooKeeperWatcher()) { + List servers = ZooKeeperUtil.listChildrenNoWatch(zkw, zkw.znodePaths.rsZNode); assertEquals(expectedRS, servers.size()); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java index f8b9f6eeaa..64c6c673f5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java @@ -44,7 +44,7 @@ import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.junit.AfterClass; import org.junit.Assert; import org.junit.BeforeClass; @@ -106,7 +106,7 @@ public class TestRegionServerNoMaster { // so that regions can be assigned during the mocking phase. 
HRegionServer hrs = HTU.getHBaseCluster() .getLiveRegionServerThreads().get(0).getRegionServer(); - ZooKeeperWatcher zkw = hrs.getZooKeeper(); + ZKWatcher zkw = hrs.getZooKeeper(); MetaTableLocator mtl = new MetaTableLocator(); ServerName sn = mtl.getMetaRegionLocation(zkw); if (sn != null && !masterAddr.equals(sn)) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java index 40077f9013..8ff5f70736 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java @@ -53,8 +53,8 @@ import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.util.CancelableProgressable; import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; import org.apache.hadoop.hbase.zookeeper.ZKSplitLog; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperUtil; import org.apache.log4j.Level; import org.apache.log4j.Logger; import org.apache.zookeeper.CreateMode; @@ -75,17 +75,17 @@ public class TestSplitLogWorker { private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); private DummyServer ds; - private ZooKeeperWatcher zkw; + private ZKWatcher zkw; private SplitLogWorker slw; private ExecutorService executorService; private RecoveryMode mode; class DummyServer implements Server { - private ZooKeeperWatcher zkw; + private ZKWatcher zkw; private Configuration conf; private CoordinatedStateManager cm; - public DummyServer(ZooKeeperWatcher zkw, Configuration conf) { + public DummyServer(ZKWatcher zkw, Configuration conf) { this.zkw = zkw; this.conf = conf; cm = new ZkCoordinatedStateManager(this); @@ -115,7 
+115,7 @@ public class TestSplitLogWorker { } @Override - public ZooKeeperWatcher getZooKeeper() { + public ZKWatcher getZooKeeper() { return zkw; } @@ -195,19 +195,19 @@ public class TestSplitLogWorker { public void setup() throws Exception { TEST_UTIL.startMiniZKCluster(); Configuration conf = TEST_UTIL.getConfiguration(); - zkw = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(), + zkw = new ZKWatcher(TEST_UTIL.getConfiguration(), "split-log-worker-tests", null); ds = new DummyServer(zkw, conf); - ZKUtil.deleteChildrenRecursively(zkw, zkw.znodePaths.baseZNode); - ZKUtil.createAndFailSilent(zkw, zkw.znodePaths.baseZNode); - assertThat(ZKUtil.checkExists(zkw, zkw.znodePaths.baseZNode), not (is(-1))); + ZooKeeperUtil.deleteChildrenRecursively(zkw, zkw.znodePaths.baseZNode); + ZooKeeperUtil.createAndFailSilent(zkw, zkw.znodePaths.baseZNode); + assertThat(ZooKeeperUtil.checkExists(zkw, zkw.znodePaths.baseZNode), not (is(-1))); LOG.debug(zkw.znodePaths.baseZNode + " created"); - ZKUtil.createAndFailSilent(zkw, zkw.znodePaths.splitLogZNode); - assertThat(ZKUtil.checkExists(zkw, zkw.znodePaths.splitLogZNode), not (is(-1))); + ZooKeeperUtil.createAndFailSilent(zkw, zkw.znodePaths.splitLogZNode); + assertThat(ZooKeeperUtil.checkExists(zkw, zkw.znodePaths.splitLogZNode), not (is(-1))); LOG.debug(zkw.znodePaths.splitLogZNode + " created"); - ZKUtil.createAndFailSilent(zkw, zkw.znodePaths.rsZNode); - assertThat(ZKUtil.checkExists(zkw, zkw.znodePaths.rsZNode), not (is(-1))); + ZooKeeperUtil.createAndFailSilent(zkw, zkw.znodePaths.rsZNode); + assertThat(ZooKeeperUtil.checkExists(zkw, zkw.znodePaths.rsZNode), not (is(-1))); SplitLogCounters.resetCounters(); executorService = new ExecutorService("TestSplitLogWorker"); @@ -250,7 +250,7 @@ public class TestSplitLogWorker { final String TATAS = "tatas"; final ServerName RS = ServerName.valueOf("rs,1,1"); RegionServerServices mockedRS = getRegionServer(RS); - zkw.getRecoverableZooKeeper().create(ZKSplitLog.getEncodedNodeName(zkw, 
TATAS), + zkw.getRecoverableZK().create(ZKSplitLog.getEncodedNodeName(zkw, TATAS), new SplitLogTask.Unassigned(ServerName.valueOf("mgr,1,1"), this.mode).toByteArray(), Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT); @@ -260,7 +260,7 @@ public class TestSplitLogWorker { slw.start(); try { waitForCounter(SplitLogCounters.tot_wkr_task_acquired, 0, 1, WAIT_TIME); - byte [] bytes = ZKUtil.getData(zkw, ZKSplitLog.getEncodedNodeName(zkw, TATAS)); + byte [] bytes = ZooKeeperUtil.getData(zkw, ZKSplitLog.getEncodedNodeName(zkw, TATAS)); SplitLogTask slt = SplitLogTask.parseFrom(bytes); assertTrue(slt.isOwned(RS)); } finally { @@ -286,7 +286,7 @@ public class TestSplitLogWorker { final String TRFT = "trft"; final ServerName SVR1 = ServerName.valueOf("svr1,1,1"); final ServerName SVR2 = ServerName.valueOf("svr2,1,1"); - zkw.getRecoverableZooKeeper().create(ZKSplitLog.getEncodedNodeName(zkw, TRFT), + zkw.getRecoverableZK().create(ZKSplitLog.getEncodedNodeName(zkw, TRFT), new SplitLogTask.Unassigned(MANAGER, this.mode).toByteArray(), Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT); RegionServerServices mockedRS1 = getRegionServer(SVR1); @@ -304,7 +304,7 @@ public class TestSplitLogWorker { assertTrue(waitForCounterBoolean(SplitLogCounters.tot_wkr_failed_to_grab_task_owned, 0, 1, WAIT_TIME, false) || SplitLogCounters.tot_wkr_failed_to_grab_task_lost_race.sum() == 1); - byte [] bytes = ZKUtil.getData(zkw, ZKSplitLog.getEncodedNodeName(zkw, TRFT)); + byte [] bytes = ZooKeeperUtil.getData(zkw, ZKSplitLog.getEncodedNodeName(zkw, TRFT)); SplitLogTask slt = SplitLogTask.parseFrom(bytes); assertTrue(slt.isOwned(SVR1) || slt.isOwned(SVR2)); } finally { @@ -329,17 +329,17 @@ public class TestSplitLogWorker { waitForCounter(SplitLogCounters.tot_wkr_task_grabing, 0, 1, WAIT_TIME); // this time create a task node after starting the splitLogWorker - zkw.getRecoverableZooKeeper().create(PATH, + zkw.getRecoverableZK().create(PATH, new SplitLogTask.Unassigned(MANAGER, this.mode).toByteArray(), 
Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT); waitForCounter(SplitLogCounters.tot_wkr_task_acquired, 0, 1, WAIT_TIME); assertEquals(1, slw.getTaskReadySeq()); - byte [] bytes = ZKUtil.getData(zkw, PATH); + byte [] bytes = ZooKeeperUtil.getData(zkw, PATH); SplitLogTask slt = SplitLogTask.parseFrom(bytes); assertTrue(slt.isOwned(SRV)); slt = new SplitLogTask.Owned(MANAGER, this.mode); - ZKUtil.setData(zkw, PATH, slt.toByteArray()); + ZooKeeperUtil.setData(zkw, PATH, slt.toByteArray()); waitForCounter(SplitLogCounters.tot_wkr_preempt_task, 0, 1, WAIT_TIME); } finally { stopSplitLogWorker(slw); @@ -363,7 +363,7 @@ public class TestSplitLogWorker { SplitLogTask unassignedManager = new SplitLogTask.Unassigned(MANAGER, this.mode); - zkw.getRecoverableZooKeeper().create(PATH1, unassignedManager.toByteArray(), + zkw.getRecoverableZK().create(PATH1, unassignedManager.toByteArray(), Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT); waitForCounter(SplitLogCounters.tot_wkr_task_acquired, 0, 1, WAIT_TIME); @@ -371,18 +371,18 @@ public class TestSplitLogWorker { // create another task final String PATH2 = ZKSplitLog.getEncodedNodeName(zkw, "tmt_task_2"); - zkw.getRecoverableZooKeeper().create(PATH2, unassignedManager.toByteArray(), + zkw.getRecoverableZK().create(PATH2, unassignedManager.toByteArray(), Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT); // preempt the first task, have it owned by another worker final ServerName anotherWorker = ServerName.valueOf("another-worker,1,1"); SplitLogTask slt = new SplitLogTask.Owned(anotherWorker, this.mode); - ZKUtil.setData(zkw, PATH1, slt.toByteArray()); + ZooKeeperUtil.setData(zkw, PATH1, slt.toByteArray()); waitForCounter(SplitLogCounters.tot_wkr_preempt_task, 0, 1, WAIT_TIME); waitForCounter(SplitLogCounters.tot_wkr_task_acquired, 1, 2, WAIT_TIME); assertEquals(2, slw.getTaskReadySeq()); - byte [] bytes = ZKUtil.getData(zkw, PATH2); + byte [] bytes = ZooKeeperUtil.getData(zkw, PATH2); slt = SplitLogTask.parseFrom(bytes); 
assertTrue(slt.isOwned(SRV)); } finally { @@ -403,30 +403,30 @@ public class TestSplitLogWorker { String task = ZKSplitLog.getEncodedNodeName(zkw, "task"); SplitLogTask slt = new SplitLogTask.Unassigned(MANAGER, this.mode); - zkw.getRecoverableZooKeeper().create(task,slt.toByteArray(), Ids.OPEN_ACL_UNSAFE, + zkw.getRecoverableZK().create(task,slt.toByteArray(), Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT); waitForCounter(SplitLogCounters.tot_wkr_task_acquired, 0, 1, WAIT_TIME); // now the worker is busy doing the above task // preempt the task, have it owned by another worker - ZKUtil.setData(zkw, task, slt.toByteArray()); + ZooKeeperUtil.setData(zkw, task, slt.toByteArray()); waitForCounter(SplitLogCounters.tot_wkr_preempt_task, 0, 1, WAIT_TIME); // create a RESCAN node String rescan = ZKSplitLog.getEncodedNodeName(zkw, "RESCAN"); - rescan = zkw.getRecoverableZooKeeper().create(rescan, slt.toByteArray(), Ids.OPEN_ACL_UNSAFE, + rescan = zkw.getRecoverableZK().create(rescan, slt.toByteArray(), Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT_SEQUENTIAL); waitForCounter(SplitLogCounters.tot_wkr_task_acquired, 1, 2, WAIT_TIME); // RESCAN node might not have been processed if the worker became busy // with the above task. 
preempt the task again so that now the RESCAN // node is processed - ZKUtil.setData(zkw, task, slt.toByteArray()); + ZooKeeperUtil.setData(zkw, task, slt.toByteArray()); waitForCounter(SplitLogCounters.tot_wkr_preempt_task, 1, 2, WAIT_TIME); waitForCounter(SplitLogCounters.tot_wkr_task_acquired_rescan, 0, 1, WAIT_TIME); - List nodes = ZKUtil.listChildrenNoWatch(zkw, zkw.znodePaths.splitLogZNode); + List nodes = ZooKeeperUtil.listChildrenNoWatch(zkw, zkw.znodePaths.splitLogZNode); LOG.debug(nodes); int num = 0; for (String node : nodes) { @@ -434,7 +434,8 @@ public class TestSplitLogWorker { if (node.startsWith("RESCAN")) { String name = ZKSplitLog.getEncodedNodeName(zkw, node); String fn = ZKSplitLog.getFileName(name); - byte [] data = ZKUtil.getData(zkw, ZKUtil.joinZNode(zkw.znodePaths.splitLogZNode, fn)); + byte [] data = ZooKeeperUtil.getData(zkw, ZooKeeperUtil + .joinZNode(zkw.znodePaths.splitLogZNode, fn)); slt = SplitLogTask.parseFrom(data); assertTrue(slt.toString(), slt.isDone(SRV)); } @@ -453,7 +454,7 @@ public class TestSplitLogWorker { testConf.setInt("hbase.regionserver.wal.max.splitters", maxTasks); RegionServerServices mockedRS = getRegionServer(RS); for (int i = 0; i < maxTasks; i++) { - zkw.getRecoverableZooKeeper().create(ZKSplitLog.getEncodedNodeName(zkw, TATAS + i), + zkw.getRecoverableZK().create(ZKSplitLog.getEncodedNodeName(zkw, TATAS + i), new SplitLogTask.Unassigned(ServerName.valueOf("mgr,1,1"), this.mode).toByteArray(), Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT); } @@ -463,7 +464,7 @@ public class TestSplitLogWorker { try { waitForCounter(SplitLogCounters.tot_wkr_task_acquired, 0, maxTasks, WAIT_TIME); for (int i = 0; i < maxTasks; i++) { - byte[] bytes = ZKUtil.getData(zkw, ZKSplitLog.getEncodedNodeName(zkw, TATAS + i)); + byte[] bytes = ZooKeeperUtil.getData(zkw, ZKSplitLog.getEncodedNodeName(zkw, TATAS + i)); SplitLogTask slt = SplitLogTask.parseFrom(bytes); assertTrue(slt.isOwned(RS)); } @@ -490,13 +491,13 @@ public class 
TestSplitLogWorker { RegionServerServices mockedRS = getRegionServer(RS); // create two RS nodes - String rsPath = ZKUtil.joinZNode(zkw.znodePaths.rsZNode, RS.getServerName()); - zkw.getRecoverableZooKeeper().create(rsPath, null, Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL); - rsPath = ZKUtil.joinZNode(zkw.znodePaths.rsZNode, RS2.getServerName()); - zkw.getRecoverableZooKeeper().create(rsPath, null, Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL); + String rsPath = ZooKeeperUtil.joinZNode(zkw.znodePaths.rsZNode, RS.getServerName()); + zkw.getRecoverableZK().create(rsPath, null, Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL); + rsPath = ZooKeeperUtil.joinZNode(zkw.znodePaths.rsZNode, RS2.getServerName()); + zkw.getRecoverableZK().create(rsPath, null, Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL); for (int i = 0; i < maxTasks; i++) { - zkw.getRecoverableZooKeeper().create(ZKSplitLog.getEncodedNodeName(zkw, TATAS + i), + zkw.getRecoverableZK().create(ZKSplitLog.getEncodedNodeName(zkw, TATAS + i), new SplitLogTask.Unassigned(ServerName.valueOf("mgr,1,1"), this.mode).toByteArray(), Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT); } @@ -507,7 +508,7 @@ public class TestSplitLogWorker { int acquiredTasks = 0; waitForCounter(SplitLogCounters.tot_wkr_task_acquired, 0, 2, WAIT_TIME); for (int i = 0; i < maxTasks; i++) { - byte[] bytes = ZKUtil.getData(zkw, ZKSplitLog.getEncodedNodeName(zkw, TATAS + i)); + byte[] bytes = ZooKeeperUtil.getData(zkw, ZKSplitLog.getEncodedNodeName(zkw, TATAS + i)); SplitLogTask slt = SplitLogTask.parseFrom(bytes); if (slt.isOwned(RS)) { acquiredTasks++; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java index 8533004353..f9a12bb017 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java @@ -41,7 +41,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.CategoryBasedTimeout; -import org.apache.hadoop.hbase.CoordinatedStateManager; import org.apache.hadoop.hbase.Coprocessor; import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.HBaseTestingUtility; @@ -54,7 +53,7 @@ import org.apache.hadoop.hbase.MiniHBaseCluster; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.UnknownRegionException; -import org.apache.hadoop.hbase.ZooKeeperConnectionException; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperConnectionException; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Consistency; import org.apache.hadoop.hbase.client.Delete; @@ -829,13 +828,13 @@ public class TestSplitTransactionOnCluster { * @return Index of the server hosting the single table region * @throws UnknownRegionException * @throws MasterNotRunningException - * @throws org.apache.hadoop.hbase.ZooKeeperConnectionException + * @throws ZooKeeperConnectionException * @throws InterruptedException */ private int ensureTableRegionNotOnSameServerAsMeta(final Admin admin, final RegionInfo hri) - throws IOException, MasterNotRunningException, - ZooKeeperConnectionException, InterruptedException { + throws IOException, MasterNotRunningException, ZooKeeperConnectionException, + InterruptedException { // Now make sure that the table region is not on same server as that hosting // hbase:meta We don't want hbase:meta replay polluting our test when we later crash // the table region serving server. 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java index 2e5c5525a9..10a84b13f5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java @@ -56,7 +56,7 @@ import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WALKey; import org.apache.hadoop.hbase.wal.WALProvider.Writer; import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.junit.After; import org.junit.Before; import org.junit.Rule; @@ -505,7 +505,7 @@ public class TestWALLockup { } @Override - public ZooKeeperWatcher getZooKeeper() { + public ZKWatcher getZooKeeper() { return null; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java index 457a5d062d..6ea26720f8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java @@ -74,8 +74,8 @@ import org.apache.hadoop.hbase.tool.LoadIncrementalHFiles; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.HFileTestUtil; import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperUtil; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -202,11 +202,12 @@ public class TestMasterReplication { Table[] htables = 
getHTablesOnClusters(tableName); putAndWait(row, famName, htables[0], htables[0]); rollWALAndWait(utilities[0], table.getTableName(), row); - ZooKeeperWatcher zkw = utilities[0].getZooKeeperWatcher(); + ZKWatcher zkw = utilities[0].getZooKeeperWatcher(); String queuesZnode = - ZKUtil.joinZNode(zkw.getZNodePaths().baseZNode, ZKUtil.joinZNode("replication", "rs")); + ZooKeeperUtil + .joinZNode(zkw.getZNodePaths().baseZNode, ZooKeeperUtil.joinZNode("replication", "rs")); List listChildrenNoWatch = - ZKUtil.listChildrenNoWatch(zkw, ZKUtil.joinZNode(queuesZnode, rsName.toString())); + ZooKeeperUtil.listChildrenNoWatch(zkw, ZooKeeperUtil.joinZNode(queuesZnode, rsName.toString())); assertEquals(0, listChildrenNoWatch.size()); } @@ -492,7 +493,7 @@ public class TestMasterReplication { utility.startMiniCluster(); utilities[i] = utility; configurations[i] = conf; - new ZooKeeperWatcher(conf, "cluster" + i, null, true); + new ZKWatcher(conf, "cluster" + i, null, true); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMultiSlaveReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMultiSlaveReplication.java index a9896ce8c8..56d55ff8f8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMultiSlaveReplication.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMultiSlaveReplication.java @@ -52,7 +52,7 @@ import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.ReplicationTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.junit.BeforeClass; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -104,7 +104,7 @@ public class TestMultiSlaveReplication 
{ utility1.startMiniZKCluster(); MiniZooKeeperCluster miniZK = utility1.getZkCluster(); utility1.setZkCluster(miniZK); - new ZooKeeperWatcher(conf1, "cluster1", null, true); + new ZKWatcher(conf1, "cluster1", null, true); conf2 = new Configuration(conf1); conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2"); @@ -114,11 +114,11 @@ utility2 = new HBaseTestingUtility(conf2); utility2.setZkCluster(miniZK); - new ZooKeeperWatcher(conf2, "cluster2", null, true); + new ZKWatcher(conf2, "cluster2", null, true); utility3 = new HBaseTestingUtility(conf3); utility3.setZkCluster(miniZK); - new ZooKeeperWatcher(conf3, "cluster3", null, true); + new ZKWatcher(conf3, "cluster3", null, true); table = new HTableDescriptor(tableName); HColumnDescriptor fam = new HColumnDescriptor(famName); @@ -190,7 +190,7 @@ public class TestMultiSlaveReplication { // Even if the log was rolled in the middle of the replication // "row" is still replication. checkRow(row, 1, htable2); - // Replication thread of cluster 2 may be sleeping, and since row2 is not there in it, + // Replication thread of cluster 2 may be sleeping, and since row2 is not there in it, // we should wait before checking. checkWithWait(row, 1, htable3); @@ -244,7 +244,7 @@ public class TestMultiSlaveReplication { region.getWAL().unregisterWALActionsListener(listener); } - + private void checkWithWait(byte[] row, int count, Table table) throws Exception { Get get = new Get(row); for (int i = 0; i < NB_RETRIES; i++) { @@ -267,7 +267,7 @@ public class TestMultiSlaveReplication { } } } - + private void checkRow(byte[] row, int count, Table... 
tables) throws IOException { Get get = new Get(row); for (Table table : tables) { @@ -299,7 +299,7 @@ public class TestMultiSlaveReplication { if (removedFromAll) { break; } else { - Thread.sleep(SLEEP_TIME); + Thread.sleep(SLEEP_TIME); } } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestPerTableCFReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestPerTableCFReplication.java index abf2db3a1a..94a1e07b20 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestPerTableCFReplication.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestPerTableCFReplication.java @@ -50,7 +50,7 @@ import org.apache.hadoop.hbase.testclassification.FlakeyTests; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Rule; @@ -114,7 +114,7 @@ public class TestPerTableCFReplication { utility1 = new HBaseTestingUtility(conf1); utility1.startMiniZKCluster(); MiniZooKeeperCluster miniZK = utility1.getZkCluster(); - new ZooKeeperWatcher(conf1, "cluster1", null, true); + new ZKWatcher(conf1, "cluster1", null, true); conf2 = new Configuration(conf1); conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2"); @@ -124,11 +124,11 @@ public class TestPerTableCFReplication { utility2 = new HBaseTestingUtility(conf2); utility2.setZkCluster(miniZK); - new ZooKeeperWatcher(conf2, "cluster3", null, true); + new ZKWatcher(conf2, "cluster3", null, true); utility3 = new HBaseTestingUtility(conf3); utility3.setZkCluster(miniZK); - new ZooKeeperWatcher(conf3, "cluster3", null, true); + new ZKWatcher(conf3, "cluster3", null, true); table = new HTableDescriptor(tableName); HColumnDescriptor fam = new 
HColumnDescriptor(famName); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java index 58b97b9c5b..bfa8d960aa 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java @@ -40,7 +40,8 @@ import org.apache.hadoop.hbase.client.replication.ReplicationAdmin; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.runners.Parameterized.Parameter; @@ -64,8 +65,8 @@ public class TestReplicationBase { protected static Configuration conf2; protected static Configuration CONF_WITH_LOCALFS; - protected static ZooKeeperWatcher zkw1; - protected static ZooKeeperWatcher zkw2; + protected static ZKWatcher zkw1; + protected static ZKWatcher zkw2; protected static ReplicationAdmin admin; protected static Admin hbaseAdmin; @@ -127,8 +128,8 @@ public class TestReplicationBase { MiniZooKeeperCluster miniZK = utility1.getZkCluster(); // Have to reget conf1 in case zk cluster location different // than default - conf1 = utility1.getConfiguration(); - zkw1 = new ZooKeeperWatcher(conf1, "cluster1", null, true); + conf1 = utility1.getConfiguration(); + zkw1 = new ZKWatcher(conf1, "cluster1", null, true); admin = new ReplicationAdmin(conf1); LOG.info("Setup first Zk"); @@ -140,7 +141,7 @@ public class TestReplicationBase { utility2 = new HBaseTestingUtility(conf2); utility2.setZkCluster(miniZK); - zkw2 = new ZooKeeperWatcher(conf2, "cluster2", null, true); + zkw2 = new 
ZKWatcher(conf2, "cluster2", null, true); LOG.info("Setup second Zk"); CONF_WITH_LOCALFS = HBaseConfiguration.create(conf1); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateHBaseImpl.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateHBaseImpl.java index 2fe09afacf..885dd060f8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateHBaseImpl.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateHBaseImpl.java @@ -29,8 +29,9 @@ import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.ReplicationTests; import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperUtil; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.zookeeper.KeeperException; import org.junit.After; import org.junit.AfterClass; @@ -53,7 +54,7 @@ public class TestReplicationStateHBaseImpl { private static Configuration conf; private static HBaseTestingUtility utility; - private static ZooKeeperWatcher zkw; + private static ZKWatcher zkw; private static String replicationZNode; private static ReplicationQueues rq1; @@ -88,7 +89,7 @@ public class TestReplicationStateHBaseImpl { utility.startMiniCluster(); zkw = HBaseTestingUtility.getZooKeeperWatcher(utility); String replicationZNodeName = conf.get("zookeeper.znode.replication", "replication"); - replicationZNode = ZKUtil.joinZNode(zkw.znodePaths.baseZNode, replicationZNodeName); + replicationZNode = ZooKeeperUtil.joinZNode(zkw.znodePaths.baseZNode, replicationZNodeName); } @Before @@ -387,7 +388,7 @@ public class TestReplicationStateHBaseImpl { 
@After public void tearDown() throws KeeperException, IOException { - ZKUtil.deleteNodeRecursively(zkw, replicationZNode); + ZooKeeperUtil.deleteNodeRecursively(zkw, replicationZNode); } @AfterClass @@ -412,7 +413,7 @@ public class TestReplicationStateHBaseImpl { } @Override - public ZooKeeperWatcher getZooKeeper() { + public ZKWatcher getZooKeeper() { return null; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateZKImpl.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateZKImpl.java index 7d586ad184..040e4d7a39 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateZKImpl.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateZKImpl.java @@ -40,8 +40,9 @@ import org.apache.hadoop.hbase.testclassification.ReplicationTests; import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; import org.apache.hadoop.hbase.zookeeper.ZKClusterId; import org.apache.hadoop.hbase.zookeeper.ZKConfig; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperUtil; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.zookeeper.KeeperException; import org.junit.After; import org.junit.AfterClass; @@ -57,7 +58,7 @@ public class TestReplicationStateZKImpl extends TestReplicationStateBasic { private static Configuration conf; private static HBaseTestingUtility utility; - private static ZooKeeperWatcher zkw; + private static ZKWatcher zkw; private static String replicationZNode; private ReplicationQueuesZKImpl rqZK; @@ -69,7 +70,7 @@ public class TestReplicationStateZKImpl extends TestReplicationStateBasic { conf.setBoolean(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY, true); zkw = HBaseTestingUtility.getZooKeeperWatcher(utility); String 
replicationZNodeName = conf.get("zookeeper.znode.replication", "replication"); - replicationZNode = ZKUtil.joinZNode(zkw.znodePaths.baseZNode, replicationZNodeName); + replicationZNode = ZooKeeperUtil.joinZNode(zkw.znodePaths.baseZNode, replicationZNodeName); KEY_ONE = initPeerClusterState("/hbase1"); KEY_TWO = initPeerClusterState("/hbase2"); } @@ -79,9 +80,9 @@ public class TestReplicationStateZKImpl extends TestReplicationStateBasic { // Add a dummy region server and set up the cluster id Configuration testConf = new Configuration(conf); testConf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, baseZKNode); - ZooKeeperWatcher zkw1 = new ZooKeeperWatcher(testConf, "test1", null); - String fakeRs = ZKUtil.joinZNode(zkw1.znodePaths.rsZNode, "hostname1.example.org:1234"); - ZKUtil.createWithParents(zkw1, fakeRs); + ZKWatcher zkw1 = new ZKWatcher(testConf, "test1", null); + String fakeRs = ZooKeeperUtil.joinZNode(zkw1.znodePaths.rsZNode, "hostname1.example.org:1234"); + ZooKeeperUtil.createWithParents(zkw1, fakeRs); ZKClusterId.setClusterId(zkw1, new ClusterId()); return ZKConfig.getZooKeeperClusterKey(testConf); } @@ -111,7 +112,7 @@ public class TestReplicationStateZKImpl extends TestReplicationStateBasic { @After public void tearDown() throws KeeperException, IOException { - ZKUtil.deleteNodeRecursively(zkw, replicationZNode); + ZooKeeperUtil.deleteNodeRecursively(zkw, replicationZNode); } @AfterClass @@ -126,13 +127,14 @@ public class TestReplicationStateZKImpl extends TestReplicationStateBasic { @Test public void testIsPeerPath_PathToChildOfPeerNode() { - String peerChild = ZKUtil.joinZNode(ZKUtil.joinZNode(rqZK.peersZNode, "1"), "child"); + String peerChild = ZooKeeperUtil + .joinZNode(ZooKeeperUtil.joinZNode(rqZK.peersZNode, "1"), "child"); assertFalse(rqZK.isPeerPath(peerChild)); } @Test public void testIsPeerPath_ActualPeerPath() { - String peerPath = ZKUtil.joinZNode(rqZK.peersZNode, "1"); + String peerPath = ZooKeeperUtil.joinZNode(rqZK.peersZNode, "1"); 
assertTrue(rqZK.isPeerPath(peerPath)); } @@ -151,7 +153,7 @@ public class TestReplicationStateZKImpl extends TestReplicationStateBasic { } @Override - public ZooKeeperWatcher getZooKeeper() { + public ZKWatcher getZooKeeper() { return zkw; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationTableBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationTableBase.java index 83fdad7516..665eedb1dc 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationTableBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationTableBase.java @@ -23,7 +23,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.Waiter; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.ReplicationTests; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -43,7 +43,7 @@ public class TestReplicationTableBase { private static long TIME_OUT_MILLIS = 3000; private static Configuration conf; private static HBaseTestingUtility utility; - private static ZooKeeperWatcher zkw; + private static ZKWatcher zkw; private static ReplicationTableBase rb; private static ReplicationQueues rq; private static ReplicationQueuesClient rqc; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationTrackerZKImpl.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationTrackerZKImpl.java index 1e6e153eeb..d75b9d1d44 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationTrackerZKImpl.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationTrackerZKImpl.java @@ -42,8 +42,8 @@ import 
org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.ReplicationTests; import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; import org.apache.hadoop.hbase.zookeeper.ZKClusterId; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperUtil; import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; @@ -66,7 +66,7 @@ public class TestReplicationTrackerZKImpl { private static HBaseTestingUtility utility; // Each one of the below variables are reinitialized before every test case - private ZooKeeperWatcher zkw; + private ZKWatcher zkw; private ReplicationPeers rp; private ReplicationTracker rt; private AtomicInteger rsRemovedCount; @@ -81,14 +81,14 @@ public class TestReplicationTrackerZKImpl { utility = new HBaseTestingUtility(); utility.startMiniZKCluster(); conf = utility.getConfiguration(); - ZooKeeperWatcher zk = HBaseTestingUtility.getZooKeeperWatcher(utility); - ZKUtil.createWithParents(zk, zk.znodePaths.rsZNode); + ZKWatcher zk = HBaseTestingUtility.getZooKeeperWatcher(utility); + ZooKeeperUtil.createWithParents(zk, zk.znodePaths.rsZNode); } @Before public void setUp() throws Exception { zkw = HBaseTestingUtility.getZooKeeperWatcher(utility); - String fakeRs1 = ZKUtil.joinZNode(zkw.znodePaths.rsZNode, "hostname1.example.org:1234"); + String fakeRs1 = ZooKeeperUtil.joinZNode(zkw.znodePaths.rsZNode, "hostname1.example.org:1234"); try { ZKClusterId.setClusterId(zkw, new ClusterId()); rp = ReplicationFactory.getReplicationPeers(zkw, conf, zkw); @@ -116,32 +116,38 @@ public class TestReplicationTrackerZKImpl { assertEquals(0, rt.getListOfRegionServers().size()); // 1 region server - ZKUtil.createWithParents(zkw, - ZKUtil.joinZNode(zkw.znodePaths.rsZNode, "hostname1.example.org:1234")); + ZooKeeperUtil.createWithParents(zkw, + 
ZooKeeperUtil.joinZNode(zkw.znodePaths.rsZNode, "hostname1.example.org:1234")); assertEquals(1, rt.getListOfRegionServers().size()); // 2 region servers - ZKUtil.createWithParents(zkw, - ZKUtil.joinZNode(zkw.znodePaths.rsZNode, "hostname2.example.org:1234")); + ZooKeeperUtil.createWithParents(zkw, + ZooKeeperUtil.joinZNode(zkw.znodePaths.rsZNode, "hostname2.example.org:1234")); assertEquals(2, rt.getListOfRegionServers().size()); // 1 region server - ZKUtil.deleteNode(zkw, ZKUtil.joinZNode(zkw.znodePaths.rsZNode, "hostname2.example.org:1234")); + ZooKeeperUtil + .deleteNode(zkw, ZooKeeperUtil + .joinZNode(zkw.znodePaths.rsZNode, "hostname2.example.org:1234")); assertEquals(1, rt.getListOfRegionServers().size()); // 0 region server - ZKUtil.deleteNode(zkw, ZKUtil.joinZNode(zkw.znodePaths.rsZNode, "hostname1.example.org:1234")); + ZooKeeperUtil + .deleteNode(zkw, ZooKeeperUtil + .joinZNode(zkw.znodePaths.rsZNode, "hostname1.example.org:1234")); assertEquals(0, rt.getListOfRegionServers().size()); } @Test(timeout = 30000) public void testRegionServerRemovedEvent() throws Exception { - ZKUtil.createAndWatch(zkw, - ZKUtil.joinZNode(zkw.znodePaths.rsZNode, "hostname2.example.org:1234"), + ZooKeeperUtil.createAndWatch(zkw, + ZooKeeperUtil.joinZNode(zkw.znodePaths.rsZNode, "hostname2.example.org:1234"), HConstants.EMPTY_BYTE_ARRAY); rt.registerListener(new DummyReplicationListener()); // delete one - ZKUtil.deleteNode(zkw, ZKUtil.joinZNode(zkw.znodePaths.rsZNode, "hostname2.example.org:1234")); + ZooKeeperUtil + .deleteNode(zkw, ZooKeeperUtil + .joinZNode(zkw.znodePaths.rsZNode, "hostname2.example.org:1234")); // wait for event while (rsRemovedCount.get() < 1) { Thread.sleep(5); @@ -165,12 +171,12 @@ public class TestReplicationTrackerZKImpl { public void testPeerListChangedEvent() throws Exception { // add a peer rp.registerPeer("5", new ReplicationPeerConfig().setClusterKey(utility.getClusterKey())); - 
zkw.getRecoverableZooKeeper().getZooKeeper().getChildren("/hbase/replication/peers/5", true); + zkw.getRecoverableZK().getZooKeeper().getChildren("/hbase/replication/peers/5", true); rt.registerListener(new DummyReplicationListener()); rp.disablePeer("5"); int tmp = plChangedCount.get(); LOG.info("Peer count=" + tmp); - ZKUtil.deleteNode(zkw, "/hbase/replication/peers/5/peer-state"); + ZooKeeperUtil.deleteNode(zkw, "/hbase/replication/peers/5/peer-state"); // wait for event while (plChangedCount.get() <= tmp) { Thread.sleep(100); @@ -189,7 +195,7 @@ public class TestReplicationTrackerZKImpl { int exists = 0; int hyphen = 0; rp.registerPeer("6", new ReplicationPeerConfig().setClusterKey(utility.getClusterKey())); - + try{ rp.registerPeer("6", new ReplicationPeerConfig().setClusterKey(utility.getClusterKey())); }catch(IllegalArgumentException e){ @@ -203,11 +209,11 @@ } assertEquals(1, exists); assertEquals(1, hyphen); - + // clean up rp.unregisterPeer("6"); } - + private class DummyReplicationListener implements ReplicationListener { @Override @@ -248,7 +254,7 @@ } @Override - public ZooKeeperWatcher getZooKeeper() { + public ZKWatcher getZooKeeper() { return zkw; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSerialReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSerialReplication.java index 7a8e6390fa..ebd4f9ae3f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSerialReplication.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSerialReplication.java @@ -54,7 +54,7 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import 
org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Rule; @@ -102,14 +102,14 @@ public class TestSerialReplication { utility1 = new HBaseTestingUtility(conf1); utility1.startMiniZKCluster(); MiniZooKeeperCluster miniZK = utility1.getZkCluster(); - new ZooKeeperWatcher(conf1, "cluster1", null, true); + new ZKWatcher(conf1, "cluster1", null, true); conf2 = new Configuration(conf1); conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2"); utility2 = new HBaseTestingUtility(conf2); utility2.setZkCluster(miniZK); - new ZooKeeperWatcher(conf2, "cluster2", null, true); + new ZKWatcher(conf2, "cluster2", null, true); utility1.startMiniCluster(1, 10); utility2.startMiniCluster(1, 1); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/master/TestTableCFsUpdater.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/master/TestTableCFsUpdater.java index 8c604f46a8..853096ad7a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/master/TestTableCFsUpdater.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/master/TestTableCFsUpdater.java @@ -32,8 +32,8 @@ import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; import org.apache.hadoop.hbase.testclassification.ReplicationTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperUtil; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.zookeeper.KeeperException; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -55,7 +55,7 @@ public class TestTableCFsUpdater extends TableCFsUpdater { private static final Log LOG = LogFactory.getLog(TestTableCFsUpdater.class); private final static 
HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); - private static ZooKeeperWatcher zkw = null; + private static ZKWatcher zkw = null; private static Abortable abortable = null; @Rule @@ -80,7 +80,7 @@ public class TestTableCFsUpdater extends TableCFsUpdater { return false; } }; - zkw = new ZooKeeperWatcher(conf, "TableCFs", abortable, true); + zkw = new ZKWatcher(conf, "TableCFs", abortable, true); } @AfterClass @@ -99,15 +99,16 @@ public class TestTableCFsUpdater extends TableCFsUpdater { ReplicationPeerConfig rpc = new ReplicationPeerConfig(); rpc.setClusterKey(zkw.getQuorum()); String peerNode = getPeerNode(peerId); - ZKUtil.createWithParents(zkw, peerNode, ReplicationSerDeHelper.toByteArray(rpc)); + ZooKeeperUtil.createWithParents(zkw, peerNode, ReplicationSerDeHelper.toByteArray(rpc)); String tableCFs = tableName1 + ":cf1,cf2;" + tableName2 + ":cf3;" + tableName3; String tableCFsNode = getTableCFsNode(peerId); LOG.info("create tableCFs :" + tableCFsNode + " for peerId=" + peerId); - ZKUtil.createWithParents(zkw, tableCFsNode , Bytes.toBytes(tableCFs)); + ZooKeeperUtil.createWithParents(zkw, tableCFsNode , Bytes.toBytes(tableCFs)); - ReplicationPeerConfig actualRpc = ReplicationSerDeHelper.parsePeerFrom(ZKUtil.getData(zkw, peerNode)); - String actualTableCfs = Bytes.toString(ZKUtil.getData(zkw, tableCFsNode)); + ReplicationPeerConfig actualRpc = ReplicationSerDeHelper.parsePeerFrom( + ZooKeeperUtil.getData(zkw, peerNode)); + String actualTableCfs = Bytes.toString(ZooKeeperUtil.getData(zkw, tableCFsNode)); assertEquals(rpc.getClusterKey(), actualRpc.getClusterKey()); assertNull(actualRpc.getTableCFsMap()); @@ -117,15 +118,15 @@ public class TestTableCFsUpdater extends TableCFsUpdater { rpc = new ReplicationPeerConfig(); rpc.setClusterKey(zkw.getQuorum()); peerNode = getPeerNode(peerId); - ZKUtil.createWithParents(zkw, peerNode, ReplicationSerDeHelper.toByteArray(rpc)); + ZooKeeperUtil.createWithParents(zkw, peerNode, 
ReplicationSerDeHelper.toByteArray(rpc)); tableCFs = tableName1 + ":cf1,cf3;" + tableName2 + ":cf2"; tableCFsNode = getTableCFsNode(peerId); LOG.info("create tableCFs :" + tableCFsNode + " for peerId=" + peerId); - ZKUtil.createWithParents(zkw, tableCFsNode , Bytes.toBytes(tableCFs)); + ZooKeeperUtil.createWithParents(zkw, tableCFsNode , Bytes.toBytes(tableCFs)); - actualRpc = ReplicationSerDeHelper.parsePeerFrom(ZKUtil.getData(zkw, peerNode)); - actualTableCfs = Bytes.toString(ZKUtil.getData(zkw, tableCFsNode)); + actualRpc = ReplicationSerDeHelper.parsePeerFrom(ZooKeeperUtil.getData(zkw, peerNode)); + actualTableCfs = Bytes.toString(ZooKeeperUtil.getData(zkw, tableCFsNode)); assertEquals(rpc.getClusterKey(), actualRpc.getClusterKey()); assertNull(actualRpc.getTableCFsMap()); @@ -135,15 +136,15 @@ public class TestTableCFsUpdater extends TableCFsUpdater { rpc = new ReplicationPeerConfig(); rpc.setClusterKey(zkw.getQuorum()); peerNode = getPeerNode(peerId); - ZKUtil.createWithParents(zkw, peerNode, ReplicationSerDeHelper.toByteArray(rpc)); + ZooKeeperUtil.createWithParents(zkw, peerNode, ReplicationSerDeHelper.toByteArray(rpc)); tableCFs = ""; tableCFsNode = getTableCFsNode(peerId); LOG.info("create tableCFs :" + tableCFsNode + " for peerId=" + peerId); - ZKUtil.createWithParents(zkw, tableCFsNode , Bytes.toBytes(tableCFs)); + ZooKeeperUtil.createWithParents(zkw, tableCFsNode , Bytes.toBytes(tableCFs)); - actualRpc = ReplicationSerDeHelper.parsePeerFrom(ZKUtil.getData(zkw, peerNode)); - actualTableCfs = Bytes.toString(ZKUtil.getData(zkw, tableCFsNode)); + actualRpc = ReplicationSerDeHelper.parsePeerFrom(ZooKeeperUtil.getData(zkw, peerNode)); + actualTableCfs = Bytes.toString(ZooKeeperUtil.getData(zkw, tableCFsNode)); assertEquals(rpc.getClusterKey(), actualRpc.getClusterKey()); assertNull(actualRpc.getTableCFsMap()); @@ -153,11 +154,11 @@ public class TestTableCFsUpdater extends TableCFsUpdater { rpc = new ReplicationPeerConfig(); 
rpc.setClusterKey(zkw.getQuorum()); peerNode = getPeerNode(peerId); - ZKUtil.createWithParents(zkw, peerNode, ReplicationSerDeHelper.toByteArray(rpc)); + ZooKeeperUtil.createWithParents(zkw, peerNode, ReplicationSerDeHelper.toByteArray(rpc)); tableCFsNode = getTableCFsNode(peerId); - actualRpc = ReplicationSerDeHelper.parsePeerFrom(ZKUtil.getData(zkw, peerNode)); - actualTableCfs = Bytes.toString(ZKUtil.getData(zkw, tableCFsNode)); + actualRpc = ReplicationSerDeHelper.parsePeerFrom(ZooKeeperUtil.getData(zkw, peerNode)); + actualTableCfs = Bytes.toString(ZooKeeperUtil.getData(zkw, tableCFsNode)); assertEquals(rpc.getClusterKey(), actualRpc.getClusterKey()); assertNull(actualRpc.getTableCFsMap()); @@ -167,7 +168,7 @@ public class TestTableCFsUpdater extends TableCFsUpdater { peerId = "1"; peerNode = getPeerNode(peerId); - actualRpc = ReplicationSerDeHelper.parsePeerFrom(ZKUtil.getData(zkw, peerNode)); + actualRpc = ReplicationSerDeHelper.parsePeerFrom(ZooKeeperUtil.getData(zkw, peerNode)); assertEquals(rpc.getClusterKey(), actualRpc.getClusterKey()); Map> tableNameListMap = actualRpc.getTableCFsMap(); assertEquals(3, tableNameListMap.size()); @@ -184,7 +185,7 @@ public class TestTableCFsUpdater extends TableCFsUpdater { peerId = "2"; peerNode = getPeerNode(peerId); - actualRpc = ReplicationSerDeHelper.parsePeerFrom(ZKUtil.getData(zkw, peerNode)); + actualRpc = ReplicationSerDeHelper.parsePeerFrom(ZooKeeperUtil.getData(zkw, peerNode)); assertEquals(rpc.getClusterKey(), actualRpc.getClusterKey()); tableNameListMap = actualRpc.getTableCFsMap(); assertEquals(2, tableNameListMap.size()); @@ -198,14 +199,14 @@ public class TestTableCFsUpdater extends TableCFsUpdater { peerId = "3"; peerNode = getPeerNode(peerId); - actualRpc = ReplicationSerDeHelper.parsePeerFrom(ZKUtil.getData(zkw, peerNode)); + actualRpc = ReplicationSerDeHelper.parsePeerFrom(ZooKeeperUtil.getData(zkw, peerNode)); assertEquals(rpc.getClusterKey(), actualRpc.getClusterKey()); tableNameListMap = 
actualRpc.getTableCFsMap(); assertNull(tableNameListMap); peerId = "4"; peerNode = getPeerNode(peerId); - actualRpc = ReplicationSerDeHelper.parsePeerFrom(ZKUtil.getData(zkw, peerNode)); + actualRpc = ReplicationSerDeHelper.parsePeerFrom(ZooKeeperUtil.getData(zkw, peerNode)); assertEquals(rpc.getClusterKey(), actualRpc.getClusterKey()); tableNameListMap = actualRpc.getTableCFsMap(); assertNull(tableNameListMap); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestGlobalThrottler.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestGlobalThrottler.java index 2469c7c726..9b1648f631 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestGlobalThrottler.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestGlobalThrottler.java @@ -48,7 +48,7 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.junit.AfterClass; import org.junit.Assert; import org.junit.BeforeClass; @@ -86,14 +86,14 @@ public class TestGlobalThrottler { utility1 = new HBaseTestingUtility(conf1); utility1.startMiniZKCluster(); MiniZooKeeperCluster miniZK = utility1.getZkCluster(); - new ZooKeeperWatcher(conf1, "cluster1", null, true); + new ZKWatcher(conf1, "cluster1", null, true); conf2 = new Configuration(conf1); conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2"); utility2 = new HBaseTestingUtility(conf2); utility2.setZkCluster(miniZK); - new ZooKeeperWatcher(conf2, "cluster2", null, true); + new ZKWatcher(conf2, "cluster2", null, true); ReplicationAdmin admin1 = new ReplicationAdmin(conf1); ReplicationPeerConfig rpc = new ReplicationPeerConfig(); diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java index 8451d694f3..074d06eca9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java @@ -82,8 +82,8 @@ import org.apache.hadoop.hbase.wal.WALFactory; import org.apache.hadoop.hbase.wal.WALKey; import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; import org.apache.hadoop.hbase.zookeeper.ZKClusterId; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperUtil; import org.junit.After; import org.junit.AfterClass; import org.junit.Before; @@ -116,7 +116,7 @@ public abstract class TestReplicationSourceManager { protected static ReplicationSourceManager manager; - protected static ZooKeeperWatcher zkw; + protected static ZKWatcher zkw; protected static HTableDescriptor htd; @@ -149,17 +149,17 @@ protected static void setupZkAndReplication() throws Exception { // The implementing class should set up the conf assertNotNull(conf); - zkw = new ZooKeeperWatcher(conf, "test", null); - ZKUtil.createWithParents(zkw, "/hbase/replication"); - ZKUtil.createWithParents(zkw, "/hbase/replication/peers/1"); - ZKUtil.setData(zkw, "/hbase/replication/peers/1", + zkw = new ZKWatcher(conf, "test", null); + ZooKeeperUtil.createWithParents(zkw, "/hbase/replication"); + ZooKeeperUtil.createWithParents(zkw, "/hbase/replication/peers/1"); + ZooKeeperUtil.setData(zkw, "/hbase/replication/peers/1", 
Bytes.toBytes(conf.get(HConstants.ZOOKEEPER_QUORUM) + ":" + conf.get(HConstants.ZOOKEEPER_CLIENT_PORT) + ":/1")); - ZKUtil.createWithParents(zkw, "/hbase/replication/peers/1/peer-state"); - ZKUtil.setData(zkw, "/hbase/replication/peers/1/peer-state", + ZooKeeperUtil.createWithParents(zkw, "/hbase/replication/peers/1/peer-state"); + ZooKeeperUtil.setData(zkw, "/hbase/replication/peers/1/peer-state", ReplicationStateZKBase.ENABLED_ZNODE_BYTES); - ZKUtil.createWithParents(zkw, "/hbase/replication/state"); - ZKUtil.setData(zkw, "/hbase/replication/state", ReplicationStateZKBase.ENABLED_ZNODE_BYTES); + ZooKeeperUtil.createWithParents(zkw, "/hbase/replication/state"); + ZooKeeperUtil.setData(zkw, "/hbase/replication/state", ReplicationStateZKBase.ENABLED_ZNODE_BYTES); ZKClusterId.setClusterId(zkw, new ClusterId()); FSUtils.setRootDir(utility.getConfiguration(), utility.getDataTestDir()); @@ -668,7 +668,7 @@ public abstract class TestReplicationSourceManager { } @Override - public ZooKeeperWatcher getZooKeeper() { + public ZKWatcher getZooKeeper() { return zkw; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController2.java index ad8cb14067..ee6140464f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController2.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController2.java @@ -55,8 +55,8 @@ import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.SecurityTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.TestTableName; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperUtil; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.junit.After; import 
org.junit.AfterClass; import org.junit.Before; @@ -509,10 +509,12 @@ public class TestAccessController2 extends SecureTestUtil { // Namespace needs this, as they follow the lazy creation of ACL znode. grantOnNamespace(TEST_UTIL, TESTGROUP1_USER1.getShortName(), ns, Action.ADMIN); - ZooKeeperWatcher zkw = TEST_UTIL.getMiniHBaseCluster().getMaster().getZooKeeper(); - assertTrue("The acl znode for table should exist", ZKUtil.checkExists(zkw, baseAclZNode + + ZKWatcher zkw = TEST_UTIL.getMiniHBaseCluster().getMaster().getZooKeeper(); + assertTrue("The acl znode for table should exist", ZooKeeperUtil + .checkExists(zkw, baseAclZNode + table.getNameAsString()) != -1); - assertTrue("The acl znode for namespace should exist", ZKUtil.checkExists(zkw, baseAclZNode + + assertTrue("The acl znode for namespace should exist", ZooKeeperUtil + .checkExists(zkw, baseAclZNode + convertToNamespace(ns)) != -1); revokeFromNamespace(TEST_UTIL, TESTGROUP1_USER1.getShortName(), ns, Action.ADMIN); @@ -520,8 +522,8 @@ deleteNamespace(TEST_UTIL, ns); assertTrue("The acl znode for table should have been deleted", - ZKUtil.checkExists(zkw, baseAclZNode + table.getNameAsString()) == -1); + ZooKeeperUtil.checkExists(zkw, baseAclZNode + table.getNameAsString()) == -1); assertTrue( "The acl znode for namespace should have been deleted", - ZKUtil.checkExists(zkw, baseAclZNode + convertToNamespace(ns)) == -1); + ZooKeeperUtil.checkExists(zkw, baseAclZNode + convertToNamespace(ns)) == -1); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestTablePermissions.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestTablePermissions.java index 6ca979072c..a9de6c055a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestTablePermissions.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestTablePermissions.java @@ -44,7 +44,7 @@ 
import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.SecurityTests; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.junit.After; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -61,7 +61,7 @@ import org.apache.hadoop.hbase.shaded.com.google.common.collect.ListMultimap; public class TestTablePermissions { private static final Log LOG = LogFactory.getLog(TestTablePermissions.class); private static final HBaseTestingUtility UTIL = new HBaseTestingUtility(); - private static ZooKeeperWatcher ZKW; + private static ZKWatcher ZKW; private final static Abortable ABORTABLE = new Abortable() { private final AtomicBoolean abort = new AtomicBoolean(false); @@ -97,7 +97,7 @@ public class TestTablePermissions { // Wait for the ACL table to become available UTIL.waitTableEnabled(AccessControlLists.ACL_TABLE_NAME); - ZKW = new ZooKeeperWatcher(UTIL.getConfiguration(), + ZKW = new ZKWatcher(UTIL.getConfiguration(), "TestTablePermissions", ABORTABLE); UTIL.createTable(TEST_TABLE, TEST_FAMILY); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestZKPermissionWatcher.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestZKPermissionWatcher.java index 76de0c6048..18fb15fd52 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestZKPermissionWatcher.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestZKPermissionWatcher.java @@ -34,7 +34,7 @@ import org.apache.hadoop.hbase.Waiter.Predicate; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.SecurityTests; -import 
org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; @@ -75,9 +75,9 @@ public class TestZKPermissionWatcher { // start minicluster UTIL.startMiniCluster(); - AUTH_A = TableAuthManager.getOrCreate(new ZooKeeperWatcher(conf, + AUTH_A = TableAuthManager.getOrCreate(new ZKWatcher(conf, "TestZKPermissionsWatcher_1", ABORTABLE), conf); - AUTH_B = TableAuthManager.getOrCreate(new ZooKeeperWatcher(conf, + AUTH_B = TableAuthManager.getOrCreate(new ZKWatcher(conf, "TestZKPermissionsWatcher_2", ABORTABLE), conf); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java index f116b3074b..62f72078df 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java @@ -76,7 +76,7 @@ import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.util.Writables; import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; import org.apache.hadoop.hbase.zookeeper.ZKClusterId; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.hadoop.net.DNS; import org.apache.hadoop.security.authorize.PolicyProvider; import org.apache.hadoop.security.authorize.Service; @@ -127,7 +127,7 @@ public class TestTokenAuthentication { private HBaseTestingUtility TEST_UTIL; private RpcServerInterface rpcServer; private InetSocketAddress isa; - private ZooKeeperWatcher zookeeper; + private ZKWatcher zookeeper; private Sleeper sleeper; private boolean started = false; private boolean aborted = false; @@ -220,7 +220,7 @@ public class TestTokenAuthentication { } @Override - public ZooKeeperWatcher 
getZooKeeper() { + public ZKWatcher getZooKeeper() { return zookeeper; } @@ -261,7 +261,7 @@ public class TestTokenAuthentication { // ZK configuration must _not_ have hbase.security.authentication or it will require SASL auth Configuration zkConf = new Configuration(conf); zkConf.set(User.HBASE_SECURITY_CONF_KEY, "simple"); - this.zookeeper = new ZooKeeperWatcher(zkConf, TokenServer.class.getSimpleName(), + this.zookeeper = new ZKWatcher(zkConf, TokenServer.class.getSimpleName(), this, true); this.rpcServer.start(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestZKSecretWatcher.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestZKSecretWatcher.java index 857cdd0ad1..ae4b19d274 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestZKSecretWatcher.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestZKSecretWatcher.java @@ -35,7 +35,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.SecurityTests; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; @@ -71,13 +71,13 @@ public class TestZKSecretWatcher { private static class AuthenticationTokenSecretManagerForTest extends AuthenticationTokenSecretManager { private CountDownLatch latch = new CountDownLatch(1); - + public AuthenticationTokenSecretManagerForTest(Configuration conf, - ZooKeeperWatcher zk, String serverName, + ZKWatcher zk, String serverName, long keyUpdateInterval, long tokenMaxLifetime) { super(conf, zk, serverName, keyUpdateInterval, tokenMaxLifetime); } - + @Override synchronized boolean removeKey(Integer keyId) { 
boolean b = super.removeKey(keyId); @@ -86,19 +86,19 @@ public class TestZKSecretWatcher { } return b; } - + CountDownLatch getLatch() { return latch; } } - + @BeforeClass public static void setupBeforeClass() throws Exception { TEST_UTIL = new HBaseTestingUtility(); TEST_UTIL.startMiniZKCluster(); Configuration conf = TEST_UTIL.getConfiguration(); - ZooKeeperWatcher zk = newZK(conf, "server1", new MockAbortable()); + ZKWatcher zk = newZK(conf, "server1", new MockAbortable()); AuthenticationTokenSecretManagerForTest[] tmp = new AuthenticationTokenSecretManagerForTest[2]; tmp[0] = new AuthenticationTokenSecretManagerForTest( conf, zk, "server1", 60*60*1000, 60*1000); @@ -179,7 +179,7 @@ public class TestZKSecretWatcher { // bring up a new slave Configuration conf = TEST_UTIL.getConfiguration(); - ZooKeeperWatcher zk = newZK(conf, "server3", new MockAbortable()); + ZKWatcher zk = newZK(conf, "server3", new MockAbortable()); KEY_SLAVE2 = new AuthenticationTokenSecretManager( conf, zk, "server3", 60*60*1000, 60*1000); KEY_SLAVE2.start(); @@ -233,7 +233,7 @@ public class TestZKSecretWatcher { assertTrue(newCurrent.getKeyId() > current.getKeyId()); // add another slave - ZooKeeperWatcher zk3 = newZK(conf, "server4", new MockAbortable()); + ZKWatcher zk3 = newZK(conf, "server4", new MockAbortable()); KEY_SLAVE3 = new AuthenticationTokenSecretManager( conf, zk3, "server4", 60*60*1000, 60*1000); KEY_SLAVE3.start(); @@ -275,10 +275,10 @@ public class TestZKSecretWatcher { assertTrue(newCurrent2.getKeyId() > current2.getKeyId()); } - private static ZooKeeperWatcher newZK(Configuration conf, String name, + private static ZKWatcher newZK(Configuration conf, String name, Abortable abort) throws Exception { Configuration copy = HBaseConfiguration.create(conf); - ZooKeeperWatcher zk = new ZooKeeperWatcher(copy, name, abort); + ZKWatcher zk = new ZKWatcher(copy, name, abort); return zk; } } diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestZKSecretWatcherRefreshKeys.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestZKSecretWatcherRefreshKeys.java index b2396819c8..0ae5b66dd4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestZKSecretWatcherRefreshKeys.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestZKSecretWatcherRefreshKeys.java @@ -27,8 +27,8 @@ import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.testclassification.SecurityTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Writables; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperUtil; import org.junit.AfterClass; import org.junit.Assert; import org.junit.BeforeClass; @@ -54,7 +54,7 @@ public class TestZKSecretWatcherRefreshKeys { return abort; } } - + @BeforeClass public static void setupBeforeClass() throws Exception { TEST_UTIL = new HBaseTestingUtility(); @@ -66,28 +66,28 @@ public class TestZKSecretWatcherRefreshKeys { TEST_UTIL.shutdownMiniZKCluster(); } - private static ZooKeeperWatcher newZK(Configuration conf, String name, + private static ZKWatcher newZK(Configuration conf, String name, Abortable abort) throws Exception { Configuration copy = HBaseConfiguration.create(conf); - ZooKeeperWatcher zk = new ZooKeeperWatcher(copy, name, abort); + ZKWatcher zk = new ZKWatcher(copy, name, abort); return zk; } @Test public void testRefreshKeys() throws Exception { Configuration conf = TEST_UTIL.getConfiguration(); - ZooKeeperWatcher zk = newZK(conf, "127.0.0.1", new MockAbortable()); - AuthenticationTokenSecretManager keyManager = - new AuthenticationTokenSecretManager(conf, zk, "127.0.0.1", + ZKWatcher zk = newZK(conf, "127.0.0.1", new 
MockAbortable()); + AuthenticationTokenSecretManager keyManager = + new AuthenticationTokenSecretManager(conf, zk, "127.0.0.1", 60 * 60 * 1000, 60 * 1000); ZKSecretWatcher watcher = new ZKSecretWatcher(conf, zk, keyManager); - ZKUtil.deleteChildrenRecursively(zk, watcher.getKeysParentZNode()); + ZooKeeperUtil.deleteChildrenRecursively(zk, watcher.getKeysParentZNode()); Integer[] keys = { 1, 2, 3, 4, 5, 6 }; for (Integer key : keys) { AuthenticationKey ak = new AuthenticationKey(key, System.currentTimeMillis() + 600 * 1000, null); - ZKUtil.createWithParents(zk, - ZKUtil.joinZNode(watcher.getKeysParentZNode(), key.toString()), + ZooKeeperUtil.createWithParents(zk, + ZooKeeperUtil.joinZNode(watcher.getKeysParentZNode(), key.toString()), Writables.getBytes(ak)); } Assert.assertNull(keyManager.getCurrentKey()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelReplicationWithExpAsString.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelReplicationWithExpAsString.java index 398be48c47..c834be399f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelReplicationWithExpAsString.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelReplicationWithExpAsString.java @@ -52,7 +52,7 @@ import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.SecurityTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.junit.Before; import org.junit.experimental.categories.Category; @@ -110,7 +110,7 @@ public class TestVisibilityLabelReplicationWithExpAsString extends TestVisibilit TEST_UTIL = new HBaseTestingUtility(conf); TEST_UTIL.startMiniZKCluster(); 
MiniZooKeeperCluster miniZK = TEST_UTIL.getZkCluster(); - zkw1 = new ZooKeeperWatcher(conf, "cluster1", null, true); + zkw1 = new ZKWatcher(conf, "cluster1", null, true); admin = TEST_UTIL.getAdmin(); // Base conf2 on conf1 so it gets the right zk cluster. @@ -125,7 +125,7 @@ public class TestVisibilityLabelReplicationWithExpAsString extends TestVisibilit setVisibilityLabelServiceImpl(conf1, ExpAsStringVisibilityLabelServiceImpl.class); TEST_UTIL1 = new HBaseTestingUtility(conf1); TEST_UTIL1.setZkCluster(miniZK); - zkw2 = new ZooKeeperWatcher(conf1, "cluster2", null, true); + zkw2 = new ZKWatcher(conf1, "cluster2", null, true); TEST_UTIL.startMiniCluster(1); // Wait for the labels table to become available diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsReplication.java index 99525e278e..6b6b074543 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsReplication.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsReplication.java @@ -74,7 +74,7 @@ import org.apache.hadoop.hbase.testclassification.SecurityTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.wal.WAL.Entry; import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.junit.Assert; import org.junit.Before; import org.junit.Rule; @@ -109,8 +109,8 @@ public class TestVisibilityLabelsReplication { public final static byte[] fam = Bytes.toBytes("info"); public final static byte[] qual = Bytes.toBytes("qual"); public final static byte[] value = Bytes.toBytes("value"); - protected static ZooKeeperWatcher zkw1; - protected static ZooKeeperWatcher zkw2; + protected static ZKWatcher zkw1; + 
protected static ZKWatcher zkw2; protected static int expected[] = { 4, 6, 4, 0, 3 }; private static final String NON_VISIBILITY = "non-visibility"; protected static String[] expectedVisString = { @@ -161,7 +161,7 @@ public class TestVisibilityLabelsReplication { TEST_UTIL = new HBaseTestingUtility(conf); TEST_UTIL.startMiniZKCluster(); MiniZooKeeperCluster miniZK = TEST_UTIL.getZkCluster(); - zkw1 = new ZooKeeperWatcher(conf, "cluster1", null, true); + zkw1 = new ZKWatcher(conf, "cluster1", null, true); admin = TEST_UTIL.getAdmin(); // Base conf2 on conf1 so it gets the right zk cluster. @@ -177,7 +177,7 @@ public class TestVisibilityLabelsReplication { USER1 = User.createUserForTesting(conf1, "user1", new String[] {}); TEST_UTIL1 = new HBaseTestingUtility(conf1); TEST_UTIL1.setZkCluster(miniZK); - zkw2 = new ZooKeeperWatcher(conf1, "cluster2", null, true); + zkw2 = new ZKWatcher(conf1, "cluster2", null, true); TEST_UTIL.startMiniCluster(1); // Wait for the labels table to become available diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java index db442192a5..decddb34b1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java @@ -28,10 +28,10 @@ import org.apache.hadoop.hbase.CoordinatedStateManager; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.ZooKeeperConnectionException; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperConnectionException; import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; /** * Basic mock Server for handler tests. 
@@ -39,10 +39,10 @@ import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; public class MockServer implements Server { private static final Log LOG = LogFactory.getLog(MockServer.class); final static ServerName NAME = ServerName.valueOf("MockServer", 123, -1); - + boolean stopped; boolean aborted; - final ZooKeeperWatcher zk; + final ZKWatcher zk; final HBaseTestingUtility htu; @SuppressWarnings("unused") @@ -66,7 +66,7 @@ public class MockServer implements Server { throws ZooKeeperConnectionException, IOException { this.htu = htu; this.zk = zkw? - new ZooKeeperWatcher(htu.getConfiguration(), NAME.toString(), this, true): + new ZKWatcher(htu.getConfiguration(), NAME.toString(), this, true): null; } @@ -94,7 +94,7 @@ public class MockServer implements Server { } @Override - public ZooKeeperWatcher getZooKeeper() { + public ZKWatcher getZooKeeper() { return this.zk; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/ProcessBasedLocalHBaseCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/ProcessBasedLocalHBaseCluster.java index a5cf0bd4a6..efc7b21884 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/ProcessBasedLocalHBaseCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/ProcessBasedLocalHBaseCluster.java @@ -48,7 +48,7 @@ import org.apache.hadoop.hbase.MiniHBaseCluster; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.MiscTests; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperUtil; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.junit.experimental.categories.Category; @@ -162,7 +162,7 @@ public class ProcessBasedLocalHBaseCluster { startMaster(masterPort); } - ZKUtil.waitForBaseZNode(conf); + ZooKeeperUtil.waitForBaseZNode(conf); for (int rsPort : rsPorts) { startRegionServer(rsPort); diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java index 7463da101b..9029bfba0a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java @@ -86,7 +86,7 @@ import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker; import org.apache.hadoop.hbase.util.hbck.HbckTestingUtil; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.junit.AfterClass; import org.junit.Assert; import org.junit.Before; @@ -1493,7 +1493,7 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck { Assert.assertEquals(1, replicationAdmin.getPeersCount()); // create replicator - ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "Test Hbase Fsck", connection); + ZKWatcher zkw = new ZKWatcher(conf, "Test Hbase Fsck", connection); ReplicationQueues repQueues = ReplicationFactory.getReplicationQueues(new ReplicationQueuesArguments(conf, connection, zkw)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestRecoverableZooKeeper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestRecoverableZK.java similarity index 93% rename from hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestRecoverableZooKeeper.java rename to hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestRecoverableZK.java index e71210d067..81a6744387 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestRecoverableZooKeeper.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestRecoverableZK.java @@ -41,7 +41,7 @@ import org.junit.Test;
import org.junit.experimental.categories.Category; @Category({MiscTests.class, MediumTests.class}) -public class TestRecoverableZooKeeper { +public class TestRecoverableZK { private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); @@ -71,13 +71,13 @@ public class TestRecoverableZooKeeper { public void testSetDataVersionMismatchInLoop() throws Exception { String znode = "/hbase/splitWAL/9af7cfc9b15910a0b3d714bf40a3248f"; Configuration conf = TEST_UTIL.getConfiguration(); - ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "testSetDataVersionMismatchInLoop", + ZKWatcher zkw = new ZKWatcher(conf, "testSetDataVersionMismatchInLoop", abortable, true); String ensemble = ZKConfig.getZKQuorumServersString(conf); - RecoverableZooKeeper rzk = ZKUtil.connect(conf, ensemble, zkw); + RecoverableZK rzk = ZooKeeperUtil.connect(conf, ensemble, zkw); rzk.create(znode, new byte[0], Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT); rzk.setData(znode, "OPENING".getBytes(), 0); - Field zkField = RecoverableZooKeeper.class.getDeclaredField("zk"); + Field zkField = RecoverableZK.class.getDeclaredField("zk"); zkField.setAccessible(true); int timeout = conf.getInt(HConstants.ZK_SESSION_TIMEOUT, HConstants.DEFAULT_ZK_SESSION_TIMEOUT); ZookeeperStub zkStub = new ZookeeperStub(ensemble, timeout, zkw); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKLeaderManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKLeaderManager.java index 1faf8e52e0..87e92f54f4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKLeaderManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKLeaderManager.java @@ -61,12 +61,12 @@ public class TestZKLeaderManager { private static class MockLeader extends Thread implements Stoppable { private boolean stopped; - private ZooKeeperWatcher watcher; + private ZKWatcher watcher; private ZKLeaderManager zkLeader; private AtomicBoolean master = 
new AtomicBoolean(false); private int index; - public MockLeader(ZooKeeperWatcher watcher, int index) { + public MockLeader(ZKWatcher watcher, int index) { setDaemon(true); setName("TestZKLeaderManager-leader-" + index); this.index = index; @@ -83,7 +83,7 @@ public class TestZKLeaderManager { return index; } - public ZooKeeperWatcher getWatcher() { + public ZKWatcher getWatcher() { return watcher; } @@ -132,7 +132,7 @@ public class TestZKLeaderManager { MockAbortable abortable = new MockAbortable(); CANDIDATES = new MockLeader[3]; for (int i = 0; i < 3; i++) { - ZooKeeperWatcher watcher = newZK(conf, "server"+i, abortable); + ZKWatcher watcher = newZK(conf, "server"+i, abortable); CANDIDATES[i] = new MockLeader(watcher, i); CANDIDATES[i].start(); } @@ -150,7 +150,7 @@ public class TestZKLeaderManager { assertNotNull("Leader should exist", currentLeader); LOG.debug("Current leader index is "+currentLeader.getIndex()); - byte[] znodeData = ZKUtil.getData(currentLeader.getWatcher(), LEADER_ZNODE); + byte[] znodeData = ZooKeeperUtil.getData(currentLeader.getWatcher(), LEADER_ZNODE); assertNotNull("Leader znode should contain leader index", znodeData); assertTrue("Leader znode should not be empty", znodeData.length > 0); int storedIndex = Bytes.toInt(znodeData); @@ -168,7 +168,7 @@ public class TestZKLeaderManager { assertNotNull("New leader should exist after abdication", currentLeader); LOG.debug("New leader index is "+currentLeader.getIndex()); - znodeData = ZKUtil.getData(currentLeader.getWatcher(), LEADER_ZNODE); + znodeData = ZooKeeperUtil.getData(currentLeader.getWatcher(), LEADER_ZNODE); assertNotNull("Leader znode should contain leader index", znodeData); assertTrue("Leader znode should not be empty", znodeData.length > 0); storedIndex = Bytes.toInt(znodeData); @@ -186,7 +186,7 @@ public class TestZKLeaderManager { assertNotNull("New leader should exist after stop", currentLeader); LOG.debug("New leader index is "+currentLeader.getIndex()); - znodeData = 
ZKUtil.getData(currentLeader.getWatcher(), LEADER_ZNODE); + znodeData = ZooKeeperUtil.getData(currentLeader.getWatcher(), LEADER_ZNODE); assertNotNull("Leader znode should contain leader index", znodeData); assertTrue("Leader znode should not be empty", znodeData.length > 0); storedIndex = Bytes.toInt(znodeData); @@ -225,10 +225,10 @@ public class TestZKLeaderManager { return currentLeader; } - private static ZooKeeperWatcher newZK(Configuration conf, String name, + private static ZKWatcher newZK(Configuration conf, String name, Abortable abort) throws Exception { Configuration copy = HBaseConfiguration.create(conf); - ZooKeeperWatcher zk = new ZooKeeperWatcher(copy, name, abort); + ZKWatcher zk = new ZKWatcher(copy, name, abort); return zk; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZooKeeperMainServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKMainServer.java similarity index 91% rename from hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZooKeeperMainServer.java rename to hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKMainServer.java index 368919ceb5..29a9d42ac9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZooKeeperMainServer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKMainServer.java @@ -31,7 +31,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; @Category({MiscTests.class, SmallTests.class}) -public class TestZooKeeperMainServer { +public class TestZKMainServer { // ZKMS calls System.exit. 
Catch the call and prevent exit using trick described up in // http://stackoverflow.com/questions/309396/java-how-to-test-methods-that-call-system-exit protected static class ExitException extends SecurityException { @@ -72,20 +72,20 @@ public class TestZooKeeperMainServer { htu.getConfiguration().setInt(HConstants.ZK_SESSION_TIMEOUT, 1000); htu.startMiniZKCluster(); try { - ZooKeeperWatcher zkw = htu.getZooKeeperWatcher(); + ZKWatcher zkw = htu.getZooKeeperWatcher(); String znode = "/testCommandLineWorks"; - ZKUtil.createWithParents(zkw, znode, HConstants.EMPTY_BYTE_ARRAY); - ZKUtil.checkExists(zkw, znode); + ZooKeeperUtil.createWithParents(zkw, znode, HConstants.EMPTY_BYTE_ARRAY); + ZooKeeperUtil.checkExists(zkw, znode); boolean exception = false; try { - ZooKeeperMainServer.main(new String [] {"-server", + ZKMainServer.main(new String [] {"-server", "localhost:" + htu.getZkCluster().getClientPort(), "delete", znode}); } catch (ExitException ee) { // ZKMS calls System.exit which should trigger this exception. 
exception = true; } assertTrue(exception); - assertEquals(-1, ZKUtil.checkExists(zkw, znode)); + assertEquals(-1, ZooKeeperUtil.checkExists(zkw, znode)); } finally { htu.shutdownMiniZKCluster(); System.setSecurityManager(null); // or save and restore original @@ -94,7 +94,7 @@ public class TestZooKeeperMainServer { @Test public void testHostPortParse() { - ZooKeeperMainServer parser = new ZooKeeperMainServer(); + ZKMainServer parser = new ZKMainServer(); Configuration c = HBaseConfiguration.create(); assertEquals("localhost:" + c.get(HConstants.ZOOKEEPER_CLIENT_PORT), parser.parse(c)); final String port = "1234"; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKMulti.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKMulti.java index b4ac59c746..c9b60f1366 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKMulti.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKMulti.java @@ -36,7 +36,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.zookeeper.ZKUtil.ZKUtilOp; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperUtil.ZKUtilOp; import org.apache.zookeeper.CreateMode; import org.apache.zookeeper.KeeperException; import org.apache.zookeeper.Op; @@ -53,7 +53,7 @@ import org.junit.experimental.categories.Category; public class TestZKMulti { private static final Log LOG = LogFactory.getLog(TestZKMulti.class); private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); - private static ZooKeeperWatcher zkw = null; + private static ZKWatcher zkw = null; @BeforeClass public static void setUpBeforeClass() throws Exception { @@ -70,7 +70,7 @@ public class TestZKMulti { return false; } }; - zkw = new ZooKeeperWatcher(conf, + zkw = new 
ZKWatcher(conf, "TestZKMulti", abortable, true); } @@ -82,83 +82,83 @@ public class TestZKMulti { @Test (timeout=60000) public void testSimpleMulti() throws Exception { // null multi - ZKUtil.multiOrSequential(zkw, null, false); + ZooKeeperUtil.multiOrSequential(zkw, null, false); // empty multi - ZKUtil.multiOrSequential(zkw, new LinkedList<>(), false); + ZooKeeperUtil.multiOrSequential(zkw, new LinkedList<>(), false); // single create - String path = ZKUtil.joinZNode(zkw.znodePaths.baseZNode, "testSimpleMulti"); + String path = ZooKeeperUtil.joinZNode(zkw.znodePaths.baseZNode, "testSimpleMulti"); LinkedList singleCreate = new LinkedList<>(); - singleCreate.add(ZKUtilOp.createAndFailSilent(path, new byte[0])); - ZKUtil.multiOrSequential(zkw, singleCreate, false); - assertTrue(ZKUtil.checkExists(zkw, path) != -1); + singleCreate.add(ZooKeeperUtil.ZKUtilOp.createAndFailSilent(path, new byte[0])); + ZooKeeperUtil.multiOrSequential(zkw, singleCreate, false); + assertTrue(ZooKeeperUtil.checkExists(zkw, path) != -1); // single setdata LinkedList singleSetData = new LinkedList<>(); byte [] data = Bytes.toBytes("foobar"); - singleSetData.add(ZKUtilOp.setData(path, data)); - ZKUtil.multiOrSequential(zkw, singleSetData, false); - assertTrue(Bytes.equals(ZKUtil.getData(zkw, path), data)); + singleSetData.add(ZooKeeperUtil.ZKUtilOp.setData(path, data)); + ZooKeeperUtil.multiOrSequential(zkw, singleSetData, false); + assertTrue(Bytes.equals(ZooKeeperUtil.getData(zkw, path), data)); // single delete LinkedList singleDelete = new LinkedList<>(); - singleDelete.add(ZKUtilOp.deleteNodeFailSilent(path)); - ZKUtil.multiOrSequential(zkw, singleDelete, false); - assertTrue(ZKUtil.checkExists(zkw, path) == -1); + singleDelete.add(ZooKeeperUtil.ZKUtilOp.deleteNodeFailSilent(path)); + ZooKeeperUtil.multiOrSequential(zkw, singleDelete, false); + assertTrue(ZooKeeperUtil.checkExists(zkw, path) == -1); } @Test (timeout=60000) public void testComplexMulti() throws Exception { - String path1 
= ZKUtil.joinZNode(zkw.znodePaths.baseZNode, "testComplexMulti1"); - String path2 = ZKUtil.joinZNode(zkw.znodePaths.baseZNode, "testComplexMulti2"); - String path3 = ZKUtil.joinZNode(zkw.znodePaths.baseZNode, "testComplexMulti3"); - String path4 = ZKUtil.joinZNode(zkw.znodePaths.baseZNode, "testComplexMulti4"); - String path5 = ZKUtil.joinZNode(zkw.znodePaths.baseZNode, "testComplexMulti5"); - String path6 = ZKUtil.joinZNode(zkw.znodePaths.baseZNode, "testComplexMulti6"); + String path1 = ZooKeeperUtil.joinZNode(zkw.znodePaths.baseZNode, "testComplexMulti1"); + String path2 = ZooKeeperUtil.joinZNode(zkw.znodePaths.baseZNode, "testComplexMulti2"); + String path3 = ZooKeeperUtil.joinZNode(zkw.znodePaths.baseZNode, "testComplexMulti3"); + String path4 = ZooKeeperUtil.joinZNode(zkw.znodePaths.baseZNode, "testComplexMulti4"); + String path5 = ZooKeeperUtil.joinZNode(zkw.znodePaths.baseZNode, "testComplexMulti5"); + String path6 = ZooKeeperUtil.joinZNode(zkw.znodePaths.baseZNode, "testComplexMulti6"); // create 4 nodes that we'll setData on or delete later LinkedList create4Nodes = new LinkedList<>(); - create4Nodes.add(ZKUtilOp.createAndFailSilent(path1, Bytes.toBytes(path1))); - create4Nodes.add(ZKUtilOp.createAndFailSilent(path2, Bytes.toBytes(path2))); - create4Nodes.add(ZKUtilOp.createAndFailSilent(path3, Bytes.toBytes(path3))); - create4Nodes.add(ZKUtilOp.createAndFailSilent(path4, Bytes.toBytes(path4))); - ZKUtil.multiOrSequential(zkw, create4Nodes, false); - assertTrue(Bytes.equals(ZKUtil.getData(zkw, path1), Bytes.toBytes(path1))); - assertTrue(Bytes.equals(ZKUtil.getData(zkw, path2), Bytes.toBytes(path2))); - assertTrue(Bytes.equals(ZKUtil.getData(zkw, path3), Bytes.toBytes(path3))); - assertTrue(Bytes.equals(ZKUtil.getData(zkw, path4), Bytes.toBytes(path4))); + create4Nodes.add(ZooKeeperUtil.ZKUtilOp.createAndFailSilent(path1, Bytes.toBytes(path1))); + create4Nodes.add(ZooKeeperUtil.ZKUtilOp.createAndFailSilent(path2, Bytes.toBytes(path2))); + 
create4Nodes.add(ZooKeeperUtil.ZKUtilOp.createAndFailSilent(path3, Bytes.toBytes(path3))); + create4Nodes.add(ZooKeeperUtil.ZKUtilOp.createAndFailSilent(path4, Bytes.toBytes(path4))); + ZooKeeperUtil.multiOrSequential(zkw, create4Nodes, false); + assertTrue(Bytes.equals(ZooKeeperUtil.getData(zkw, path1), Bytes.toBytes(path1))); + assertTrue(Bytes.equals(ZooKeeperUtil.getData(zkw, path2), Bytes.toBytes(path2))); + assertTrue(Bytes.equals(ZooKeeperUtil.getData(zkw, path3), Bytes.toBytes(path3))); + assertTrue(Bytes.equals(ZooKeeperUtil.getData(zkw, path4), Bytes.toBytes(path4))); // do multiple of each operation (setData, delete, create) LinkedList ops = new LinkedList<>(); // setData - ops.add(ZKUtilOp.setData(path1, Bytes.add(Bytes.toBytes(path1), Bytes.toBytes(path1)))); - ops.add(ZKUtilOp.setData(path2, Bytes.add(Bytes.toBytes(path2), Bytes.toBytes(path2)))); + ops.add(ZooKeeperUtil.ZKUtilOp.setData(path1, Bytes.add(Bytes.toBytes(path1), Bytes.toBytes(path1)))); + ops.add(ZooKeeperUtil.ZKUtilOp.setData(path2, Bytes.add(Bytes.toBytes(path2), Bytes.toBytes(path2)))); // delete - ops.add(ZKUtilOp.deleteNodeFailSilent(path3)); - ops.add(ZKUtilOp.deleteNodeFailSilent(path4)); + ops.add(ZooKeeperUtil.ZKUtilOp.deleteNodeFailSilent(path3)); + ops.add(ZooKeeperUtil.ZKUtilOp.deleteNodeFailSilent(path4)); // create - ops.add(ZKUtilOp.createAndFailSilent(path5, Bytes.toBytes(path5))); - ops.add(ZKUtilOp.createAndFailSilent(path6, Bytes.toBytes(path6))); - ZKUtil.multiOrSequential(zkw, ops, false); - assertTrue(Bytes.equals(ZKUtil.getData(zkw, path1), + ops.add(ZooKeeperUtil.ZKUtilOp.createAndFailSilent(path5, Bytes.toBytes(path5))); + ops.add(ZooKeeperUtil.ZKUtilOp.createAndFailSilent(path6, Bytes.toBytes(path6))); + ZooKeeperUtil.multiOrSequential(zkw, ops, false); + assertTrue(Bytes.equals(ZooKeeperUtil.getData(zkw, path1), Bytes.add(Bytes.toBytes(path1), Bytes.toBytes(path1)))); - assertTrue(Bytes.equals(ZKUtil.getData(zkw, path2), + 
assertTrue(Bytes.equals(ZooKeeperUtil.getData(zkw, path2), Bytes.add(Bytes.toBytes(path2), Bytes.toBytes(path2)))); - assertTrue(ZKUtil.checkExists(zkw, path3) == -1); - assertTrue(ZKUtil.checkExists(zkw, path4) == -1); - assertTrue(Bytes.equals(ZKUtil.getData(zkw, path5), Bytes.toBytes(path5))); - assertTrue(Bytes.equals(ZKUtil.getData(zkw, path6), Bytes.toBytes(path6))); + assertTrue(ZooKeeperUtil.checkExists(zkw, path3) == -1); + assertTrue(ZooKeeperUtil.checkExists(zkw, path4) == -1); + assertTrue(Bytes.equals(ZooKeeperUtil.getData(zkw, path5), Bytes.toBytes(path5))); + assertTrue(Bytes.equals(ZooKeeperUtil.getData(zkw, path6), Bytes.toBytes(path6))); } @Test (timeout=60000) public void testSingleFailure() throws Exception { // try to delete a node that doesn't exist boolean caughtNoNode = false; - String path = ZKUtil.joinZNode(zkw.znodePaths.baseZNode, "testSingleFailureZ"); + String path = ZooKeeperUtil.joinZNode(zkw.znodePaths.baseZNode, "testSingleFailureZ"); LinkedList ops = new LinkedList<>(); - ops.add(ZKUtilOp.deleteNodeFailSilent(path)); + ops.add(ZooKeeperUtil.ZKUtilOp.deleteNodeFailSilent(path)); try { - ZKUtil.multiOrSequential(zkw, ops, false); + ZooKeeperUtil.multiOrSequential(zkw, ops, false); } catch (KeeperException.NoNodeException nne) { caughtNoNode = true; } @@ -167,9 +167,9 @@ public class TestZKMulti { // try to setData on a node that doesn't exist caughtNoNode = false; ops = new LinkedList<>(); - ops.add(ZKUtilOp.setData(path, Bytes.toBytes(path))); + ops.add(ZooKeeperUtil.ZKUtilOp.setData(path, Bytes.toBytes(path))); try { - ZKUtil.multiOrSequential(zkw, ops, false); + ZooKeeperUtil.multiOrSequential(zkw, ops, false); } catch (KeeperException.NoNodeException nne) { caughtNoNode = true; } @@ -178,10 +178,10 @@ public class TestZKMulti { // try to create on a node that already exists boolean caughtNodeExists = false; ops = new LinkedList<>(); - ops.add(ZKUtilOp.createAndFailSilent(path, Bytes.toBytes(path))); - 
ZKUtil.multiOrSequential(zkw, ops, false); + ops.add(ZooKeeperUtil.ZKUtilOp.createAndFailSilent(path, Bytes.toBytes(path))); + ZooKeeperUtil.multiOrSequential(zkw, ops, false); try { - ZKUtil.multiOrSequential(zkw, ops, false); + ZooKeeperUtil.multiOrSequential(zkw, ops, false); } catch (KeeperException.NodeExistsException nee) { caughtNodeExists = true; } @@ -191,107 +191,107 @@ public class TestZKMulti { @Test (timeout=60000) public void testSingleFailureInMulti() throws Exception { // try a multi where all but one operation succeeds - String pathA = ZKUtil.joinZNode(zkw.znodePaths.baseZNode, "testSingleFailureInMultiA"); - String pathB = ZKUtil.joinZNode(zkw.znodePaths.baseZNode, "testSingleFailureInMultiB"); - String pathC = ZKUtil.joinZNode(zkw.znodePaths.baseZNode, "testSingleFailureInMultiC"); + String pathA = ZooKeeperUtil.joinZNode(zkw.znodePaths.baseZNode, "testSingleFailureInMultiA"); + String pathB = ZooKeeperUtil.joinZNode(zkw.znodePaths.baseZNode, "testSingleFailureInMultiB"); + String pathC = ZooKeeperUtil.joinZNode(zkw.znodePaths.baseZNode, "testSingleFailureInMultiC"); LinkedList ops = new LinkedList<>(); - ops.add(ZKUtilOp.createAndFailSilent(pathA, Bytes.toBytes(pathA))); - ops.add(ZKUtilOp.createAndFailSilent(pathB, Bytes.toBytes(pathB))); - ops.add(ZKUtilOp.deleteNodeFailSilent(pathC)); + ops.add(ZooKeeperUtil.ZKUtilOp.createAndFailSilent(pathA, Bytes.toBytes(pathA))); + ops.add(ZooKeeperUtil.ZKUtilOp.createAndFailSilent(pathB, Bytes.toBytes(pathB))); + ops.add(ZooKeeperUtil.ZKUtilOp.deleteNodeFailSilent(pathC)); boolean caughtNoNode = false; try { - ZKUtil.multiOrSequential(zkw, ops, false); + ZooKeeperUtil.multiOrSequential(zkw, ops, false); } catch (KeeperException.NoNodeException nne) { caughtNoNode = true; } assertTrue(caughtNoNode); // assert that none of the operations succeeded - assertTrue(ZKUtil.checkExists(zkw, pathA) == -1); - assertTrue(ZKUtil.checkExists(zkw, pathB) == -1); - assertTrue(ZKUtil.checkExists(zkw, pathC) == -1); + 
assertTrue(ZooKeeperUtil.checkExists(zkw, pathA) == -1); + assertTrue(ZooKeeperUtil.checkExists(zkw, pathB) == -1); + assertTrue(ZooKeeperUtil.checkExists(zkw, pathC) == -1); } @Test (timeout=60000) public void testMultiFailure() throws Exception { - String pathX = ZKUtil.joinZNode(zkw.znodePaths.baseZNode, "testMultiFailureX"); - String pathY = ZKUtil.joinZNode(zkw.znodePaths.baseZNode, "testMultiFailureY"); - String pathZ = ZKUtil.joinZNode(zkw.znodePaths.baseZNode, "testMultiFailureZ"); + String pathX = ZooKeeperUtil.joinZNode(zkw.znodePaths.baseZNode, "testMultiFailureX"); + String pathY = ZooKeeperUtil.joinZNode(zkw.znodePaths.baseZNode, "testMultiFailureY"); + String pathZ = ZooKeeperUtil.joinZNode(zkw.znodePaths.baseZNode, "testMultiFailureZ"); // create X that we will use to fail create later LinkedList ops = new LinkedList<>(); - ops.add(ZKUtilOp.createAndFailSilent(pathX, Bytes.toBytes(pathX))); - ZKUtil.multiOrSequential(zkw, ops, false); + ops.add(ZooKeeperUtil.ZKUtilOp.createAndFailSilent(pathX, Bytes.toBytes(pathX))); + ZooKeeperUtil.multiOrSequential(zkw, ops, false); // fail one of each create ,setData, delete - String pathV = ZKUtil.joinZNode(zkw.znodePaths.baseZNode, "testMultiFailureV"); - String pathW = ZKUtil.joinZNode(zkw.znodePaths.baseZNode, "testMultiFailureW"); + String pathV = ZooKeeperUtil.joinZNode(zkw.znodePaths.baseZNode, "testMultiFailureV"); + String pathW = ZooKeeperUtil.joinZNode(zkw.znodePaths.baseZNode, "testMultiFailureW"); ops = new LinkedList<>(); - ops.add(ZKUtilOp.createAndFailSilent(pathX, Bytes.toBytes(pathX))); // fail -- already exists - ops.add(ZKUtilOp.setData(pathY, Bytes.toBytes(pathY))); // fail -- doesn't exist - ops.add(ZKUtilOp.deleteNodeFailSilent(pathZ)); // fail -- doesn't exist - ops.add(ZKUtilOp.createAndFailSilent(pathX, Bytes.toBytes(pathV))); // pass - ops.add(ZKUtilOp.createAndFailSilent(pathX, Bytes.toBytes(pathW))); // pass + ops.add(ZooKeeperUtil.ZKUtilOp.createAndFailSilent(pathX, 
Bytes.toBytes(pathX))); // fail -- already exists + ops.add(ZooKeeperUtil.ZKUtilOp.setData(pathY, Bytes.toBytes(pathY))); // fail -- doesn't exist + ops.add(ZooKeeperUtil.ZKUtilOp.deleteNodeFailSilent(pathZ)); // fail -- doesn't exist + ops.add(ZooKeeperUtil.ZKUtilOp.createAndFailSilent(pathX, Bytes.toBytes(pathV))); // pass + ops.add(ZooKeeperUtil.ZKUtilOp.createAndFailSilent(pathX, Bytes.toBytes(pathW))); // pass boolean caughtNodeExists = false; try { - ZKUtil.multiOrSequential(zkw, ops, false); + ZooKeeperUtil.multiOrSequential(zkw, ops, false); } catch (KeeperException.NodeExistsException nee) { // check first operation that fails throws exception caughtNodeExists = true; } assertTrue(caughtNodeExists); // check that no modifications were made - assertFalse(ZKUtil.checkExists(zkw, pathX) == -1); - assertTrue(ZKUtil.checkExists(zkw, pathY) == -1); - assertTrue(ZKUtil.checkExists(zkw, pathZ) == -1); - assertTrue(ZKUtil.checkExists(zkw, pathW) == -1); - assertTrue(ZKUtil.checkExists(zkw, pathV) == -1); + assertFalse(ZooKeeperUtil.checkExists(zkw, pathX) == -1); + assertTrue(ZooKeeperUtil.checkExists(zkw, pathY) == -1); + assertTrue(ZooKeeperUtil.checkExists(zkw, pathZ) == -1); + assertTrue(ZooKeeperUtil.checkExists(zkw, pathW) == -1); + assertTrue(ZooKeeperUtil.checkExists(zkw, pathV) == -1); // test that with multiple failures, throws an exception corresponding to first failure in list ops = new LinkedList<>(); - ops.add(ZKUtilOp.setData(pathY, Bytes.toBytes(pathY))); // fail -- doesn't exist - ops.add(ZKUtilOp.createAndFailSilent(pathX, Bytes.toBytes(pathX))); // fail -- exists + ops.add(ZooKeeperUtil.ZKUtilOp.setData(pathY, Bytes.toBytes(pathY))); // fail -- doesn't exist + ops.add(ZooKeeperUtil.ZKUtilOp.createAndFailSilent(pathX, Bytes.toBytes(pathX))); // fail -- exists boolean caughtNoNode = false; try { - ZKUtil.multiOrSequential(zkw, ops, false); + ZooKeeperUtil.multiOrSequential(zkw, ops, false); } catch (KeeperException.NoNodeException nne) { // check 
first operation that fails throws exception caughtNoNode = true; } assertTrue(caughtNoNode); // check that no modifications were made - assertFalse(ZKUtil.checkExists(zkw, pathX) == -1); - assertTrue(ZKUtil.checkExists(zkw, pathY) == -1); - assertTrue(ZKUtil.checkExists(zkw, pathZ) == -1); - assertTrue(ZKUtil.checkExists(zkw, pathW) == -1); - assertTrue(ZKUtil.checkExists(zkw, pathV) == -1); + assertFalse(ZooKeeperUtil.checkExists(zkw, pathX) == -1); + assertTrue(ZooKeeperUtil.checkExists(zkw, pathY) == -1); + assertTrue(ZooKeeperUtil.checkExists(zkw, pathZ) == -1); + assertTrue(ZooKeeperUtil.checkExists(zkw, pathW) == -1); + assertTrue(ZooKeeperUtil.checkExists(zkw, pathV) == -1); } @Test (timeout=60000) public void testRunSequentialOnMultiFailure() throws Exception { - String path1 = ZKUtil.joinZNode(zkw.znodePaths.baseZNode, "runSequential1"); - String path2 = ZKUtil.joinZNode(zkw.znodePaths.baseZNode, "runSequential2"); - String path3 = ZKUtil.joinZNode(zkw.znodePaths.baseZNode, "runSequential3"); - String path4 = ZKUtil.joinZNode(zkw.znodePaths.baseZNode, "runSequential4"); + String path1 = ZooKeeperUtil.joinZNode(zkw.znodePaths.baseZNode, "runSequential1"); + String path2 = ZooKeeperUtil.joinZNode(zkw.znodePaths.baseZNode, "runSequential2"); + String path3 = ZooKeeperUtil.joinZNode(zkw.znodePaths.baseZNode, "runSequential3"); + String path4 = ZooKeeperUtil.joinZNode(zkw.znodePaths.baseZNode, "runSequential4"); // create some nodes that we will use later LinkedList ops = new LinkedList<>(); - ops.add(ZKUtilOp.createAndFailSilent(path1, Bytes.toBytes(path1))); - ops.add(ZKUtilOp.createAndFailSilent(path2, Bytes.toBytes(path2))); - ZKUtil.multiOrSequential(zkw, ops, false); + ops.add(ZooKeeperUtil.ZKUtilOp.createAndFailSilent(path1, Bytes.toBytes(path1))); + ops.add(ZooKeeperUtil.ZKUtilOp.createAndFailSilent(path2, Bytes.toBytes(path2))); + ZooKeeperUtil.multiOrSequential(zkw, ops, false); // test that, even with operations that fail, the ones that would pass 
will pass // with runSequentialOnMultiFailure ops = new LinkedList<>(); - ops.add(ZKUtilOp.setData(path1, Bytes.add(Bytes.toBytes(path1), Bytes.toBytes(path1)))); // pass - ops.add(ZKUtilOp.deleteNodeFailSilent(path2)); // pass - ops.add(ZKUtilOp.deleteNodeFailSilent(path3)); // fail -- node doesn't exist - ops.add(ZKUtilOp.createAndFailSilent(path4, + ops.add(ZooKeeperUtil.ZKUtilOp.setData(path1, Bytes.add(Bytes.toBytes(path1), Bytes.toBytes(path1)))); // pass + ops.add(ZooKeeperUtil.ZKUtilOp.deleteNodeFailSilent(path2)); // pass + ops.add(ZooKeeperUtil.ZKUtilOp.deleteNodeFailSilent(path3)); // fail -- node doesn't exist + ops.add(ZooKeeperUtil.ZKUtilOp.createAndFailSilent(path4, Bytes.add(Bytes.toBytes(path4), Bytes.toBytes(path4)))); // pass - ZKUtil.multiOrSequential(zkw, ops, true); - assertTrue(Bytes.equals(ZKUtil.getData(zkw, path1), + ZooKeeperUtil.multiOrSequential(zkw, ops, true); + assertTrue(Bytes.equals(ZooKeeperUtil.getData(zkw, path1), Bytes.add(Bytes.toBytes(path1), Bytes.toBytes(path1)))); - assertTrue(ZKUtil.checkExists(zkw, path2) == -1); - assertTrue(ZKUtil.checkExists(zkw, path3) == -1); - assertFalse(ZKUtil.checkExists(zkw, path4) == -1); + assertTrue(ZooKeeperUtil.checkExists(zkw, path2) == -1); + assertTrue(ZooKeeperUtil.checkExists(zkw, path3) == -1); + assertFalse(ZooKeeperUtil.checkExists(zkw, path4) == -1); } /** @@ -303,11 +303,11 @@ public class TestZKMulti { String parentZNode = "/testRootMulti"; createZNodeTree(parentZNode); - ZKUtil.deleteChildrenRecursivelyMultiOrSequential(zkw, true, parentZNode); + ZooKeeperUtil.deleteChildrenRecursivelyMultiOrSequential(zkw, true, parentZNode); assertTrue("Wrongly deleted parent znode!", - ZKUtil.checkExists(zkw, parentZNode) > -1); - List children = zkw.getRecoverableZooKeeper().getChildren( + ZooKeeperUtil.checkExists(zkw, parentZNode) > -1); + List children = zkw.getRecoverableZK().getChildren( parentZNode, false); assertTrue("Failed to delete child znodes!", 0 == children.size()); } @@ 
-321,8 +321,8 @@ public class TestZKMulti { String parentZNode = "/testdeleteNodeRecursivelyMulti"; createZNodeTree(parentZNode); - ZKUtil.deleteNodeRecursively(zkw, parentZNode); - assertTrue("Parent znode should be deleted.", ZKUtil.checkExists(zkw, parentZNode) == -1); + ZooKeeperUtil.deleteNodeRecursively(zkw, parentZNode); + assertTrue("Parent znode should be deleted.", ZooKeeperUtil.checkExists(zkw, parentZNode) == -1); } @Test(timeout = 60000) @@ -334,11 +334,11 @@ public class TestZKMulti { createZNodeTree(parentZNode2); createZNodeTree(parentZNode3); - ZKUtil.deleteNodeRecursivelyMultiOrSequential(zkw, false, parentZNode1, parentZNode2, + ZooKeeperUtil.deleteNodeRecursivelyMultiOrSequential(zkw, false, parentZNode1, parentZNode2, parentZNode3); - assertTrue("Parent znode 1 should be deleted.", ZKUtil.checkExists(zkw, parentZNode1) == -1); - assertTrue("Parent znode 2 should be deleted.", ZKUtil.checkExists(zkw, parentZNode2) == -1); - assertTrue("Parent znode 3 should be deleted.", ZKUtil.checkExists(zkw, parentZNode3) == -1); + assertTrue("Parent znode 1 should be deleted.", ZooKeeperUtil.checkExists(zkw, parentZNode1) == -1); + assertTrue("Parent znode 2 should be deleted.", ZooKeeperUtil.checkExists(zkw, parentZNode2) == -1); + assertTrue("Parent znode 3 should be deleted.", ZooKeeperUtil.checkExists(zkw, parentZNode3) == -1); } @Test(timeout = 60000) @@ -350,19 +350,19 @@ public class TestZKMulti { createZNodeTree(parentZNode2); createZNodeTree(parentZNode3); - ZKUtil.deleteChildrenRecursivelyMultiOrSequential(zkw, true, parentZNode1, parentZNode2, + ZooKeeperUtil.deleteChildrenRecursivelyMultiOrSequential(zkw, true, parentZNode1, parentZNode2, parentZNode3); - assertTrue("Wrongly deleted parent znode 1!", ZKUtil.checkExists(zkw, parentZNode1) > -1); - List children = zkw.getRecoverableZooKeeper().getChildren(parentZNode1, false); + assertTrue("Wrongly deleted parent znode 1!", ZooKeeperUtil.checkExists(zkw, parentZNode1) > -1); + List children = 
zkw.getRecoverableZK().getChildren(parentZNode1, false); assertTrue("Failed to delete child znodes of parent znode 1!", 0 == children.size()); - assertTrue("Wrongly deleted parent znode 2!", ZKUtil.checkExists(zkw, parentZNode2) > -1); - children = zkw.getRecoverableZooKeeper().getChildren(parentZNode2, false); + assertTrue("Wrongly deleted parent znode 2!", ZooKeeperUtil.checkExists(zkw, parentZNode2) > -1); + children = zkw.getRecoverableZK().getChildren(parentZNode2, false); assertTrue("Failed to delete child znodes of parent znode 1!", 0 == children.size()); - assertTrue("Wrongly deleted parent znode 3!", ZKUtil.checkExists(zkw, parentZNode3) > -1); - children = zkw.getRecoverableZooKeeper().getChildren(parentZNode3, false); + assertTrue("Wrongly deleted parent znode 3!", ZooKeeperUtil.checkExists(zkw, parentZNode3) > -1); + children = zkw.getRecoverableZK().getChildren(parentZNode3, false); assertTrue("Failed to delete child znodes of parent znode 1!", 0 == children.size()); } @@ -387,6 +387,6 @@ public class TestZKMulti { } level++; } - zkw.getRecoverableZooKeeper().multi(opList); + zkw.getRecoverableZK().multi(opList); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZooKeeperACL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZooKeeperACL.java index 05ad73e974..1dcc0d9b2f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZooKeeperACL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZooKeeperACL.java @@ -49,9 +49,9 @@ public class TestZooKeeperACL { private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); - private static ZooKeeperWatcher zkw; + private static ZKWatcher zkw; private static boolean secureZKAvailable; - + @BeforeClass public static void setUpBeforeClass() throws Exception { File saslConfFile = File.createTempFile("tmp", "jaas.conf"); @@ -76,7 +76,7 @@ public class TestZooKeeperACL { 
TEST_UTIL.getConfiguration().setInt("hbase.zookeeper.property.maxClientCnxns", 1000); // If Hadoop is missing HADOOP-7070 the cluster will fail to start due to - // the JAAS configuration required by ZK being clobbered by Hadoop + // the JAAS configuration required by ZK being clobbered by Hadoop try { TEST_UTIL.startMiniCluster(); } catch (IOException e) { @@ -84,7 +84,7 @@ public class TestZooKeeperACL { secureZKAvailable = false; return; } - zkw = new ZooKeeperWatcher( + zkw = new ZKWatcher( new Configuration(TEST_UTIL.getConfiguration()), TestZooKeeper.class.getName(), null); } @@ -112,7 +112,7 @@ public class TestZooKeeperACL { } /** - * Create a node and check its ACL. When authentication is enabled on + * Create a node and check its ACL. When authentication is enabled on * ZooKeeper, all nodes (except /hbase/root-region-server, /hbase/master * and /hbase/hbaseid) should be created so that only the hbase server user * (master or region server user) that created them can access them, and @@ -129,7 +129,7 @@ public class TestZooKeeperACL { return; } - List acls = zkw.getRecoverableZooKeeper().getZooKeeper() + List acls = zkw.getRecoverableZK().getZooKeeper() .getACL("/hbase", new Stat()); assertEquals(acls.size(),1); assertEquals(acls.get(0).getId().getScheme(),"sasl"); @@ -148,7 +148,7 @@ public class TestZooKeeperACL { return; } - List acls = zkw.getRecoverableZooKeeper().getZooKeeper() + List acls = zkw.getRecoverableZK().getZooKeeper() .getACL("/hbase/root-region-server", new Stat()); assertEquals(acls.size(),2); @@ -185,7 +185,7 @@ public class TestZooKeeperACL { return; } - List acls = zkw.getRecoverableZooKeeper().getZooKeeper() + List acls = zkw.getRecoverableZK().getZooKeeper() .getACL("/hbase/master", new Stat()); assertEquals(acls.size(),2); @@ -221,7 +221,7 @@ public class TestZooKeeperACL { return; } - List acls = zkw.getRecoverableZooKeeper().getZooKeeper() + List acls = zkw.getRecoverableZK().getZooKeeper() .getACL("/hbase/hbaseid", new Stat()); 
assertEquals(acls.size(),2); @@ -256,8 +256,8 @@ public class TestZooKeeperACL { return; } - ZKUtil.createWithParents(zkw, "/testACLNode"); - List acls = zkw.getRecoverableZooKeeper().getZooKeeper() + ZooKeeperUtil.createWithParents(zkw, "/testACLNode"); + List acls = zkw.getRecoverableZK().getZooKeeper() .getACL("/testACLNode", new Stat()); assertEquals(acls.size(),1); assertEquals(acls.get(0).getId().getScheme(),"sasl"); @@ -270,7 +270,7 @@ public class TestZooKeeperACL { */ @Test public void testIsZooKeeperSecure() throws Exception { - boolean testJaasConfig = ZKUtil.isSecureZooKeeper(new Configuration(TEST_UTIL.getConfiguration())); + boolean testJaasConfig = ZooKeeperUtil.isSecureZooKeeper(new Configuration(TEST_UTIL.getConfiguration())); assertEquals(testJaasConfig, secureZKAvailable); // Define Jaas configuration without ZooKeeper Jaas config File saslConfFile = File.createTempFile("tmp", "fakeJaas.conf"); @@ -281,11 +281,11 @@ public class TestZooKeeperACL { System.setProperty("java.security.auth.login.config", saslConfFile.getAbsolutePath()); - testJaasConfig = ZKUtil.isSecureZooKeeper(new Configuration(TEST_UTIL.getConfiguration())); + testJaasConfig = ZooKeeperUtil.isSecureZooKeeper(new Configuration(TEST_UTIL.getConfiguration())); assertEquals(testJaasConfig, false); saslConfFile.delete(); } - + /** * Check if Programmatic way of setting zookeeper security settings is valid. 
*/ @@ -295,13 +295,13 @@ public class TestZooKeeperACL { javax.security.auth.login.Configuration.setConfiguration(new DummySecurityConfiguration()); Configuration config = new Configuration(HBaseConfiguration.create()); - boolean testJaasConfig = ZKUtil.isSecureZooKeeper(config); + boolean testJaasConfig = ZooKeeperUtil.isSecureZooKeeper(config); assertEquals(testJaasConfig, false); // Now set authentication scheme to Kerberos still it should return false // because no configuration set config.set("hbase.security.authentication", "kerberos"); - testJaasConfig = ZKUtil.isSecureZooKeeper(config); + testJaasConfig = ZooKeeperUtil.isSecureZooKeeper(config); assertEquals(testJaasConfig, false); // Now set programmatic options related to security @@ -309,7 +309,7 @@ public class TestZooKeeperACL { config.set(HConstants.ZK_CLIENT_KERBEROS_PRINCIPAL, "dummy"); config.set(HConstants.ZK_SERVER_KEYTAB_FILE, "/dummy/file"); config.set(HConstants.ZK_SERVER_KERBEROS_PRINCIPAL, "dummy"); - testJaasConfig = ZKUtil.isSecureZooKeeper(config); + testJaasConfig = ZooKeeperUtil.isSecureZooKeeper(config); assertEquals(true, testJaasConfig); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZooKeeperNodeTracker.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZooKeeperNodeTracker.java index 87e2309db9..c782d6b665 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZooKeeperNodeTracker.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZooKeeperNodeTracker.java @@ -71,7 +71,7 @@ public class TestZooKeeperNodeTracker { */ @Test public void testInterruptible() throws IOException, InterruptedException { Abortable abortable = new StubAbortable(); - ZooKeeperWatcher zk = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(), + ZKWatcher zk = new ZKWatcher(TEST_UTIL.getConfiguration(), "testInterruptible", abortable); final TestTracker tracker = new TestTracker(zk, "/xyz", abortable); 
tracker.start(); @@ -95,12 +95,12 @@ public class TestZooKeeperNodeTracker { @Test public void testNodeTracker() throws Exception { Abortable abortable = new StubAbortable(); - ZooKeeperWatcher zk = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(), + ZKWatcher zk = new ZKWatcher(TEST_UTIL.getConfiguration(), "testNodeTracker", abortable); - ZKUtil.createAndFailSilent(zk, zk.znodePaths.baseZNode); + ZooKeeperUtil.createAndFailSilent(zk, zk.znodePaths.baseZNode); final String node = - ZKUtil.joinZNode(zk.znodePaths.baseZNode, new Long(rand.nextLong()).toString()); + ZooKeeperUtil.joinZNode(zk.znodePaths.baseZNode, new Long(rand.nextLong()).toString()); final byte [] dataOne = Bytes.toBytes("dataOne"); final byte [] dataTwo = Bytes.toBytes("dataTwo"); @@ -213,14 +213,14 @@ public class TestZooKeeperNodeTracker { TestTracker tracker; boolean hasData; - public WaitToGetDataThread(ZooKeeperWatcher zk, String node) { + public WaitToGetDataThread(ZKWatcher zk, String node) { tracker = new TestTracker(zk, node, null); tracker.start(); zk.registerListener(tracker); hasData = false; } - public WaitToGetDataThread(ZooKeeperWatcher zk, String node, + public WaitToGetDataThread(ZKWatcher zk, String node, TestTracker tracker) { this.tracker = tracker; hasData = false; @@ -239,14 +239,14 @@ public class TestZooKeeperNodeTracker { } } - public static class TestTracker extends ZooKeeperNodeTracker { - public TestTracker(ZooKeeperWatcher watcher, String node, + public static class TestTracker extends ZKNodeTracker { + public TestTracker(ZKWatcher watcher, String node, Abortable abortable) { super(watcher, node, abortable); } } - public static class TestingZKListener extends ZooKeeperListener { + public static class TestingZKListener extends ZKListener { private static final Log LOG = LogFactory.getLog(NodeDeletionListener.class); private Semaphore deletedLock; @@ -254,7 +254,7 @@ public class TestZooKeeperNodeTracker { private Semaphore changedLock; private String node; - public 
TestingZKListener(ZooKeeperWatcher watcher, String node) { + public TestingZKListener(ZKWatcher watcher, String node) { super(watcher); deletedLock = new Semaphore(0); createdLock = new Semaphore(0); @@ -302,12 +302,12 @@ public class TestZooKeeperNodeTracker { public static class StubAbortable implements Abortable { @Override public void abort(final String msg, final Throwable t) {} - + @Override public boolean isAborted() { return false; } - + } public static class StubWatcher implements Watcher { @@ -317,31 +317,31 @@ public class TestZooKeeperNodeTracker { @Test public void testCleanZNode() throws Exception { - ZooKeeperWatcher zkw = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(), + ZKWatcher zkw = new ZKWatcher(TEST_UTIL.getConfiguration(), "testNodeTracker", new TestZooKeeperNodeTracker.StubAbortable()); final ServerName sn = ServerName.valueOf("127.0.0.1:52", 45L); - ZKUtil.createAndFailSilent(zkw, + ZooKeeperUtil.createAndFailSilent(zkw, TEST_UTIL.getConfiguration().get(HConstants.ZOOKEEPER_ZNODE_PARENT, HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT)); final String nodeName = zkw.znodePaths.masterAddressZNode; // Check that we manage the case when there is no data - ZKUtil.createAndFailSilent(zkw, nodeName); + ZooKeeperUtil.createAndFailSilent(zkw, nodeName); MasterAddressTracker.deleteIfEquals(zkw, sn.toString()); - Assert.assertFalse(ZKUtil.getData(zkw, nodeName) == null); + Assert.assertFalse(ZooKeeperUtil.getData(zkw, nodeName) == null); // Check that we don't delete if we're not supposed to - ZKUtil.setData(zkw, nodeName, MasterAddressTracker.toByteArray(sn, 0)); + ZooKeeperUtil.setData(zkw, nodeName, MasterAddressTracker.toByteArray(sn, 0)); MasterAddressTracker.deleteIfEquals(zkw, ServerName.valueOf("127.0.0.2:52", 45L).toString()); - Assert.assertFalse(ZKUtil.getData(zkw, nodeName) == null); + Assert.assertFalse(ZooKeeperUtil.getData(zkw, nodeName) == null); // Check that we delete when we're supposed to - ZKUtil.setData(zkw, 
nodeName,MasterAddressTracker.toByteArray(sn, 0)); + ZooKeeperUtil.setData(zkw, nodeName,MasterAddressTracker.toByteArray(sn, 0)); MasterAddressTracker.deleteIfEquals(zkw, sn.toString()); - Assert.assertTrue( ZKUtil.getData(zkw, nodeName)== null ); + Assert.assertTrue( ZooKeeperUtil.getData(zkw, nodeName)== null ); // Check that we support the case when the znode does not exist MasterAddressTracker.deleteIfEquals(zkw, sn.toString()); // must not throw an exception diff --git a/hbase-shell/src/main/ruby/hbase/admin.rb b/hbase-shell/src/main/ruby/hbase/admin.rb index 581ccb3954..d5df633baa 100644 --- a/hbase-shell/src/main/ruby/hbase/admin.rb +++ b/hbase-shell/src/main/ruby/hbase/admin.rb @@ -322,12 +322,12 @@ module Hbase #---------------------------------------------------------------------------------------------- # Returns ZooKeeper status dump def zk_dump - @zk_wrapper = org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher.new( + @zk_wrapper = org.apache.hadoop.hbase.zookeeper.ZKWatcher.new( @admin.getConfiguration, 'admin', nil ) - zk = @zk_wrapper.getRecoverableZooKeeper.getZooKeeper + zk = @zk_wrapper.getRecoverableZK.getZooKeeper @zk_main = org.apache.zookeeper.ZooKeeperMain.new(zk) - org.apache.hadoop.hbase.zookeeper.ZKUtil.dump(@zk_wrapper) + org.apache.hadoop.hbase.zookeeper.ZooKeeperUtil.dump(@zk_wrapper) end diff --git a/hbase-shell/src/main/ruby/hbase/table.rb b/hbase-shell/src/main/ruby/hbase/table.rb index 320ec7c7ae..6cd9e9d2ee 100644 --- a/hbase-shell/src/main/ruby/hbase/table.rb +++ b/hbase-shell/src/main/ruby/hbase/table.rb @@ -635,7 +635,7 @@ EOF perms.to_java_bytes )) end - op.setACL(map) + op.setACL(map) end def set_cell_visibility(oprattr, visibility) diff --git a/hbase-zookeeper/pom.xml b/hbase-zookeeper/pom.xml new file mode 100644 index 0000000000..de0c7958c0 --- /dev/null +++ b/hbase-zookeeper/pom.xml @@ -0,0 +1,417 @@ + + + + 4.0.0 + + hbase-build-configuration + org.apache.hbase + 3.0.0-SNAPSHOT + ../hbase-build-configuration + + hbase-zookeeper + Apache HBase - Zookeeper + Zookeeper Helpers for HBase + + +
+ + + + src/test/resources/META-INF/ + META-INF/ + + NOTICE + + true + + + src/test/resources + + **/** + + + + + + org.apache.maven.plugins + maven-site-plugin + + true + + + + + + maven-assembly-plugin + + true + + + + + org.apache.maven.plugins + maven-source-plugin + + + package + + jar + test-jar + + + + + + + org.apache.maven.plugins + maven-eclipse-plugin + + + org.jamon.project.jamonnature + + + org.jamon.project.templateBuilder + org.eclipse.jdt.core.javabuilder + org.jamon.project.markerUpdater + + + + .settings/org.jamon.prefs + # now + eclipse.preferences.version=1 + templateSourceDir=src/main/jamon + templateOutputDir=target/generated-jamon + + + + + + + + org.codehaus.mojo + findbugs-maven-plugin + + + + maven-surefire-plugin + + + + listener + org.apache.hadoop.hbase.ResourceCheckerJUnitListener + + + + target/test-classes/webapps + org.apache.hadoop.hbase.shaded. + + + + + + + + + + org.eclipse.m2e + lifecycle-mapping + + + + + + org.apache.maven.plugins + maven-antrun-plugin + [1.6,) + + run + + + + + false + true + + + + + + org.apache.maven.plugins + maven-dependency-plugin + [2.8,) + + build-classpath + + + + + + + + + org.apache.maven.plugins + maven-compiler-plugin + [3.2,) + + compile + + + + + + + + + + + + + + + + org.apache.hbase.thirdparty + hbase-shaded-protobuf + + + org.apache.hbase.thirdparty + hbase-shaded-miscellaneous + + + + org.apache.hbase + hbase-common + + + org.apache.hbase + hbase-annotations + test-jar + test + + + org.apache.hbase + hbase-protocol-shaded + + + org.apache.hbase + hbase-hadoop-compat + + + org.apache.hbase + hbase-hadoop2-compat + + + + org.apache.hadoop + hadoop-mapreduce-client-core + + + + + + com.github.stephenc.findbugs + findbugs-annotations + true + + + org.apache.commons + commons-lang3 + + + commons-logging + commons-logging + + + log4j + log4j + + + org.apache.zookeeper + zookeeper + + + + org.apache.htrace + htrace-core + + + + junit + junit + test + + + org.mockito + mockito-all + test + + + + + 
+ apache-release + + + + org.apache.maven.plugins + maven-resources-plugin + + + license-javadocs + prepare-package + + copy-resources + + + ${project.build.directory}/apidocs + + + src/main/javadoc/META-INF/ + META-INF/ + + LICENSE + NOTICE + + true + + + + + + + + + + + + skipZooKeeperTests + + + skipZooKeeperTests + + + + true + true + + + + + + + + hadoop-2.0 + + + + !hadoop.profile + + + + + org.apache.hadoop + hadoop-common + + + org.apache.hadoop + hadoop-auth + + + + + + maven-dependency-plugin + + + create-mrapp-generated-classpath + generate-test-resources + + build-classpath + + + + ${project.build.directory}/test-classes/mrapp-generated-classpath + + + + + + + + + + hadoop-3.0 + + + hadoop.profile + 3.0 + + + + ${hadoop-three.version} + + + + org.apache.hadoop + hadoop-common + + + org.apache.hadoop + hadoop-auth + + + + + + maven-dependency-plugin + + + create-mrapp-generated-classpath + generate-test-resources + + build-classpath + + + + ${project.build.directory}/test-classes/mrapp-generated-classpath + + + + + + + + + diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ClusterStatusTracker.java b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ClusterStatusTracker.java similarity index 82% rename from hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ClusterStatusTracker.java rename to hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ClusterStatusTracker.java index fd60765a42..d35b1d1264 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ClusterStatusTracker.java +++ b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ClusterStatusTracker.java @@ -20,21 +20,21 @@ package org.apache.hadoop.hbase.zookeeper; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.protobuf.ProtobufMagic; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.Abortable; -import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos; import org.apache.zookeeper.KeeperException; /** * Tracker on cluster settings up in zookeeper. - * This is not related to {@link org.apache.hadoop.hbase.ClusterStatus}. That class - * is a data structure that holds snapshot of current view on cluster. This class + * This is not related to org.apache.hadoop.hbase.ClusterStatus hbase-client. That class + * is a data structure that holds snapshot of current view on cluster. This class * is about tracking cluster attributes up in zookeeper. * */ @InterfaceAudience.Private -public class ClusterStatusTracker extends ZooKeeperNodeTracker { +public class ClusterStatusTracker extends ZKNodeTracker { private static final Log LOG = LogFactory.getLog(ClusterStatusTracker.class); /** @@ -45,7 +45,7 @@ public class ClusterStatusTracker extends ZooKeeperNodeTracker { * @param watcher * @param abortable */ - public ClusterStatusTracker(ZooKeeperWatcher watcher, Abortable abortable) { + public ClusterStatusTracker(ZKWatcher watcher, Abortable abortable) { super(watcher, watcher.znodePaths.clusterStateZNode, abortable); } @@ -66,9 +66,9 @@ public class ClusterStatusTracker extends ZooKeeperNodeTracker { throws KeeperException { byte [] upData = toByteArray(); try { - ZKUtil.createAndWatch(watcher, watcher.znodePaths.clusterStateZNode, upData); + ZooKeeperUtil.createAndWatch(watcher, watcher.znodePaths.clusterStateZNode, upData); } catch(KeeperException.NodeExistsException nee) { - ZKUtil.setData(watcher, watcher.znodePaths.clusterStateZNode, upData); + ZooKeeperUtil.setData(watcher, watcher.znodePaths.clusterStateZNode, upData); } } @@ -79,7 +79,7 @@ public class ClusterStatusTracker extends ZooKeeperNodeTracker { public void setClusterDown() throws KeeperException { try { - ZKUtil.deleteNode(watcher, watcher.znodePaths.clusterStateZNode); + ZooKeeperUtil.deleteNode(watcher, 
watcher.znodePaths.clusterStateZNode); } catch(KeeperException.NoNodeException nne) { LOG.warn("Attempted to set cluster as down but already down, cluster " + "state node (" + watcher.znodePaths.clusterStateZNode + ") not found"); @@ -94,6 +94,6 @@ public class ClusterStatusTracker extends ZooKeeperNodeTracker { ZooKeeperProtos.ClusterUp.Builder builder = ZooKeeperProtos.ClusterUp.newBuilder(); builder.setStartDate(new java.util.Date().toString()); - return ProtobufUtil.prependPBMagic(builder.build().toByteArray()); + return ProtobufMagic.prependPBMagic(builder.build().toByteArray()); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/DeletionListener.java b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/DeletionListener.java similarity index 93% rename from hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/DeletionListener.java rename to hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/DeletionListener.java index c58a840f2b..2c3b90a6fb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/DeletionListener.java +++ b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/DeletionListener.java @@ -30,7 +30,7 @@ import org.apache.zookeeper.KeeperException; * A ZooKeeper watcher meant to detect deletions of ZNodes. */ @InterfaceAudience.Private -public class DeletionListener extends ZooKeeperListener { +public class DeletionListener extends ZKListener { private static final Log LOG = LogFactory.getLog(DeletionListener.class); @@ -46,7 +46,7 @@ public class DeletionListener extends ZooKeeperListener { * be deleted. * @param deletedLatch Count down on this latch when deletion has occurred. 
*/ - public DeletionListener(ZooKeeperWatcher zkWatcher, String pathToWatch, + public DeletionListener(ZKWatcher zkWatcher, String pathToWatch, CountDownLatch deletedLatch) { super(zkWatcher); this.pathToWatch = pathToWatch; @@ -78,7 +78,7 @@ public class DeletionListener extends ZooKeeperListener { return; } try { - if (!(ZKUtil.setWatchIfNodeExists(watcher, pathToWatch))) { + if (!(ZooKeeperUtil.setWatchIfNodeExists(watcher, pathToWatch))) { deletedLatch.countDown(); } } catch (KeeperException ex) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/EmptyWatcher.java b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/EmptyWatcher.java similarity index 100% rename from hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/EmptyWatcher.java rename to hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/EmptyWatcher.java diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/HQuorumPeer.java b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/HQuorumPeer.java similarity index 98% rename from hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/HQuorumPeer.java rename to hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/HQuorumPeer.java index 5f3904aabc..62921f3bab 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/HQuorumPeer.java +++ b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/HQuorumPeer.java @@ -71,7 +71,7 @@ public class HQuorumPeer { zkConfig.parseProperties(zkProperties); // login the zookeeper server principal (if using security) - ZKUtil.loginServer(conf, HConstants.ZK_SERVER_KEYTAB_FILE, + ZooKeeperUtil.loginServer(conf, HConstants.ZK_SERVER_KEYTAB_FILE, HConstants.ZK_SERVER_KERBEROS_PRINCIPAL, zkConfig.getClientPortAddress().getHostName()); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/InstancePending.java 
b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/InstancePending.java similarity index 100% rename from hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/InstancePending.java rename to hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/InstancePending.java diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/LoadBalancerTracker.java b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/LoadBalancerTracker.java similarity index 82% rename from hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/LoadBalancerTracker.java rename to hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/LoadBalancerTracker.java index 527bc17c4b..13219708a6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/LoadBalancerTracker.java +++ b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/LoadBalancerTracker.java @@ -21,10 +21,11 @@ import java.io.IOException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.protobuf.ProtobufHelpers; +import org.apache.hadoop.hbase.protobuf.ProtobufMagic; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.exceptions.DeserializationException; -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.LoadBalancerProtos; import org.apache.hadoop.hbase.util.Bytes; import org.apache.zookeeper.KeeperException; @@ -33,10 +34,10 @@ import org.apache.zookeeper.KeeperException; * Tracks the load balancer state up in ZK */ @InterfaceAudience.Private -public class LoadBalancerTracker extends ZooKeeperNodeTracker { +public class LoadBalancerTracker extends ZKNodeTracker { private static final Log LOG = LogFactory.getLog(LoadBalancerTracker.class); - public LoadBalancerTracker(ZooKeeperWatcher watcher, + public LoadBalancerTracker(ZKWatcher 
watcher, Abortable abortable) { super(watcher, watcher.znodePaths.balancerZNode, abortable); } @@ -64,9 +65,9 @@ public class LoadBalancerTracker extends ZooKeeperNodeTracker { public void setBalancerOn(boolean balancerOn) throws KeeperException { byte [] upData = toByteArray(balancerOn); try { - ZKUtil.setData(watcher, watcher.znodePaths.balancerZNode, upData); + ZooKeeperUtil.setData(watcher, watcher.znodePaths.balancerZNode, upData); } catch(KeeperException.NoNodeException nne) { - ZKUtil.createAndWatch(watcher, watcher.znodePaths.balancerZNode, upData); + ZooKeeperUtil.createAndWatch(watcher, watcher.znodePaths.balancerZNode, upData); } super.nodeDataChanged(watcher.znodePaths.balancerZNode); } @@ -75,17 +76,17 @@ public class LoadBalancerTracker extends ZooKeeperNodeTracker { LoadBalancerProtos.LoadBalancerState.Builder builder = LoadBalancerProtos.LoadBalancerState.newBuilder(); builder.setBalancerOn(isBalancerOn); - return ProtobufUtil.prependPBMagic(builder.build().toByteArray()); + return ProtobufMagic.prependPBMagic(builder.build().toByteArray()); } private LoadBalancerProtos.LoadBalancerState parseFrom(byte [] pbBytes) throws DeserializationException { - ProtobufUtil.expectPBMagicPrefix(pbBytes); + ProtobufMagic.expectPBMagicPrefix(pbBytes); LoadBalancerProtos.LoadBalancerState.Builder builder = LoadBalancerProtos.LoadBalancerState.newBuilder(); try { - int magicLen = ProtobufUtil.lengthOfPBMagic(); - ProtobufUtil.mergeFrom(builder, pbBytes, magicLen, pbBytes.length - magicLen); + int magicLen = ProtobufMagic.lengthOfPBMagic(); + ProtobufHelpers.mergeFrom(builder, pbBytes, magicLen, pbBytes.length - magicLen); } catch (IOException e) { throw new DeserializationException(e); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/MasterMaintenanceModeTracker.java b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/MasterMaintenanceModeTracker.java similarity index 87% rename from 
hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/MasterMaintenanceModeTracker.java rename to hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/MasterMaintenanceModeTracker.java index 7d6f9fdd91..00ec492cd5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/MasterMaintenanceModeTracker.java +++ b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/MasterMaintenanceModeTracker.java @@ -27,10 +27,10 @@ import org.apache.zookeeper.KeeperException; * Tracks the master Maintenance Mode via ZK. */ @InterfaceAudience.Private -public class MasterMaintenanceModeTracker extends ZooKeeperListener { +public class MasterMaintenanceModeTracker extends ZKListener { private boolean hasChildren; - public MasterMaintenanceModeTracker(ZooKeeperWatcher watcher) { + public MasterMaintenanceModeTracker(ZKWatcher watcher) { super(watcher); hasChildren = false; } @@ -47,8 +47,8 @@ public class MasterMaintenanceModeTracker extends ZooKeeperListener { private void update() { try { - List children = - ZKUtil.listChildrenAndWatchForNewChildren(watcher, watcher.znodePaths.masterMaintZNode); + List children = ZooKeeperUtil.listChildrenAndWatchForNewChildren( + watcher, watcher.znodePaths.masterMaintZNode); hasChildren = (children != null && children.size() > 0); } catch (KeeperException e) { // Ignore the ZK keeper exception diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/MiniZooKeeperCluster.java b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/MiniZooKeeperCluster.java similarity index 100% rename from hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/MiniZooKeeperCluster.java rename to hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/MiniZooKeeperCluster.java diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/PendingWatcher.java b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/PendingWatcher.java similarity index 100% rename 
from hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/PendingWatcher.java rename to hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/PendingWatcher.java diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZK.java similarity index 98% rename from hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java rename to hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZK.java index 1c3138bfa6..65de974b13 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java +++ b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZK.java @@ -72,8 +72,8 @@ import org.apache.htrace.TraceScope; * @see "http://wiki.apache.org/hadoop/ZooKeeper/ErrorHandling" */ @InterfaceAudience.Private -public class RecoverableZooKeeper { - private static final Log LOG = LogFactory.getLog(RecoverableZooKeeper.class); +public class RecoverableZK { + private static final Log LOG = LogFactory.getLog(RecoverableZK.class); // the actual ZooKeeper client instance private ZooKeeper zk; private final RetryCounterFactory retryCounterFactory; @@ -84,7 +84,7 @@ public class RecoverableZooKeeper { private int sessionTimeout; private String quorumServers; private final Random salter; - private final ZooKeeperMetricsListener metrics; + private final ZKMetricsListener metrics; // The metadata attached to each piece of data has the // format: @@ -99,7 +99,7 @@ public class RecoverableZooKeeper { private static final int ID_LENGTH_OFFSET = MAGIC_SIZE; private static final int ID_LENGTH_SIZE = Bytes.SIZEOF_INT; - public RecoverableZooKeeper(String quorumServers, int sessionTimeout, + public RecoverableZK(String quorumServers, int sessionTimeout, Watcher watcher, int maxRetries, int retryIntervalMillis, int maxSleepTime) throws IOException { 
this(quorumServers, sessionTimeout, watcher, maxRetries, retryIntervalMillis, maxSleepTime, @@ -108,7 +108,7 @@ public class RecoverableZooKeeper { @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="DE_MIGHT_IGNORE", justification="None. Its always been this way.") - public RecoverableZooKeeper(String quorumServers, int sessionTimeout, + public RecoverableZK(String quorumServers, int sessionTimeout, Watcher watcher, int maxRetries, int retryIntervalMillis, int maxSleepTime, String identifier) throws IOException { // TODO: Add support for zk 'chroot'; we don't add it to the quorumServers String as we should. @@ -127,7 +127,7 @@ public class RecoverableZooKeeper { this.watcher = watcher; this.sessionTimeout = sessionTimeout; this.quorumServers = quorumServers; - this.metrics = new MetricsZooKeeper(); + this.metrics = new ZKMetrics(); try {checkZk();} catch (Exception x) {/* ignore */} salter = new Random(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/RegionNormalizerTracker.java b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/RegionNormalizerTracker.java similarity index 82% rename from hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/RegionNormalizerTracker.java rename to hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/RegionNormalizerTracker.java index a9939347c7..ff954cd68e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/RegionNormalizerTracker.java +++ b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/RegionNormalizerTracker.java @@ -22,7 +22,8 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.exceptions.DeserializationException; -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.ProtobufHelpers; +import org.apache.hadoop.hbase.protobuf.ProtobufMagic; import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionNormalizerProtos; import org.apache.hadoop.hbase.util.Bytes; import org.apache.zookeeper.KeeperException; @@ -32,10 +33,10 @@ import java.io.IOException; /** * Tracks region normalizer state up in ZK */ -public class RegionNormalizerTracker extends ZooKeeperNodeTracker { +public class RegionNormalizerTracker extends ZKNodeTracker { private static final Log LOG = LogFactory.getLog(RegionNormalizerTracker.class); - public RegionNormalizerTracker(ZooKeeperWatcher watcher, + public RegionNormalizerTracker(ZKWatcher watcher, Abortable abortable) { super(watcher, watcher.znodePaths.regionNormalizerZNode, abortable); } @@ -64,9 +65,9 @@ public class RegionNormalizerTracker extends ZooKeeperNodeTracker { public void setNormalizerOn(boolean normalizerOn) throws KeeperException { byte [] upData = toByteArray(normalizerOn); try { - ZKUtil.setData(watcher, watcher.znodePaths.regionNormalizerZNode, upData); + ZooKeeperUtil.setData(watcher, watcher.znodePaths.regionNormalizerZNode, upData); } catch(KeeperException.NoNodeException nne) { - ZKUtil.createAndWatch(watcher, watcher.znodePaths.regionNormalizerZNode, upData); + ZooKeeperUtil.createAndWatch(watcher, watcher.znodePaths.regionNormalizerZNode, upData); } super.nodeDataChanged(watcher.znodePaths.regionNormalizerZNode); } @@ -75,17 +76,17 @@ public class RegionNormalizerTracker extends ZooKeeperNodeTracker { RegionNormalizerProtos.RegionNormalizerState.Builder builder = RegionNormalizerProtos.RegionNormalizerState.newBuilder(); builder.setNormalizerOn(isNormalizerOn); - return ProtobufUtil.prependPBMagic(builder.build().toByteArray()); + return ProtobufMagic.prependPBMagic(builder.build().toByteArray()); } private RegionNormalizerProtos.RegionNormalizerState parseFrom(byte [] pbBytes) throws DeserializationException { - ProtobufUtil.expectPBMagicPrefix(pbBytes); + ProtobufMagic.expectPBMagicPrefix(pbBytes); RegionNormalizerProtos.RegionNormalizerState.Builder builder = 
RegionNormalizerProtos.RegionNormalizerState.newBuilder(); try { - int magicLen = ProtobufUtil.lengthOfPBMagic(); - ProtobufUtil.mergeFrom(builder, pbBytes, magicLen, pbBytes.length - magicLen); + int magicLen = ProtobufMagic.lengthOfPBMagic(); + ProtobufHelpers.mergeFrom(builder, pbBytes, magicLen, pbBytes.length - magicLen); } catch (IOException e) { throw new DeserializationException(e); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZkAclReset.java b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKAclReset.java similarity index 86% rename from hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZkAclReset.java rename to hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKAclReset.java index 3df9880a77..c971c9e27a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZkAclReset.java +++ b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKAclReset.java @@ -43,31 +43,31 @@ import org.apache.zookeeper.ZooKeeper; * Use -set-acls to set the ACLs, no option to erase ACLs */ @InterfaceAudience.Private -public class ZkAclReset extends Configured implements Tool { - private static final Log LOG = LogFactory.getLog(ZkAclReset.class); +public class ZKAclReset extends Configured implements Tool { + private static final Log LOG = LogFactory.getLog(ZKAclReset.class); - private static void resetAcls(final ZooKeeperWatcher zkw, final String znode, + private static void resetAcls(final ZKWatcher zkw, final String znode, final boolean eraseAcls) throws Exception { - List children = ZKUtil.listChildrenNoWatch(zkw, znode); + List children = ZooKeeperUtil.listChildrenNoWatch(zkw, znode); if (children != null) { for (String child: children) { - resetAcls(zkw, ZKUtil.joinZNode(znode, child), eraseAcls); + resetAcls(zkw, ZooKeeperUtil.joinZNode(znode, child), eraseAcls); } } - ZooKeeper zk = zkw.getRecoverableZooKeeper().getZooKeeper(); + ZooKeeper zk = 
zkw.getRecoverableZK().getZooKeeper(); if (eraseAcls) { LOG.info(" - erase ACLs for " + znode); zk.setACL(znode, ZooDefs.Ids.OPEN_ACL_UNSAFE, -1); } else { LOG.info(" - set ACLs for " + znode); - zk.setACL(znode, ZKUtil.createACL(zkw, znode, true), -1); + zk.setACL(znode, ZooKeeperUtil.createACL(zkw, znode, true), -1); } } private static void resetAcls(final Configuration conf, boolean eraseAcls) throws Exception { - ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "ZkAclReset", null); + ZKWatcher zkw = new ZKWatcher(conf, "ZKAclReset", null); try { LOG.info((eraseAcls ? "Erase" : "Set") + " HBase ACLs for " + zkw.getQuorum() + " " + zkw.znodePaths.baseZNode); @@ -111,6 +111,6 @@ public class ZkAclReset extends Configured implements Tool { } public static void main(String[] args) throws Exception { - System.exit(ToolRunner.run(HBaseConfiguration.create(), new ZkAclReset(), args)); + System.exit(ToolRunner.run(HBaseConfiguration.create(), new ZKAclReset(), args)); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKLeaderManager.java b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKLeaderManager.java similarity index 88% rename from hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKLeaderManager.java rename to hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKLeaderManager.java index b0610b01ea..1b637de1c7 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKLeaderManager.java +++ b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKLeaderManager.java @@ -40,7 +40,7 @@ import org.apache.zookeeper.KeeperException; */ @Deprecated @InterfaceAudience.Private -public class ZKLeaderManager extends ZooKeeperListener { +public class ZKLeaderManager extends ZKListener { private static final Log LOG = LogFactory.getLog(ZKLeaderManager.class); private final AtomicBoolean leaderExists = new AtomicBoolean(); @@ -48,7 +48,7 @@ public class ZKLeaderManager extends 
ZooKeeperListener { private byte[] nodeId; private Stoppable candidate; - public ZKLeaderManager(ZooKeeperWatcher watcher, String leaderZNode, + public ZKLeaderManager(ZKWatcher watcher, String leaderZNode, byte[] identifier, Stoppable candidate) { super(watcher); this.leaderZNode = leaderZNode; @@ -59,9 +59,9 @@ public class ZKLeaderManager extends ZooKeeperListener { public void start() { try { watcher.registerListener(this); - String parent = ZKUtil.getParent(leaderZNode); - if (ZKUtil.checkExists(watcher, parent) < 0) { - ZKUtil.createWithParents(watcher, parent); + String parent = ZooKeeperUtil.getParent(leaderZNode); + if (ZooKeeperUtil.checkExists(watcher, parent) < 0) { + ZooKeeperUtil.createWithParents(watcher, parent); } } catch (KeeperException ke) { watcher.abort("Unhandled zk exception when starting", ke); @@ -86,7 +86,7 @@ public class ZKLeaderManager extends ZooKeeperListener { private void handleLeaderChange() { try { synchronized(leaderExists) { - if (ZKUtil.watchAndCheckExists(watcher, leaderZNode)) { + if (ZooKeeperUtil.watchAndCheckExists(watcher, leaderZNode)) { LOG.info("Found new leader for znode: "+leaderZNode); leaderExists.set(true); } else { @@ -107,7 +107,7 @@ public class ZKLeaderManager extends ZooKeeperListener { public void waitToBecomeLeader() { while (!candidate.isStopped()) { try { - if (ZKUtil.createEphemeralNodeAndWatch(watcher, leaderZNode, nodeId)) { + if (ZooKeeperUtil.createEphemeralNodeAndWatch(watcher, leaderZNode, nodeId)) { // claimed the leader znode leaderExists.set(true); if (LOG.isDebugEnabled()) { @@ -118,12 +118,12 @@ public class ZKLeaderManager extends ZooKeeperListener { } // if claiming the node failed, there should be another existing node - byte[] currentId = ZKUtil.getDataAndWatch(watcher, leaderZNode); + byte[] currentId = ZooKeeperUtil.getDataAndWatch(watcher, leaderZNode); if (currentId != null && Bytes.equals(currentId, nodeId)) { // claimed with our ID, but we didn't grab it, possibly restarted? 
LOG.info("Found existing leader with our ID ("+ Bytes.toStringBinary(nodeId)+"), removing"); - ZKUtil.deleteNode(watcher, leaderZNode); + ZooKeeperUtil.deleteNode(watcher, leaderZNode); leaderExists.set(false); } else { LOG.info("Found existing leader with ID: "+Bytes.toStringBinary(nodeId)); @@ -157,10 +157,10 @@ public class ZKLeaderManager extends ZooKeeperListener { if (!leaderExists.get()) { return; } - byte[] leaderId = ZKUtil.getData(watcher, leaderZNode); + byte[] leaderId = ZooKeeperUtil.getData(watcher, leaderZNode); if (leaderId != null && Bytes.equals(nodeId, leaderId)) { LOG.info("Stepping down as leader"); - ZKUtil.deleteNodeFailSilent(watcher, leaderZNode); + ZooKeeperUtil.deleteNodeFailSilent(watcher, leaderZNode); leaderExists.set(false); } else { LOG.info("Not current leader, no need to step down"); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperListener.java b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKListener.java similarity index 89% rename from hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperListener.java rename to hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKListener.java index f78f1d8ad4..6709b63220 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperListener.java +++ b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKListener.java @@ -23,9 +23,9 @@ import org.apache.yetus.audience.InterfaceAudience; /** * Base class for internal listeners of ZooKeeper events. * - * The {@link ZooKeeperWatcher} for a process will execute the appropriate + * The {@link ZKWatcher} for a process will execute the appropriate * methods of implementations of this class. In order to receive events from - * the watcher, every listener must register itself via {@link ZooKeeperWatcher#registerListener}. + * the watcher, every listener must register itself via {@link ZKWatcher#registerListener}. 
* * Subclasses need only override those methods in which they are interested. * @@ -33,15 +33,15 @@ import org.apache.yetus.audience.InterfaceAudience; * they must not be long-running. */ @InterfaceAudience.Private -public abstract class ZooKeeperListener { +public abstract class ZKListener { // Reference to the zk watcher which also contains configuration and constants - protected ZooKeeperWatcher watcher; + protected ZKWatcher watcher; /** * Construct a ZooKeeper event listener. */ - public ZooKeeperListener(ZooKeeperWatcher watcher) { + public ZKListener(ZKWatcher watcher) { this.watcher = watcher; } @@ -80,7 +80,7 @@ public abstract class ZooKeeperListener { /** * @return The watcher associated with this listener */ - public ZooKeeperWatcher getWatcher() { + public ZKWatcher getWatcher() { return this.watcher; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperMainServer.java b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKMainServer.java similarity index 97% rename from hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperMainServer.java rename to hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKMainServer.java index cf76cbb92c..ed5d88e084 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperMainServer.java +++ b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKMainServer.java @@ -33,7 +33,7 @@ import org.apache.zookeeper.ZooKeeperMain; * from HBase XML configuration. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) -public class ZooKeeperMainServer { +public class ZKMainServer { private static final String SERVER_ARG = "-server"; public String parse(final Configuration c) { @@ -91,7 +91,7 @@ public class ZooKeeperMainServer { if (!hasServer(args)) { // Add the zk ensemble from configuration if none passed on command-line. 
Configuration conf = HBaseConfiguration.create(); - String hostport = new ZooKeeperMainServer().parse(conf); + String hostport = new ZKMainServer().parse(conf); if (hostport != null && hostport.length() > 0) { newArgs = new String[args.length + 2]; System.arraycopy(args, 0, newArgs, 2, args.length); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetricsZooKeeper.java b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKMetrics.java similarity index 90% rename from hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetricsZooKeeper.java rename to hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKMetrics.java index 5632031c0d..a8caf3345b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetricsZooKeeper.java +++ b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKMetrics.java @@ -23,23 +23,21 @@ import org.apache.hadoop.hbase.CompatibilitySingletonFactory; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.hbase.regionserver.wal.MetricsWALSource; -import org.apache.hadoop.hbase.regionserver.wal.MetricsWALSourceImpl; /** * Class used to push numbers about ZooKeeper into the metrics subsystem. This will take a * single function call and turn it into multiple manipulations of the hadoop metrics system. 
*/ @InterfaceAudience.Private -public class MetricsZooKeeper implements ZooKeeperMetricsListener { +public class ZKMetrics implements ZKMetricsListener { private final MetricsZooKeeperSource source; - public MetricsZooKeeper() { + public ZKMetrics() { this(CompatibilitySingletonFactory.getInstance(MetricsZooKeeperSource.class)); } @VisibleForTesting - public MetricsZooKeeper(MetricsZooKeeperSource s) { + public ZKMetrics(MetricsZooKeeperSource s) { this.source = s; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperMetricsListener.java b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKMetricsListener.java similarity index 98% rename from hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperMetricsListener.java rename to hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKMetricsListener.java index 12ce2cfca5..f17925ef28 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperMetricsListener.java +++ b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKMetricsListener.java @@ -22,7 +22,7 @@ package org.apache.hadoop.hbase.zookeeper; import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private -public interface ZooKeeperMetricsListener { +public interface ZKMetricsListener { /** * An AUTHFAILED Exception was seen. 
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKNodePaths.java similarity index 77% rename from hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java rename to hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKNodePaths.java index 84b53d8628..d32eb809f4 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java +++ b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKNodePaths.java @@ -18,11 +18,11 @@ package org.apache.hadoop.hbase.zookeeper; import static org.apache.hadoop.hbase.HConstants.DEFAULT_META_REPLICA_NUM; +import static org.apache.hadoop.hbase.HConstants.DEFAULT_REPLICA_ID; import static org.apache.hadoop.hbase.HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT; import static org.apache.hadoop.hbase.HConstants.META_REPLICAS_NUM; import static org.apache.hadoop.hbase.HConstants.SPLIT_LOGDIR_NAME; import static org.apache.hadoop.hbase.HConstants.ZOOKEEPER_ZNODE_PARENT; -import static org.apache.hadoop.hbase.HRegionInfo.DEFAULT_REPLICA_ID; import org.apache.hadoop.hbase.shaded.com.google.common.collect.ImmutableMap; @@ -30,14 +30,13 @@ import java.util.Optional; import java.util.stream.IntStream; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HRegionInfo; import org.apache.yetus.audience.InterfaceAudience; /** * Class that hold all the paths of znode for HBase. 
*/ @InterfaceAudience.Private -public class ZNodePaths { +public class ZKNodePaths { public final static String META_ZNODE_PREFIX = "meta-region-server"; @@ -88,54 +87,54 @@ public class ZNodePaths { // znode containing queues of hfile references to be replicated public final String hfileRefsZNode; - public ZNodePaths(Configuration conf) { + public ZKNodePaths(Configuration conf) { baseZNode = conf.get(ZOOKEEPER_ZNODE_PARENT, DEFAULT_ZOOKEEPER_ZNODE_PARENT); ImmutableMap.Builder builder = ImmutableMap.builder(); metaZNodePrefix = conf.get("zookeeper.znode.metaserver", META_ZNODE_PREFIX); - String defaultMetaReplicaZNode = ZKUtil.joinZNode(baseZNode, metaZNodePrefix); + String defaultMetaReplicaZNode = ZooKeeperUtil.joinZNode(baseZNode, metaZNodePrefix); builder.put(DEFAULT_REPLICA_ID, defaultMetaReplicaZNode); int numMetaReplicas = conf.getInt(META_REPLICAS_NUM, DEFAULT_META_REPLICA_NUM); IntStream.range(1, numMetaReplicas) .forEachOrdered(i -> builder.put(i, defaultMetaReplicaZNode + "-" + i)); metaReplicaZNodes = builder.build(); - rsZNode = ZKUtil.joinZNode(baseZNode, conf.get("zookeeper.znode.rs", "rs")); - drainingZNode = ZKUtil.joinZNode(baseZNode, + rsZNode = ZooKeeperUtil.joinZNode(baseZNode, conf.get("zookeeper.znode.rs", "rs")); + drainingZNode = ZooKeeperUtil.joinZNode(baseZNode, conf.get("zookeeper.znode.draining.rs", "draining")); - masterAddressZNode = ZKUtil.joinZNode(baseZNode, conf.get("zookeeper.znode.master", "master")); - backupMasterAddressesZNode = ZKUtil.joinZNode(baseZNode, + masterAddressZNode = ZooKeeperUtil.joinZNode(baseZNode, conf.get("zookeeper.znode.master", "master")); + backupMasterAddressesZNode = ZooKeeperUtil.joinZNode(baseZNode, conf.get("zookeeper.znode.backup.masters", "backup-masters")); - clusterStateZNode = ZKUtil.joinZNode(baseZNode, conf.get("zookeeper.znode.state", "running")); - tableZNode = ZKUtil.joinZNode(baseZNode, + clusterStateZNode = ZooKeeperUtil.joinZNode(baseZNode, conf.get("zookeeper.znode.state", 
"running")); + tableZNode = ZooKeeperUtil.joinZNode(baseZNode, conf.get("zookeeper.znode.tableEnableDisable", "table")); - clusterIdZNode = ZKUtil.joinZNode(baseZNode, conf.get("zookeeper.znode.clusterId", "hbaseid")); - splitLogZNode = ZKUtil.joinZNode(baseZNode, + clusterIdZNode = ZooKeeperUtil.joinZNode(baseZNode, conf.get("zookeeper.znode.clusterId", "hbaseid")); + splitLogZNode = ZooKeeperUtil.joinZNode(baseZNode, conf.get("zookeeper.znode.splitlog", SPLIT_LOGDIR_NAME)); - balancerZNode = ZKUtil.joinZNode(baseZNode, conf.get("zookeeper.znode.balancer", "balancer")); - regionNormalizerZNode = ZKUtil.joinZNode(baseZNode, + balancerZNode = ZooKeeperUtil.joinZNode(baseZNode, conf.get("zookeeper.znode.balancer", "balancer")); + regionNormalizerZNode = ZooKeeperUtil.joinZNode(baseZNode, conf.get("zookeeper.znode.regionNormalizer", "normalizer")); - switchZNode = ZKUtil.joinZNode(baseZNode, conf.get("zookeeper.znode.switch", "switch")); - tableLockZNode = ZKUtil.joinZNode(baseZNode, + switchZNode = ZooKeeperUtil.joinZNode(baseZNode, conf.get("zookeeper.znode.switch", "switch")); + tableLockZNode = ZooKeeperUtil.joinZNode(baseZNode, conf.get("zookeeper.znode.tableLock", "table-lock")); - recoveringRegionsZNode = ZKUtil.joinZNode(baseZNode, + recoveringRegionsZNode = ZooKeeperUtil.joinZNode(baseZNode, conf.get("zookeeper.znode.recovering.regions", "recovering-regions")); - namespaceZNode = ZKUtil.joinZNode(baseZNode, + namespaceZNode = ZooKeeperUtil.joinZNode(baseZNode, conf.get("zookeeper.znode.namespace", "namespace")); - masterMaintZNode = ZKUtil.joinZNode(baseZNode, + masterMaintZNode = ZooKeeperUtil.joinZNode(baseZNode, conf.get("zookeeper.znode.masterMaintenance", "master-maintenance")); replicationZNode = - ZKUtil.joinZNode(baseZNode, conf.get("zookeeper.znode.replication", "replication")); + ZooKeeperUtil.joinZNode(baseZNode, conf.get("zookeeper.znode.replication", "replication")); peersZNode = - ZKUtil.joinZNode(replicationZNode, 
conf.get("zookeeper.znode.replication.peers", "peers")); + ZooKeeperUtil.joinZNode(replicationZNode, conf.get("zookeeper.znode.replication.peers", "peers")); queuesZNode = - ZKUtil.joinZNode(replicationZNode, conf.get("zookeeper.znode.replication.rs", "rs")); + ZooKeeperUtil.joinZNode(replicationZNode, conf.get("zookeeper.znode.replication.rs", "rs")); hfileRefsZNode = - ZKUtil.joinZNode(replicationZNode, + ZooKeeperUtil.joinZNode(replicationZNode, conf.get("zookeeper.znode.replication.hfile.refs", "hfile-refs")); } @Override public String toString() { - return "ZNodePaths [baseZNode=" + baseZNode + ", metaReplicaZNodes=" + metaReplicaZNodes + return "ZKNodePaths [baseZNode=" + baseZNode + ", metaReplicaZNodes=" + metaReplicaZNodes + ", rsZNode=" + rsZNode + ", drainingZNode=" + drainingZNode + ", masterAddressZNode=" + masterAddressZNode + ", backupMasterAddressesZNode=" + backupMasterAddressesZNode + ", clusterStateZNode=" + clusterStateZNode + ", tableZNode=" + tableZNode @@ -180,17 +179,8 @@ public class ZNodePaths { */ public int getMetaReplicaIdFromZnode(String znode) { if (znode.equals(metaZNodePrefix)) { - return HRegionInfo.DEFAULT_REPLICA_ID; + return DEFAULT_REPLICA_ID; } return Integer.parseInt(znode.substring(metaZNodePrefix.length() + 1)); } - - /** - * Is it the default meta replica's znode - * @param znode - * @return true or false - */ - public boolean isDefaultMetaReplicaZnode(String znode) { - return metaReplicaZNodes.get(DEFAULT_REPLICA_ID).equals(znode); - } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperNodeTracker.java b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKNodeTracker.java similarity index 88% rename from hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperNodeTracker.java rename to hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKNodeTracker.java index a5b084b001..6039ccb05f 100644 --- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperNodeTracker.java +++ b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKNodeTracker.java @@ -27,16 +27,16 @@ import org.apache.zookeeper.KeeperException; /** * Tracks the availability and value of a single ZooKeeper node. * - *

Utilizes the {@link ZooKeeperListener} interface to get the necessary + *

Utilizes the {@link ZKListener} interface to get the necessary * ZooKeeper events related to the node. * *

This is the base class used by trackers in both the Master and * RegionServers. */ @InterfaceAudience.Private -public abstract class ZooKeeperNodeTracker extends ZooKeeperListener { +public abstract class ZKNodeTracker extends ZKListener { // LOG is being used in subclasses, hence keeping it protected - protected static final Log LOG = LogFactory.getLog(ZooKeeperNodeTracker.class); + protected static final Log LOG = LogFactory.getLog(ZKNodeTracker.class); /** Path of node being tracked */ protected final String node; @@ -57,7 +57,7 @@ public abstract class ZooKeeperNodeTracker extends ZooKeeperListener { * @param node * @param abortable */ - public ZooKeeperNodeTracker(ZooKeeperWatcher watcher, String node, + public ZKNodeTracker(ZKWatcher watcher, String node, Abortable abortable) { super(watcher); this.node = node; @@ -74,8 +74,8 @@ public abstract class ZooKeeperNodeTracker extends ZooKeeperListener { public synchronized void start() { this.watcher.registerListener(this); try { - if(ZKUtil.watchAndCheckExists(watcher, node)) { - byte [] data = ZKUtil.getDataAndWatch(watcher, node); + if(ZooKeeperUtil.watchAndCheckExists(watcher, node)) { + byte [] data = ZooKeeperUtil.getDataAndWatch(watcher, node); if(data != null) { this.data = data; } else { @@ -123,7 +123,7 @@ public abstract class ZooKeeperNodeTracker extends ZooKeeperListener { if (refresh) { try { // This does not create a watch if the node does not exists - this.data = ZKUtil.getDataAndWatch(watcher, node); + this.data = ZooKeeperUtil.getDataAndWatch(watcher, node); } catch(KeeperException e) { // We use to abort here, but in some cases the abort is ignored ( // (empty Abortable), so it's better to log... 
@@ -135,7 +135,7 @@ public abstract class ZooKeeperNodeTracker extends ZooKeeperListener { while (!this.stopped && (notimeout || remaining > 0) && this.data == null) { if (!nodeExistsChecked) { try { - nodeExistsChecked = (ZKUtil.checkExists(watcher, node) != -1); + nodeExistsChecked = (ZooKeeperUtil.checkExists(watcher, node) != -1); } catch (KeeperException e) { LOG.warn( "Got exception while trying to check existence in ZooKeeper" + @@ -147,7 +147,7 @@ public abstract class ZooKeeperNodeTracker extends ZooKeeperListener { LOG.debug("Node " + node + " now exists, resetting a watcher"); try { // This does not create a watch if the node does not exists - this.data = ZKUtil.getDataAndWatch(watcher, node); + this.data = ZooKeeperUtil.getDataAndWatch(watcher, node); } catch (KeeperException e) { LOG.warn("Unexpected exception handling blockUntilAvailable", e); abortable.abort("Unexpected exception handling blockUntilAvailable", e); @@ -174,7 +174,7 @@ public abstract class ZooKeeperNodeTracker extends ZooKeeperListener { public synchronized byte [] getData(boolean refresh) { if (refresh) { try { - this.data = ZKUtil.getDataAndWatch(watcher, node); + this.data = ZooKeeperUtil.getDataAndWatch(watcher, node); } catch(KeeperException e) { abortable.abort("Unexpected exception handling getData", e); } @@ -190,7 +190,7 @@ public abstract class ZooKeeperNodeTracker extends ZooKeeperListener { public synchronized void nodeCreated(String path) { if (!path.equals(node)) return; try { - byte [] data = ZKUtil.getDataAndWatch(watcher, node); + byte [] data = ZooKeeperUtil.getDataAndWatch(watcher, node); if (data != null) { this.data = data; notifyAll(); @@ -206,7 +206,7 @@ public abstract class ZooKeeperNodeTracker extends ZooKeeperListener { public synchronized void nodeDeleted(String path) { if(path.equals(node)) { try { - if(ZKUtil.watchAndCheckExists(watcher, node)) { + if(ZooKeeperUtil.watchAndCheckExists(watcher, node)) { nodeCreated(path); } else { this.data = null; @@ 
-223,7 +223,7 @@ public abstract class ZooKeeperNodeTracker extends ZooKeeperListener { nodeCreated(path); } } - + /** * Checks if the baseznode set as per the property 'zookeeper.znode.parent' * exists. @@ -232,7 +232,7 @@ public abstract class ZooKeeperNodeTracker extends ZooKeeperListener { */ public boolean checkIfBaseNodeAvailable() { try { - if (ZKUtil.checkExists(watcher, watcher.znodePaths.baseZNode) == -1) { + if (ZooKeeperUtil.checkExists(watcher, watcher.znodePaths.baseZNode) == -1) { return false; } } catch (KeeperException e) { @@ -245,7 +245,7 @@ public abstract class ZooKeeperNodeTracker extends ZooKeeperListener { @Override public String toString() { - return "ZooKeeperNodeTracker{" + + return "ZKNodeTracker{" + "node='" + node + ", stopped=" + stopped + '}'; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKServerTool.java b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKServerTool.java similarity index 100% rename from hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKServerTool.java rename to hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKServerTool.java diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKSplitLog.java b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKSplitLog.java similarity index 54% rename from hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKSplitLog.java rename to hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKSplitLog.java index 05cd8a2c22..a7379e6b7d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKSplitLog.java +++ b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKSplitLog.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.zookeeper; import java.io.IOException; -import java.io.InterruptedIOException; import java.io.UnsupportedEncodingException; import java.net.URLDecoder; import java.net.URLEncoder; @@ -30,14 +29,11 @@ 
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HConstants; import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hadoop.hbase.exceptions.DeserializationException; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds; import org.apache.zookeeper.KeeperException; /** - * Common methods and attributes used by {@link org.apache.hadoop.hbase.master.SplitLogManager} - * and {@link org.apache.hadoop.hbase.regionserver.SplitLogWorker} - * running distributed splitting of WAL logs. + * Common methods and attributes used by org.apache.hadoop.hbase.master.SplitLogManager + * and org.apache.hadoop.hbase.regionserver.SplitLogWorker running distributed splitting of WAL. */ @InterfaceAudience.Private public class ZKSplitLog { @@ -49,8 +45,8 @@ public class ZKSplitLog { * @param zkw zk reference * @param filename log file name (only the basename) */ - public static String getEncodedNodeName(ZooKeeperWatcher zkw, String filename) { - return ZKUtil.joinZNode(zkw.znodePaths.splitLogZNode, encode(filename)); + public static String getEncodedNodeName(ZKWatcher zkw, String filename) { + return ZooKeeperUtil.joinZNode(zkw.znodePaths.splitLogZNode, encode(filename)); } public static String getFileName(String node) { @@ -74,8 +70,8 @@ public class ZKSplitLog { } } - public static String getRescanNode(ZooKeeperWatcher zkw) { - return ZKUtil.joinZNode(zkw.znodePaths.splitLogZNode, "RESCAN"); + public static String getRescanNode(ZKWatcher zkw) { + return ZooKeeperUtil.joinZNode(zkw.znodePaths.splitLogZNode, "RESCAN"); } /** @@ -91,7 +87,7 @@ public class ZKSplitLog { * @param path the absolute path, starts with '/' * @return whether the path represents a rescan node */ - public static boolean isRescanNode(ZooKeeperWatcher zkw, String path) { + public static boolean isRescanNode(ZKWatcher zkw, String path) { String prefix = getRescanNode(zkw); if (path.length() <= 
prefix.length()) { return false; @@ -104,7 +100,7 @@ public class ZKSplitLog { return true; } - public static boolean isTaskPath(ZooKeeperWatcher zkw, String path) { + public static boolean isTaskPath(ZKWatcher zkw, String path) { String dirname = path.substring(0, path.lastIndexOf('/')); return dirname.equals(zkw.znodePaths.splitLogZNode); } @@ -137,10 +133,6 @@ public class ZKSplitLog { return isCorrupt; } - /* - * Following methods come from SplitLogManager - */ - /** * check if /hbase/recovering-regions/<current region encoded name> * exists. Returns true if exists and set watcher as well. @@ -150,43 +142,28 @@ public class ZKSplitLog { * @throws KeeperException */ public static boolean - isRegionMarkedRecoveringInZK(ZooKeeperWatcher zkw, String regionEncodedName) + isRegionMarkedRecoveringInZK(ZKWatcher zkw, String regionEncodedName) throws KeeperException { boolean result = false; - String nodePath = ZKUtil.joinZNode(zkw.znodePaths.recoveringRegionsZNode, regionEncodedName); + String nodePath = ZooKeeperUtil.joinZNode(zkw.znodePaths.recoveringRegionsZNode, regionEncodedName); - byte[] node = ZKUtil.getDataAndWatch(zkw, nodePath); + byte[] node = ZooKeeperUtil.getDataAndWatch(zkw, nodePath); if (node != null) { result = true; } return result; } - /** - * @param bytes - Content of a failed region server or recovering region znode. 
- * @return long - The last flushed sequence Id for the region server - */ - public static long parseLastFlushedSequenceIdFrom(final byte[] bytes) { - long lastRecordedFlushedSequenceId = -1l; - try { - lastRecordedFlushedSequenceId = ZKUtil.parseWALPositionFrom(bytes); - } catch (DeserializationException e) { - lastRecordedFlushedSequenceId = -1l; - LOG.warn("Can't parse last flushed sequence Id", e); - } - return lastRecordedFlushedSequenceId; - } - - public static void deleteRecoveringRegionZNodes(ZooKeeperWatcher watcher, List regions) { + public static void deleteRecoveringRegionZNodes(ZKWatcher watcher, List regions) { try { if (regions == null) { // remove all children under /home/recovering-regions LOG.debug("Garbage collecting all recovering region znodes"); - ZKUtil.deleteChildrenRecursively(watcher, watcher.znodePaths.recoveringRegionsZNode); + ZooKeeperUtil.deleteChildrenRecursively(watcher, watcher.znodePaths.recoveringRegionsZNode); } else { for (String curRegion : regions) { - String nodePath = ZKUtil.joinZNode(watcher.znodePaths.recoveringRegionsZNode, curRegion); - ZKUtil.deleteNodeRecursively(watcher, nodePath); + String nodePath = ZooKeeperUtil.joinZNode(watcher.znodePaths.recoveringRegionsZNode, curRegion); + ZooKeeperUtil.deleteNodeRecursively(watcher, nodePath); } } } catch (KeeperException e) { @@ -194,43 +171,5 @@ public class ZKSplitLog { } } - /** - * This function is used in distributedLogReplay to fetch last flushed sequence id from ZK - * @param zkw - * @param serverName - * @param encodedRegionName - * @return the last flushed sequence ids recorded in ZK of the region for serverName - * @throws IOException - */ - public static RegionStoreSequenceIds getRegionFlushedSequenceId(ZooKeeperWatcher zkw, - String serverName, String encodedRegionName) throws IOException { - // when SplitLogWorker recovers a region by directly replaying unflushed WAL edits, - // last flushed sequence Id changes when newly assigned RS flushes writes to the 
region. - // If the newly assigned RS fails again(a chained RS failures scenario), the last flushed - // sequence Id name space (sequence Id only valid for a particular RS instance), changes - // when different newly assigned RS flushes the region. - // Therefore, in this mode we need to fetch last sequence Ids from ZK where we keep history of - // last flushed sequence Id for each failed RS instance. - RegionStoreSequenceIds result = null; - String nodePath = ZKUtil.joinZNode(zkw.znodePaths.recoveringRegionsZNode, encodedRegionName); - nodePath = ZKUtil.joinZNode(nodePath, serverName); - try { - byte[] data; - try { - data = ZKUtil.getData(zkw, nodePath); - } catch (InterruptedException e) { - throw new InterruptedIOException(); - } - if (data != null) { - result = ZKUtil.parseRegionStoreSequenceIds(data); - } - } catch (KeeperException e) { - throw new IOException("Cannot get lastFlushedSequenceId from ZooKeeper for server=" - + serverName + "; region=" + encodedRegionName, e); - } catch (DeserializationException e) { - LOG.warn("Can't parse last flushed sequence Id from znode:" + nodePath, e); - } - return result; - } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKWatcher.java similarity index 86% rename from hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java rename to hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKWatcher.java index 30913dc2eb..6480fcaff5 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java +++ b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKWatcher.java @@ -21,9 +21,7 @@ package org.apache.hadoop.hbase.zookeeper; import java.io.Closeable; import java.io.IOException; import java.util.ArrayList; -import java.util.HashMap; import java.util.List; -import java.util.Map; import java.util.concurrent.CopyOnWriteArrayList; 
import java.util.concurrent.CountDownLatch; import java.util.regex.Matcher; @@ -34,9 +32,6 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.AuthUtil; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.security.Superusers; import org.apache.hadoop.security.UserGroupInformation; @@ -61,8 +56,8 @@ import org.apache.zookeeper.data.Stat; * deal with connection related events and exceptions are handled here. */ @InterfaceAudience.Private -public class ZooKeeperWatcher implements Watcher, Abortable, Closeable { - private static final Log LOG = LogFactory.getLog(ZooKeeperWatcher.class); +public class ZKWatcher implements Watcher, Abortable, Closeable { + private static final Log LOG = LogFactory.getLog(ZKWatcher.class); // Identifier for this watcher (for logging only). It is made of the prefix // passed on construction and the zookeeper sessionid. 
@@ -73,19 +68,19 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable { private String quorum; // zookeeper connection - private final RecoverableZooKeeper recoverableZooKeeper; + private final RecoverableZK recoverableZK; // abortable in case of zk failure protected Abortable abortable; // Used if abortable is null private boolean aborted = false; - public final ZNodePaths znodePaths; + public final ZKNodePaths znodePaths; // listeners to be notified - private final List listeners = new CopyOnWriteArrayList<>(); + private final List listeners = new CopyOnWriteArrayList<>(); - // Used by ZKUtil:waitForZKConnectionIfAuthenticating to wait for SASL + // Used by ZooKeeperUtil:waitForZKConnectionIfAuthenticating to wait for SASL // negotiation to complete public CountDownLatch saslLatch = new CountDownLatch(1); @@ -103,7 +98,7 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable { * @throws IOException * @throws ZooKeeperConnectionException */ - public ZooKeeperWatcher(Configuration conf, String identifier, + public ZKWatcher(Configuration conf, String identifier, Abortable abortable) throws ZooKeeperConnectionException, IOException { this(conf, identifier, abortable, false); } @@ -119,7 +114,7 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable { * @throws IOException * @throws ZooKeeperConnectionException */ - public ZooKeeperWatcher(Configuration conf, String identifier, + public ZKWatcher(Configuration conf, String identifier, Abortable abortable, boolean canCreateBaseZNode) throws IOException, ZooKeeperConnectionException { this.conf = conf; @@ -129,18 +124,18 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable { // handle the syncconnect event. 
this.identifier = identifier + "0x0"; this.abortable = abortable; - this.znodePaths = new ZNodePaths(conf); + this.znodePaths = new ZKNodePaths(conf); PendingWatcher pendingWatcher = new PendingWatcher(); - this.recoverableZooKeeper = ZKUtil.connect(conf, quorum, pendingWatcher, identifier); + this.recoverableZK = ZooKeeperUtil.connect(conf, quorum, pendingWatcher, identifier); pendingWatcher.prepare(this); if (canCreateBaseZNode) { try { createBaseZNodes(); } catch (ZooKeeperConnectionException zce) { try { - this.recoverableZooKeeper.close(); + this.recoverableZK.close(); } catch (InterruptedException ie) { - LOG.debug("Encountered InterruptedException when closing " + this.recoverableZooKeeper); + LOG.debug("Encountered InterruptedException when closing " + this.recoverableZK); Thread.currentThread().interrupt(); } throw zce; @@ -151,15 +146,15 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable { private void createBaseZNodes() throws ZooKeeperConnectionException { try { // Create all the necessary "directories" of znodes - ZKUtil.createWithParents(this, znodePaths.baseZNode); - ZKUtil.createAndFailSilent(this, znodePaths.rsZNode); - ZKUtil.createAndFailSilent(this, znodePaths.drainingZNode); - ZKUtil.createAndFailSilent(this, znodePaths.tableZNode); - ZKUtil.createAndFailSilent(this, znodePaths.splitLogZNode); - ZKUtil.createAndFailSilent(this, znodePaths.backupMasterAddressesZNode); - ZKUtil.createAndFailSilent(this, znodePaths.tableLockZNode); - ZKUtil.createAndFailSilent(this, znodePaths.recoveringRegionsZNode); - ZKUtil.createAndFailSilent(this, znodePaths.masterMaintZNode); + ZooKeeperUtil.createWithParents(this, znodePaths.baseZNode); + ZooKeeperUtil.createAndFailSilent(this, znodePaths.rsZNode); + ZooKeeperUtil.createAndFailSilent(this, znodePaths.drainingZNode); + ZooKeeperUtil.createAndFailSilent(this, znodePaths.tableZNode); + ZooKeeperUtil.createAndFailSilent(this, znodePaths.splitLogZNode); + 
ZooKeeperUtil.createAndFailSilent(this, znodePaths.backupMasterAddressesZNode); + ZooKeeperUtil.createAndFailSilent(this, znodePaths.tableLockZNode); + ZooKeeperUtil.createAndFailSilent(this, znodePaths.recoveringRegionsZNode); + ZooKeeperUtil.createAndFailSilent(this, znodePaths.masterMaintZNode); } catch (KeeperException e) { throw new ZooKeeperConnectionException( prefix("Unexpected KeeperException creating base node"), e); @@ -190,7 +185,7 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable { * perms. */ public void checkAndSetZNodeAcls() { - if (!ZKUtil.isSecureZooKeeper(getConfiguration())) { + if (!ZooKeeperUtil.isSecureZooKeeper(getConfiguration())) { LOG.info("not a secure deployment, proceeding"); return; } @@ -198,7 +193,7 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable { // Check the base znodes permission first. Only do the recursion if base znode's perms are not // correct. try { - List actualAcls = recoverableZooKeeper.getAcl(znodePaths.baseZNode, new Stat()); + List actualAcls = recoverableZK.getAcl(znodePaths.baseZNode, new Stat()); if (!isBaseZnodeAclSetup(actualAcls)) { LOG.info("setting znode ACLs"); @@ -219,14 +214,14 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable { * @param znode */ private void setZnodeAclsRecursive(String znode) throws KeeperException, InterruptedException { - List children = recoverableZooKeeper.getChildren(znode, false); + List children = recoverableZK.getChildren(znode, false); for (String child : children) { - setZnodeAclsRecursive(ZKUtil.joinZNode(znode, child)); + setZnodeAclsRecursive(ZooKeeperUtil.joinZNode(znode, child)); } - List acls = ZKUtil.createACL(this, znode, true); + List acls = ZooKeeperUtil.createACL(this, znode, true); LOG.info("Setting ACLs for znode:" + znode + " , acl:" + acls); - recoverableZooKeeper.setAcl(znode, acls, -1); + recoverableZK.setAcl(znode, acls, -1); } /** @@ -372,7 +367,7 @@ public class ZooKeeperWatcher 
implements Watcher, Abortable, Closeable { * @throws KeeperException */ public List getMetaReplicaNodes() throws KeeperException { - List childrenOfBaseNode = ZKUtil.listChildrenNoWatch(this, znodePaths.baseZNode); + List childrenOfBaseNode = ZooKeeperUtil.listChildrenNoWatch(this, znodePaths.baseZNode); List metaReplicaNodes = new ArrayList<>(2); if (childrenOfBaseNode != null) { String pattern = conf.get("zookeeper.znode.metaserver","meta-region-server"); @@ -387,7 +382,7 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable { * Register the specified listener to receive ZooKeeper events. * @param listener */ - public void registerListener(ZooKeeperListener listener) { + public void registerListener(ZKListener listener) { listeners.add(listener); } @@ -396,11 +391,11 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable { * the first in the list of current listeners. * @param listener */ - public void registerListenerFirst(ZooKeeperListener listener) { + public void registerListenerFirst(ZKListener listener) { listeners.add(0, listener); } - public void unregisterListener(ZooKeeperListener listener) { + public void unregisterListener(ZKListener listener) { listeners.remove(listener); } @@ -414,7 +409,7 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable { /** * Get a copy of current registered listeners */ - public List getListeners() { + public List getListeners() { return new ArrayList<>(listeners); } @@ -429,12 +424,12 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable { * Get the connection to ZooKeeper. 
* @return connection reference to zookeeper */ - public RecoverableZooKeeper getRecoverableZooKeeper() { - return recoverableZooKeeper; + public RecoverableZK getRecoverableZK() { + return recoverableZK; } public void reconnectAfterExpiration() throws IOException, KeeperException, InterruptedException { - recoverableZooKeeper.reconnectAfterExpiration(); + recoverableZK.reconnectAfterExpiration(); } /** @@ -450,7 +445,7 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable { *

* Mainly used for mocking as mockito can not mock a field access. */ - public ZNodePaths getZNodePaths() { + public ZKNodePaths getZNodePaths() { return znodePaths; } @@ -478,28 +473,28 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable { // Otherwise pass along to the listeners case NodeCreated: { - for(ZooKeeperListener listener : listeners) { + for(ZKListener listener : listeners) { listener.nodeCreated(event.getPath()); } break; } case NodeDeleted: { - for(ZooKeeperListener listener : listeners) { + for(ZKListener listener : listeners) { listener.nodeDeleted(event.getPath()); } break; } case NodeDataChanged: { - for(ZooKeeperListener listener : listeners) { + for(ZKListener listener : listeners) { listener.nodeDataChanged(event.getPath()); } break; } case NodeChildrenChanged: { - for(ZooKeeperListener listener : listeners) { + for(ZKListener listener : listeners) { listener.nodeChildrenChanged(event.getPath()); } break; @@ -523,7 +518,7 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable { switch(event.getState()) { case SyncConnected: this.identifier = this.prefix + "-0x" + - Long.toHexString(this.recoverableZooKeeper.getSessionId()); + Long.toHexString(this.recoverableZK.getSessionId()); // Update our identifier. Otherwise ignore. LOG.debug(this.identifier + " connected"); break; @@ -536,8 +531,8 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable { case Expired: String msg = prefix(this.identifier + " received expired from " + "ZooKeeper, aborting"); - // TODO: One thought is to add call to ZooKeeperListener so say, - // ZooKeeperNodeTracker can zero out its data values. + // TODO: One thought is to add call to ZKListener so say, + // ZKNodeTracker can zero out its data values. 
if (this.abortable != null) { this.abortable.abort(msg, new KeeperException.SessionExpiredException()); } @@ -566,7 +561,7 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable { * is up-to-date from when we begin the operation. */ public void sync(String path) throws KeeperException { - this.recoverableZooKeeper.sync(path, null, null); + this.recoverableZK.sync(path, null, null); } /** @@ -616,7 +611,7 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable { @Override public void close() { try { - recoverableZooKeeper.close(); + recoverableZK.close(); } catch (InterruptedException e) { Thread.currentThread().interrupt(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ZooKeeperConnectionException.java b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperConnectionException.java similarity index 97% rename from hbase-client/src/main/java/org/apache/hadoop/hbase/ZooKeeperConnectionException.java rename to hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperConnectionException.java index 60776dac24..2593aabd96 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ZooKeeperConnectionException.java +++ b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperConnectionException.java @@ -16,7 +16,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.hadoop.hbase; +package org.apache.hadoop.hbase.zookeeper; import java.io.IOException; diff --git a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperUtil.java b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperUtil.java new file mode 100644 index 0000000000..d1aab4cb07 --- /dev/null +++ b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperUtil.java @@ -0,0 +1,1903 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.zookeeper; + +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStreamReader; +import java.io.PrintWriter; +import java.net.InetSocketAddress; +import java.net.Socket; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Deque; +import java.util.HashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; + +import javax.security.auth.login.AppConfigurationEntry; +import javax.security.auth.login.AppConfigurationEntry.LoginModuleControlFlag; + +import org.apache.commons.lang3.StringUtils; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.AuthUtil; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.protobuf.ProtobufMagic; +import org.apache.hadoop.hbase.exceptions.DeserializationException; +import org.apache.hadoop.hbase.security.Superusers; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos; +import org.apache.hadoop.hbase.util.Addressing; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.Threads; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperUtil.ZKUtilOp.CreateAndFailSilent; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperUtil.ZKUtilOp.DeleteNodeFailSilent; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperUtil.ZKUtilOp.SetData; +import org.apache.hadoop.security.SecurityUtil; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.authentication.util.KerberosUtil; +import org.apache.zookeeper.AsyncCallback; +import org.apache.zookeeper.CreateMode; +import org.apache.zookeeper.KeeperException; +import org.apache.zookeeper.KeeperException.NoNodeException; +import org.apache.zookeeper.Op; +import org.apache.zookeeper.Watcher; +import org.apache.zookeeper.ZooDefs.Ids; +import 
org.apache.zookeeper.ZooDefs.Perms; +import org.apache.zookeeper.ZooKeeper; +import org.apache.zookeeper.client.ZooKeeperSaslClient; +import org.apache.zookeeper.data.ACL; +import org.apache.zookeeper.data.Id; +import org.apache.zookeeper.data.Stat; +import org.apache.zookeeper.proto.CreateRequest; +import org.apache.zookeeper.proto.DeleteRequest; +import org.apache.zookeeper.proto.SetDataRequest; +import org.apache.zookeeper.server.ZooKeeperSaslServer; + +public class ZooKeeperUtil { + private static final Log LOG = LogFactory.getLog(ZooKeeperUtil.class); + // TODO: Replace this with ZooKeeper constant when ZOOKEEPER-277 is resolved. + public static final char ZNODE_PATH_SEPARATOR = '/'; + + /** + * Creates a new connection to ZooKeeper, pulling settings and ensemble config + * from the specified configuration object using methods from {@link ZKConfig}. + * + * Sets the connection status monitoring watcher to the specified watcher. + * + * @param conf configuration to pull ensemble and other settings from + * @param watcher watcher to monitor connection changes + * @return connection to zookeeper + * @throws IOException if unable to connect to zk or config problem + */ + public static RecoverableZK connect(Configuration conf, Watcher watcher) + throws IOException { + String ensemble = ZKConfig.getZKQuorumServersString(conf); + return connect(conf, ensemble, watcher); + } + + public static RecoverableZK connect(Configuration conf, String ensemble, + Watcher watcher) + throws IOException { + return connect(conf, ensemble, watcher, null); + } + + public static RecoverableZK connect(Configuration conf, String ensemble, + Watcher watcher, final String identifier) + throws IOException { + if(ensemble == null) { + throw new IOException("Unable to determine ZooKeeper ensemble"); + } + int timeout = conf.getInt(HConstants.ZK_SESSION_TIMEOUT, + HConstants.DEFAULT_ZK_SESSION_TIMEOUT); + if (LOG.isTraceEnabled()) { + LOG.trace(identifier + " opening connection to ZooKeeper 
ensemble=" + ensemble); + } + int retry = conf.getInt("zookeeper.recovery.retry", 3); + int retryIntervalMillis = + conf.getInt("zookeeper.recovery.retry.intervalmill", 1000); + int maxSleepTime = conf.getInt("zookeeper.recovery.retry.maxsleeptime", 60000); + return new RecoverableZK(ensemble, timeout, watcher, + retry, retryIntervalMillis, maxSleepTime, identifier); + } + + /** + * Log in the current zookeeper server process using the given configuration + * keys for the credential file and login principal. + * + *

This is only applicable when running on secure hbase + * On regular HBase (without security features), this will safely be ignored. + *

+ * + * @param conf The configuration data to use + * @param keytabFileKey Property key used to configure the path to the credential file + * @param userNameKey Property key used to configure the login principal + * @param hostname Current hostname to use in any credentials + * @throws IOException underlying exception from SecurityUtil.login() call + */ + public static void loginServer(Configuration conf, String keytabFileKey, + String userNameKey, String hostname) throws IOException { + login(conf, keytabFileKey, userNameKey, hostname, + ZooKeeperSaslServer.LOGIN_CONTEXT_NAME_KEY, + JaasConfiguration.SERVER_KEYTAB_KERBEROS_CONFIG_NAME); + } + + /** + * Log in the current zookeeper client using the given configuration + * keys for the credential file and login principal. + * + *

This is only applicable when running on secure hbase + * On regular HBase (without security features), this will safely be ignored. + *

+ * + * @param conf The configuration data to use + * @param keytabFileKey Property key used to configure the path to the credential file + * @param userNameKey Property key used to configure the login principal + * @param hostname Current hostname to use in any credentials + * @throws IOException underlying exception from SecurityUtil.login() call + */ + public static void loginClient(Configuration conf, String keytabFileKey, + String userNameKey, String hostname) throws IOException { + login(conf, keytabFileKey, userNameKey, hostname, + ZooKeeperSaslClient.LOGIN_CONTEXT_NAME_KEY, + JaasConfiguration.CLIENT_KEYTAB_KERBEROS_CONFIG_NAME); + } + + /** + * Log in the current process using the given configuration keys for the + * credential file and login principal. + * + *

This is only applicable when running on secure hbase + * On regular HBase (without security features), this will safely be ignored. + *

+ * + * @param conf The configuration data to use + * @param keytabFileKey Property key used to configure the path to the credential file + * @param userNameKey Property key used to configure the login principal + * @param hostname Current hostname to use in any credentials + * @param loginContextProperty property name to expose the entry name + * @param loginContextName jaas entry name + * @throws IOException underlying exception from SecurityUtil.login() call + */ + private static void login(Configuration conf, String keytabFileKey, + String userNameKey, String hostname, + String loginContextProperty, String loginContextName) + throws IOException { + if (!isSecureZooKeeper(conf)) + return; + + // User has specified a jaas.conf, keep this one as the good one. + // HBASE_OPTS="-Djava.security.auth.login.config=jaas.conf" + if (System.getProperty("java.security.auth.login.config") != null) + return; + + // No keytab specified, no auth + String keytabFilename = conf.get(keytabFileKey); + if (keytabFilename == null) { + LOG.warn("no keytab specified for: " + keytabFileKey); + return; + } + + String principalConfig = conf.get(userNameKey, System.getProperty("user.name")); + String principalName = SecurityUtil.getServerPrincipal(principalConfig, hostname); + + // Initialize the "jaas.conf" for keyTab/principal, + // If keyTab is not specified use the Ticket Cache. + // and set the zookeeper login context name. + JaasConfiguration jaasConf = new JaasConfiguration(loginContextName, + principalName, keytabFilename); + javax.security.auth.login.Configuration.setConfiguration(jaasConf); + System.setProperty(loginContextProperty, loginContextName); + } + + /** + * A JAAS configuration that defines the login modules that we want to use for login. 
+ */ + private static class JaasConfiguration extends javax.security.auth.login.Configuration { + private static final String SERVER_KEYTAB_KERBEROS_CONFIG_NAME = + "zookeeper-server-keytab-kerberos"; + private static final String CLIENT_KEYTAB_KERBEROS_CONFIG_NAME = + "zookeeper-client-keytab-kerberos"; + + private static final Map BASIC_JAAS_OPTIONS = new HashMap<>(); + static { + String jaasEnvVar = System.getenv("HBASE_JAAS_DEBUG"); + if (jaasEnvVar != null && "true".equalsIgnoreCase(jaasEnvVar)) { + BASIC_JAAS_OPTIONS.put("debug", "true"); + } + } + + private static final Map KEYTAB_KERBEROS_OPTIONS = new HashMap<>(); + static { + KEYTAB_KERBEROS_OPTIONS.put("doNotPrompt", "true"); + KEYTAB_KERBEROS_OPTIONS.put("storeKey", "true"); + KEYTAB_KERBEROS_OPTIONS.put("refreshKrb5Config", "true"); + KEYTAB_KERBEROS_OPTIONS.putAll(BASIC_JAAS_OPTIONS); + } + + private static final AppConfigurationEntry KEYTAB_KERBEROS_LOGIN = + new AppConfigurationEntry(KerberosUtil.getKrb5LoginModuleName(), + LoginModuleControlFlag.REQUIRED, + KEYTAB_KERBEROS_OPTIONS); + + private static final AppConfigurationEntry[] KEYTAB_KERBEROS_CONF = + new AppConfigurationEntry[]{KEYTAB_KERBEROS_LOGIN}; + + private javax.security.auth.login.Configuration baseConfig; + private final String loginContextName; + private final boolean useTicketCache; + private final String keytabFile; + private final String principal; + + public JaasConfiguration(String loginContextName, String principal, String keytabFile) { + this(loginContextName, principal, keytabFile, keytabFile == null || keytabFile.length() == 0); + } + + private JaasConfiguration(String loginContextName, String principal, + String keytabFile, boolean useTicketCache) { + try { + this.baseConfig = javax.security.auth.login.Configuration.getConfiguration(); + } catch (SecurityException e) { + this.baseConfig = null; + } + this.loginContextName = loginContextName; + this.useTicketCache = useTicketCache; + this.keytabFile = keytabFile; + 
this.principal = principal; + LOG.info("JaasConfiguration loginContextName=" + loginContextName + + " principal=" + principal + " useTicketCache=" + useTicketCache + + " keytabFile=" + keytabFile); + } + + @Override + public AppConfigurationEntry[] getAppConfigurationEntry(String appName) { + if (loginContextName.equals(appName)) { + if (!useTicketCache) { + KEYTAB_KERBEROS_OPTIONS.put("keyTab", keytabFile); + KEYTAB_KERBEROS_OPTIONS.put("useKeyTab", "true"); + } + KEYTAB_KERBEROS_OPTIONS.put("principal", principal); + KEYTAB_KERBEROS_OPTIONS.put("useTicketCache", useTicketCache ? "true" : "false"); + return KEYTAB_KERBEROS_CONF; + } + if (baseConfig != null) return baseConfig.getAppConfigurationEntry(appName); + return(null); + } + } + + // + // Helper methods + // + + /** + * Join the prefix znode name with the suffix znode name to generate a proper + * full znode name. + * + * Assumes prefix does not end with slash and suffix does not begin with it. + * + * @param prefix beginning of znode name + * @param suffix ending of znode name + * @return result of properly joining prefix with suffix + */ + public static String joinZNode(String prefix, String suffix) { + return prefix + ZNODE_PATH_SEPARATOR + suffix; + } + + /** + * Returns the full path of the immediate parent of the specified node. + * @param node path to get parent of + * @return parent of path, null if passed the root node or an invalid node + */ + public static String getParent(String node) { + int idx = node.lastIndexOf(ZNODE_PATH_SEPARATOR); + return idx <= 0 ? null : node.substring(0, idx); + } + + /** + * Get the name of the current node from the specified fully-qualified path. + * @param path fully-qualified path + * @return name of the current node + */ + public static String getNodeName(String path) { + return path.substring(path.lastIndexOf("/")+1); + } + + // + // Existence checks and watches + // + + /** + * Watch the specified znode for delete/create/change events. 
The watcher is + * set whether or not the node exists. If the node already exists, the method + * returns true. If the node does not exist, the method returns false. + * + * @param zkw zk reference + * @param znode path of node to watch + * @return true if znode exists, false if does not exist or error + * @throws KeeperException if unexpected zookeeper exception + */ + public static boolean watchAndCheckExists(ZKWatcher zkw, String znode) + throws KeeperException { + try { + Stat s = zkw.getRecoverableZK().exists(znode, zkw); + boolean exists = s != null ? true : false; + if (exists) { + LOG.debug(zkw.prefix("Set watcher on existing znode=" + znode)); + } else { + LOG.debug(zkw.prefix("Set watcher on znode that does not yet exist, " + znode)); + } + return exists; + } catch (KeeperException e) { + LOG.warn(zkw.prefix("Unable to set watcher on znode " + znode), e); + zkw.keeperException(e); + return false; + } catch (InterruptedException e) { + LOG.warn(zkw.prefix("Unable to set watcher on znode " + znode), e); + zkw.interruptedException(e); + return false; + } + } + + /** + * Watch the specified znode, but only if exists. Useful when watching + * for deletions. Uses .getData() (and handles NoNodeException) instead + * of .exists() to accomplish this, as .getData() will only set a watch if + * the znode exists. + * @param zkw zk reference + * @param znode path of node to watch + * @return true if the watch is set, false if node does not exists + * @throws KeeperException if unexpected zookeeper exception + */ + public static boolean setWatchIfNodeExists(ZKWatcher zkw, String znode) + throws KeeperException { + try { + zkw.getRecoverableZK().getData(znode, true, null); + return true; + } catch (NoNodeException e) { + return false; + } catch (InterruptedException e) { + LOG.warn(zkw.prefix("Unable to set watcher on znode " + znode), e); + zkw.interruptedException(e); + return false; + } + } + + /** + * Check if the specified node exists. Sets no watches. 
+ * + * @param zkw zk reference + * @param znode path of node to watch + * @return version of the node if it exists, -1 if does not exist + * @throws KeeperException if unexpected zookeeper exception + */ + public static int checkExists(ZKWatcher zkw, String znode) + throws KeeperException { + try { + Stat s = zkw.getRecoverableZK().exists(znode, null); + return s != null ? s.getVersion() : -1; + } catch (KeeperException e) { + LOG.warn(zkw.prefix("Unable to set watcher on znode (" + znode + ")"), e); + zkw.keeperException(e); + return -1; + } catch (InterruptedException e) { + LOG.warn(zkw.prefix("Unable to set watcher on znode (" + znode + ")"), e); + zkw.interruptedException(e); + return -1; + } + } + + // + // Znode listings + // + + /** + * Lists the children znodes of the specified znode. Also sets a watch on + * the specified znode which will capture a NodeDeleted event on the specified + * znode as well as NodeChildrenChanged if any children of the specified znode + * are created or deleted. + * + * Returns null if the specified node does not exist. Otherwise returns a + * list of children of the specified node. If the node exists but it has no + * children, an empty list will be returned. 
+ * + * @param zkw zk reference + * @param znode path of node to list and watch children of + * @return list of children of the specified node, an empty list if the node + * exists but has no children, and null if the node does not exist + * @throws KeeperException if unexpected zookeeper exception + */ + public static List listChildrenAndWatchForNewChildren( + ZKWatcher zkw, String znode) + throws KeeperException { + try { + List children = zkw.getRecoverableZK().getChildren(znode, zkw); + return children; + } catch(KeeperException.NoNodeException ke) { + LOG.debug(zkw.prefix("Unable to list children of znode " + znode + " " + + "because node does not exist (not an error)")); + return null; + } catch (KeeperException e) { + LOG.warn(zkw.prefix("Unable to list children of znode " + znode + " "), e); + zkw.keeperException(e); + return null; + } catch (InterruptedException e) { + LOG.warn(zkw.prefix("Unable to list children of znode " + znode + " "), e); + zkw.interruptedException(e); + return null; + } + } + + /** + * List all the children of the specified znode, setting a watch for children + * changes and also setting a watch on every individual child in order to get + * the NodeCreated and NodeDeleted events. + * @param zkw zookeeper reference + * @param znode node to get children of and watch + * @return list of znode names, null if the node doesn't exist + * @throws KeeperException + */ + public static List listChildrenAndWatchThem(ZKWatcher zkw, + String znode) throws KeeperException { + List children = listChildrenAndWatchForNewChildren(zkw, znode); + if (children == null) { + return null; + } + for (String child : children) { + watchAndCheckExists(zkw, joinZNode(znode, child)); + } + return children; + } + + /** + * Lists the children of the specified znode without setting any watches. + * + * Sets no watches at all, this method is best effort. + * + * Returns an empty list if the node has no children. 
Returns null if the + * parent node itself does not exist. + * + * @param zkw zookeeper reference + * @param znode node to get children + * @return list of data of children of specified znode, empty if no children, + * null if parent does not exist + * @throws KeeperException if unexpected zookeeper exception + */ + public static List listChildrenNoWatch(ZKWatcher zkw, String znode) + throws KeeperException { + List children = null; + try { + // List the children without watching + children = zkw.getRecoverableZK().getChildren(znode, null); + } catch(KeeperException.NoNodeException nne) { + return null; + } catch(InterruptedException ie) { + zkw.interruptedException(ie); + } + return children; + } + + /** + * Simple class to hold a node path and node data. + * @deprecated Unused + */ + @Deprecated + public static class NodeAndData { + private String node; + private byte [] data; + public NodeAndData(String node, byte [] data) { + this.node = node; + this.data = data; + } + public String getNode() { + return node; + } + public byte [] getData() { + return data; + } + @Override + public String toString() { + return node; + } + public boolean isEmpty() { + return (data == null || data.length == 0); + } + } + + /** + * Checks if the specified znode has any children. Sets no watches. + * + * Returns true if the node exists and has children. Returns false if the + * node does not exist or if the node does not have any children. + * + * Used during master initialization to determine if the master is a + * failed-over-to master or the first master during initial cluster startup. + * If the directory for regionserver ephemeral nodes is empty then this is + * a cluster startup, if not then it is not cluster startup. 
+ * + * @param zkw zk reference + * @param znode path of node to check for children of + * @return true if node has children, false if not or node does not exist + * @throws KeeperException if unexpected zookeeper exception + */ + public static boolean nodeHasChildren(ZKWatcher zkw, String znode) + throws KeeperException { + try { + return !zkw.getRecoverableZK().getChildren(znode, null).isEmpty(); + } catch(KeeperException.NoNodeException ke) { + LOG.debug(zkw.prefix("Unable to list children of znode " + znode + " " + + "because node does not exist (not an error)")); + return false; + } catch (KeeperException e) { + LOG.warn(zkw.prefix("Unable to list children of znode " + znode), e); + zkw.keeperException(e); + return false; + } catch (InterruptedException e) { + LOG.warn(zkw.prefix("Unable to list children of znode " + znode), e); + zkw.interruptedException(e); + return false; + } + } + + /** + * Get the number of children of the specified node. + * + * If the node does not exist or has no children, returns 0. + * + * Sets no watches at all. + * + * @param zkw zk reference + * @param znode path of node to count children of + * @return number of children of specified node, 0 if none or parent does not + * exist + * @throws KeeperException if unexpected zookeeper exception + */ + public static int getNumberOfChildren(ZKWatcher zkw, String znode) + throws KeeperException { + try { + Stat stat = zkw.getRecoverableZK().exists(znode, null); + return stat == null ? 0 : stat.getNumChildren(); + } catch(KeeperException e) { + LOG.warn(zkw.prefix("Unable to get children of node " + znode)); + zkw.keeperException(e); + } catch(InterruptedException e) { + zkw.interruptedException(e); + } + return 0; + } + + // + // Data retrieval + // + + /** + * Get znode data. Does not set a watcher. + * @return ZNode data, null if the node does not exist or if there is an + * error. 
+ */ + public static byte [] getData(ZKWatcher zkw, String znode) + throws KeeperException, InterruptedException { + try { + byte [] data = zkw.getRecoverableZK().getData(znode, null, null); + logRetrievedMsg(zkw, znode, data, false); + return data; + } catch (KeeperException.NoNodeException e) { + LOG.debug(zkw.prefix("Unable to get data of znode " + znode + " " + + "because node does not exist (not an error)")); + return null; + } catch (KeeperException e) { + LOG.warn(zkw.prefix("Unable to get data of znode " + znode), e); + zkw.keeperException(e); + return null; + } + } + + /** + * Get the data at the specified znode and set a watch. + * + * Returns the data and sets a watch if the node exists. Returns null and no + * watch is set if the node does not exist or there is an exception. + * + * @param zkw zk reference + * @param znode path of node + * @return data of the specified znode, or null + * @throws KeeperException if unexpected zookeeper exception + */ + public static byte [] getDataAndWatch(ZKWatcher zkw, String znode) + throws KeeperException { + return getDataInternal(zkw, znode, null, true); + } + + /** + * Get the data at the specified znode and set a watch. + * + * Returns the data and sets a watch if the node exists. Returns null and no + * watch is set if the node does not exist or there is an exception. 
+ * + * @param zkw zk reference + * @param znode path of node + * @param stat object to populate the version of the znode + * @return data of the specified znode, or null + * @throws KeeperException if unexpected zookeeper exception + */ + public static byte[] getDataAndWatch(ZKWatcher zkw, String znode, + Stat stat) throws KeeperException { + return getDataInternal(zkw, znode, stat, true); + } + + private static byte[] getDataInternal(ZKWatcher zkw, String znode, Stat stat, + boolean watcherSet) + throws KeeperException { + try { + byte [] data = zkw.getRecoverableZK().getData(znode, zkw, stat); + logRetrievedMsg(zkw, znode, data, watcherSet); + return data; + } catch (KeeperException.NoNodeException e) { + // This log can get pretty annoying when we cycle on 100ms waits. + // Enable trace if you really want to see it. + LOG.trace(zkw.prefix("Unable to get data of znode " + znode + " " + + "because node does not exist (not an error)")); + return null; + } catch (KeeperException e) { + LOG.warn(zkw.prefix("Unable to get data of znode " + znode), e); + zkw.keeperException(e); + return null; + } catch (InterruptedException e) { + LOG.warn(zkw.prefix("Unable to get data of znode " + znode), e); + zkw.interruptedException(e); + return null; + } + } + + /** + * Get the data at the specified znode without setting a watch. + * + * Returns the data if the node exists. Returns null if the node does not + * exist. + * + * Sets the stats of the node in the passed Stat object. Pass a null stat if + * not interested. 
+ * + * @param zkw zk reference + * @param znode path of node + * @param stat node status to get if node exists + * @return data of the specified znode, or null if node does not exist + * @throws KeeperException if unexpected zookeeper exception + */ + public static byte [] getDataNoWatch(ZKWatcher zkw, String znode, + Stat stat) + throws KeeperException { + try { + byte [] data = zkw.getRecoverableZK().getData(znode, null, stat); + logRetrievedMsg(zkw, znode, data, false); + return data; + } catch (KeeperException.NoNodeException e) { + LOG.debug(zkw.prefix("Unable to get data of znode " + znode + " " + + "because node does not exist (not necessarily an error)")); + return null; + } catch (KeeperException e) { + LOG.warn(zkw.prefix("Unable to get data of znode " + znode), e); + zkw.keeperException(e); + return null; + } catch (InterruptedException e) { + LOG.warn(zkw.prefix("Unable to get data of znode " + znode), e); + zkw.interruptedException(e); + return null; + } + } + + /** + * Returns the date of child znodes of the specified znode. Also sets a watch on + * the specified znode which will capture a NodeDeleted event on the specified + * znode as well as NodeChildrenChanged if any children of the specified znode + * are created or deleted. + * + * Returns null if the specified node does not exist. Otherwise returns a + * list of children of the specified node. If the node exists but it has no + * children, an empty list will be returned. 
+ * + * @param zkw zk reference + * @param baseNode path of node to list and watch children of + * @return list of data of children of the specified node, an empty list if the node + * exists but has no children, and null if the node does not exist + * @throws KeeperException if unexpected zookeeper exception + * @deprecated Unused + */ + @Deprecated + public static List getChildDataAndWatchForNewChildren( + ZKWatcher zkw, String baseNode) throws KeeperException { + List nodes = listChildrenAndWatchForNewChildren(zkw, baseNode); + if (nodes != null) { + List newNodes = new ArrayList<>(); + for (String node : nodes) { + String nodePath = joinZNode(baseNode, node); + byte[] data = getDataAndWatch(zkw, nodePath); + newNodes.add(new NodeAndData(nodePath, data)); + } + return newNodes; + } + return null; + } + + /** + * Update the data of an existing node with the expected version to have the + * specified data. + * + * Throws an exception if there is a version mismatch or some other problem. + * + * Sets no watches under any conditions. + * + * @param zkw zk reference + * @param znode + * @param data + * @param expectedVersion + * @throws KeeperException if unexpected zookeeper exception + * @throws KeeperException.BadVersionException if version mismatch + * @deprecated Unused + */ + @Deprecated + public static void updateExistingNodeData(ZKWatcher zkw, String znode, + byte [] data, int expectedVersion) + throws KeeperException { + try { + zkw.getRecoverableZK().setData(znode, data, expectedVersion); + } catch(InterruptedException ie) { + zkw.interruptedException(ie); + } + } + + // + // Data setting + // + + /** + * Sets the data of the existing znode to be the specified data. Ensures that + * the current data has the specified expected version. + * + *

If the node does not exist, a {@link NoNodeException} will be thrown. + * + *

If their is a version mismatch, method returns null. + * + *

No watches are set but setting data will trigger other watchers of this + * node. + * + *

If there is another problem, a KeeperException will be thrown. + * + * @param zkw zk reference + * @param znode path of node + * @param data data to set for node + * @param expectedVersion version expected when setting data + * @return true if data set, false if version mismatch + * @throws KeeperException if unexpected zookeeper exception + */ + public static boolean setData(ZKWatcher zkw, String znode, + byte [] data, int expectedVersion) + throws KeeperException, KeeperException.NoNodeException { + try { + return zkw.getRecoverableZK().setData(znode, data, expectedVersion) != null; + } catch (InterruptedException e) { + zkw.interruptedException(e); + return false; + } + } + + /** + * Set data into node creating node if it doesn't yet exist. + * Does not set watch. + * + * @param zkw zk reference + * @param znode path of node + * @param data data to set for node + * @throws KeeperException + */ + public static void createSetData(final ZKWatcher zkw, final String znode, + final byte [] data) + throws KeeperException { + if (checkExists(zkw, znode) == -1) { + createWithParents(zkw, znode, data); + } else { + setData(zkw, znode, data); + } + } + + /** + * Sets the data of the existing znode to be the specified data. The node + * must exist but no checks are done on the existing data or version. + * + *

If the node does not exist, a {@link NoNodeException} will be thrown. + * + *

No watches are set but setting data will trigger other watchers of this + * node. + * + *

If there is another problem, a KeeperException will be thrown. + * + * @param zkw zk reference + * @param znode path of node + * @param data data to set for node + * @throws KeeperException if unexpected zookeeper exception + */ + public static void setData(ZKWatcher zkw, String znode, byte [] data) + throws KeeperException { + setData(zkw, (SetData)ZKUtilOp.setData(znode, data)); + } + + private static void setData(ZKWatcher zkw, SetData setData) + throws KeeperException { + SetDataRequest sd = (SetDataRequest)toZooKeeperOp(zkw, setData).toRequestRecord(); + setData(zkw, sd.getPath(), sd.getData(), sd.getVersion()); + } + + /** + * Returns whether or not secure authentication is enabled + * (whether hbase.security.authentication is set to + * kerberos. + */ + public static boolean isSecureZooKeeper(Configuration conf) { + // Detection for embedded HBase client with jaas configuration + // defined for third party programs. + try { + javax.security.auth.login.Configuration testConfig = + javax.security.auth.login.Configuration.getConfiguration(); + if (testConfig.getAppConfigurationEntry("Client") == null + && testConfig.getAppConfigurationEntry( + JaasConfiguration.CLIENT_KEYTAB_KERBEROS_CONFIG_NAME) == null + && testConfig.getAppConfigurationEntry( + JaasConfiguration.SERVER_KEYTAB_KERBEROS_CONFIG_NAME) == null + && conf.get(HConstants.ZK_CLIENT_KERBEROS_PRINCIPAL) == null + && conf.get(HConstants.ZK_SERVER_KERBEROS_PRINCIPAL) == null) { + + return false; + } + } catch(Exception e) { + // No Jaas configuration defined. 
+ return false; + } + + // Master & RSs uses hbase.zookeeper.client.* + return "kerberos".equalsIgnoreCase(conf.get("hbase.security.authentication")); + } + + private static ArrayList createACL(ZKWatcher zkw, String node) { + return createACL(zkw, node, isSecureZooKeeper(zkw.getConfiguration())); + } + + public static ArrayList createACL(ZKWatcher zkw, String node, + boolean isSecureZooKeeper) { + if (!node.startsWith(zkw.znodePaths.baseZNode)) { + return Ids.OPEN_ACL_UNSAFE; + } + if (isSecureZooKeeper) { + ArrayList acls = new ArrayList<>(); + // add permission to hbase supper user + String[] superUsers = zkw.getConfiguration().getStrings(Superusers.SUPERUSER_CONF_KEY); + String hbaseUser = null; + try { + hbaseUser = UserGroupInformation.getCurrentUser().getShortUserName(); + } catch (IOException e) { + LOG.debug("Could not acquire current User.", e); + } + if (superUsers != null) { + List groups = new ArrayList<>(); + for (String user : superUsers) { + if (AuthUtil.isGroupPrincipal(user)) { + // TODO: Set node ACL for groups when ZK supports this feature + groups.add(user); + } else { + if(!user.equals(hbaseUser)) { + acls.add(new ACL(Perms.ALL, new Id("sasl", user))); + } + } + } + if (!groups.isEmpty()) { + LOG.warn("Znode ACL setting for group " + groups + + " is skipped, ZooKeeper doesn't support this feature presently."); + } + } + // Certain znodes are accessed directly by the client, + // so they must be readable by non-authenticated clients + if (zkw.isClientReadable(node)) { + acls.addAll(Ids.CREATOR_ALL_ACL); + acls.addAll(Ids.READ_ACL_UNSAFE); + } else { + acls.addAll(Ids.CREATOR_ALL_ACL); + } + return acls; + } else { + return Ids.OPEN_ACL_UNSAFE; + } + } + + // + // Node creation + // + + /** + * + * Set the specified znode to be an ephemeral node carrying the specified + * data. + * + * If the node is created successfully, a watcher is also set on the node. 
+ * + * If the node is not created successfully because it already exists, this + * method will also set a watcher on the node. + * + * If there is another problem, a KeeperException will be thrown. + * + * @param zkw zk reference + * @param znode path of node + * @param data data of node + * @return true if node created, false if not, watch set in both cases + * @throws KeeperException if unexpected zookeeper exception + */ + public static boolean createEphemeralNodeAndWatch(ZKWatcher zkw, + String znode, byte [] data) + throws KeeperException { + boolean ret = true; + try { + zkw.getRecoverableZK().create(znode, data, createACL(zkw, znode), + CreateMode.EPHEMERAL); + } catch (KeeperException.NodeExistsException nee) { + ret = false; + } catch (InterruptedException e) { + LOG.info("Interrupted", e); + Thread.currentThread().interrupt(); + } + if(!watchAndCheckExists(zkw, znode)) { + // It did exist but now it doesn't, try again + return createEphemeralNodeAndWatch(zkw, znode, data); + } + return ret; + } + + /** + * Creates the specified znode to be a persistent node carrying the specified + * data. + * + * Returns true if the node was successfully created, false if the node + * already existed. + * + * If the node is created successfully, a watcher is also set on the node. + * + * If the node is not created successfully because it already exists, this + * method will also set a watcher on the node but return false. + * + * If there is another problem, a KeeperException will be thrown. 
+ * + * @param zkw zk reference + * @param znode path of node + * @param data data of node + * @return true if node created, false if not, watch set in both cases + * @throws KeeperException if unexpected zookeeper exception + */ + public static boolean createNodeIfNotExistsAndWatch( + ZKWatcher zkw, String znode, byte [] data) + throws KeeperException { + boolean ret = true; + try { + zkw.getRecoverableZK().create(znode, data, createACL(zkw, znode), + CreateMode.PERSISTENT); + } catch (KeeperException.NodeExistsException nee) { + ret = false; + } catch (InterruptedException e) { + zkw.interruptedException(e); + return false; + } + try { + zkw.getRecoverableZK().exists(znode, zkw); + } catch (InterruptedException e) { + zkw.interruptedException(e); + return false; + } + return ret; + } + + /** + * Creates the specified znode with the specified data but does not watch it. + * + * Returns the znode of the newly created node + * + * If there is another problem, a KeeperException will be thrown. + * + * @param zkw zk reference + * @param znode path of node + * @param data data of node + * @param createMode specifying whether the node to be created is ephemeral and/or sequential + * @return true name of the newly created znode or null + * @throws KeeperException if unexpected zookeeper exception + */ + public static String createNodeIfNotExistsNoWatch(ZKWatcher zkw, String znode, + byte[] data, CreateMode createMode) throws KeeperException { + + String createdZNode = null; + try { + createdZNode = zkw.getRecoverableZK().create(znode, data, + createACL(zkw, znode), createMode); + } catch (KeeperException.NodeExistsException nee) { + return znode; + } catch (InterruptedException e) { + zkw.interruptedException(e); + return null; + } + return createdZNode; + } + + /** + * Creates the specified node with the specified data and watches it. + * + *

Throws an exception if the node already exists. + * + *

The node created is persistent and open access. + * + *

Returns the version number of the created node if successful. + * + * @param zkw zk reference + * @param znode path of node to create + * @param data data of node to create + * @return version of node created + * @throws KeeperException if unexpected zookeeper exception + * @throws KeeperException.NodeExistsException if node already exists + */ + public static int createAndWatch(ZKWatcher zkw, + String znode, byte [] data) + throws KeeperException, KeeperException.NodeExistsException { + try { + zkw.getRecoverableZK().create(znode, data, createACL(zkw, znode), + CreateMode.PERSISTENT); + Stat stat = zkw.getRecoverableZK().exists(znode, zkw); + if (stat == null){ + // Likely a race condition. Someone deleted the znode. + throw KeeperException.create(KeeperException.Code.SYSTEMERROR, + "ZK.exists returned null (i.e.: znode does not exist) for znode=" + znode); + } + return stat.getVersion(); + } catch (InterruptedException e) { + zkw.interruptedException(e); + return -1; + } + } + + /** + * Async creates the specified node with the specified data. + * + *

Throws an exception if the node already exists. + * + *

The node created is persistent and open access. + * + * @param zkw zk reference + * @param znode path of node to create + * @param data data of node to create + * @param cb + * @param ctx + */ + public static void asyncCreate(ZKWatcher zkw, + String znode, byte [] data, final AsyncCallback.StringCallback cb, + final Object ctx) { + zkw.getRecoverableZK().getZooKeeper().create(znode, data, + createACL(zkw, znode), CreateMode.PERSISTENT, cb, ctx); + } + + /** + * Creates the specified node, iff the node does not exist. Does not set a + * watch and fails silently if the node already exists. + * + * The node created is persistent and open access. + * + * @param zkw zk reference + * @param znode path of node + * @throws KeeperException if unexpected zookeeper exception + */ + public static void createAndFailSilent(ZKWatcher zkw, + String znode) throws KeeperException { + createAndFailSilent(zkw, znode, new byte[0]); + } + + /** + * Creates the specified node containing specified data, iff the node does not exist. Does + * not set a watch and fails silently if the node already exists. + * + * The node created is persistent and open access. 
+ * + * @param zkw zk reference + * @param znode path of node + * @param data a byte array data to store in the znode + * @throws KeeperException if unexpected zookeeper exception + */ + public static void createAndFailSilent(ZKWatcher zkw, + String znode, byte[] data) + throws KeeperException { + createAndFailSilent(zkw, + (CreateAndFailSilent)ZKUtilOp.createAndFailSilent(znode, data)); + } + + private static void createAndFailSilent(ZKWatcher zkw, CreateAndFailSilent cafs) + throws KeeperException { + CreateRequest create = (CreateRequest)toZooKeeperOp(zkw, cafs).toRequestRecord(); + String znode = create.getPath(); + try { + RecoverableZK zk = zkw.getRecoverableZK(); + if (zk.exists(znode, false) == null) { + zk.create(znode, create.getData(), create.getAcl(), CreateMode.fromFlag(create.getFlags())); + } + } catch(KeeperException.NodeExistsException nee) { + } catch(KeeperException.NoAuthException nee){ + try { + if (null == zkw.getRecoverableZK().exists(znode, false)) { + // If we failed to create the file and it does not already exist. + throw(nee); + } + } catch (InterruptedException ie) { + zkw.interruptedException(ie); + } + } catch(InterruptedException ie) { + zkw.interruptedException(ie); + } + } + + /** + * Creates the specified node and all parent nodes required for it to exist. + * + * No watches are set and no errors are thrown if the node already exists. + * + * The nodes created are persistent and open access. + * + * @param zkw zk reference + * @param znode path of node + * @throws KeeperException if unexpected zookeeper exception + */ + public static void createWithParents(ZKWatcher zkw, String znode) + throws KeeperException { + createWithParents(zkw, znode, new byte[0]); + } + + /** + * Creates the specified node and all parent nodes required for it to exist. The creation of + * parent znodes is not atomic with the leafe znode creation but the data is written atomically + * when the leaf node is created. 
+ * + * No watches are set and no errors are thrown if the node already exists. + * + * The nodes created are persistent and open access. + * + * @param zkw zk reference + * @param znode path of node + * @throws KeeperException if unexpected zookeeper exception + */ + public static void createWithParents(ZKWatcher zkw, String znode, byte[] data) + throws KeeperException { + try { + if(znode == null) { + return; + } + zkw.getRecoverableZK().create(znode, data, createACL(zkw, znode), + CreateMode.PERSISTENT); + } catch(KeeperException.NodeExistsException nee) { + return; + } catch(KeeperException.NoNodeException nne) { + createWithParents(zkw, getParent(znode)); + createWithParents(zkw, znode, data); + } catch(InterruptedException ie) { + zkw.interruptedException(ie); + } + } + + // + // Deletes + // + + /** + * Delete the specified node. Sets no watches. Throws all exceptions. + */ + public static void deleteNode(ZKWatcher zkw, String node) + throws KeeperException { + deleteNode(zkw, node, -1); + } + + /** + * Delete the specified node with the specified version. Sets no watches. + * Throws all exceptions. + */ + public static boolean deleteNode(ZKWatcher zkw, String node, + int version) + throws KeeperException { + try { + zkw.getRecoverableZK().delete(node, version); + return true; + } catch(KeeperException.BadVersionException bve) { + return false; + } catch(InterruptedException ie) { + zkw.interruptedException(ie); + return false; + } + } + + /** + * Deletes the specified node. Fails silent if the node does not exist. 
+ * @param zkw + * @param node + * @throws KeeperException + */ + public static void deleteNodeFailSilent(ZKWatcher zkw, String node) + throws KeeperException { + deleteNodeFailSilent(zkw, + (DeleteNodeFailSilent)ZKUtilOp.deleteNodeFailSilent(node)); + } + + private static void deleteNodeFailSilent(ZKWatcher zkw, + DeleteNodeFailSilent dnfs) throws KeeperException { + DeleteRequest delete = (DeleteRequest)toZooKeeperOp(zkw, dnfs).toRequestRecord(); + try { + zkw.getRecoverableZK().delete(delete.getPath(), delete.getVersion()); + } catch(KeeperException.NoNodeException nne) { + } catch(InterruptedException ie) { + zkw.interruptedException(ie); + } + } + + + /** + * Delete the specified node and all of it's children. + *

+ * If the node does not exist, just returns. + *

+ * Sets no watches. Throws all exceptions besides dealing with deletion of + * children. + */ + public static void deleteNodeRecursively(ZKWatcher zkw, String node) + throws KeeperException { + deleteNodeRecursivelyMultiOrSequential(zkw, true, node); + } + + /** + * Delete all the children of the specified node but not the node itself. + * + * Sets no watches. Throws all exceptions besides dealing with deletion of + * children. + * + * @throws KeeperException + */ + public static void deleteChildrenRecursively(ZKWatcher zkw, String node) + throws KeeperException { + deleteChildrenRecursivelyMultiOrSequential(zkw, true, node); + } + + /** + * Delete all the children of the specified node but not the node itself. This + * will first traverse the znode tree for listing the children and then delete + * these znodes using multi-update api or sequential based on the specified + * configurations. + *

+ * Sets no watches. Throws all exceptions besides dealing with deletion of + * children. + *

+ * If the following is true: + *

    + *
  • runSequentialOnMultiFailure is true + *
+ * on calling multi, we get a ZooKeeper exception that can be handled by a + * sequential call(*), we retry the operations one-by-one (sequentially). + * + * @param zkw + * - zk reference + * @param runSequentialOnMultiFailure + * - if true when we get a ZooKeeper exception that could retry the + * operations one-by-one (sequentially) + * @param pathRoots + * - path of the parent node(s) + * @throws KeeperException.NotEmptyException + * if node has children while deleting + * @throws KeeperException + * if unexpected ZooKeeper exception + * @throws IllegalArgumentException + * if an invalid path is specified + */ + public static void deleteChildrenRecursivelyMultiOrSequential( + ZKWatcher zkw, boolean runSequentialOnMultiFailure, + String... pathRoots) throws KeeperException { + if (pathRoots == null || pathRoots.length <= 0) { + LOG.warn("Given path is not valid!"); + return; + } + List ops = new ArrayList<>(); + for (String eachRoot : pathRoots) { + List children = listChildrenBFSNoWatch(zkw, eachRoot); + // Delete the leaves first and eventually get rid of the root + for (int i = children.size() - 1; i >= 0; --i) { + ops.add(ZKUtilOp.deleteNodeFailSilent(children.get(i))); + } + } + // atleast one element should exist + if (ops.size() > 0) { + multiOrSequential(zkw, ops, runSequentialOnMultiFailure); + } + } + + /** + * Delete the specified node and its children. This traverse the + * znode tree for listing the children and then delete + * these znodes including the parent using multi-update api or + * sequential based on the specified configurations. + *

+ * Sets no watches. Throws all exceptions besides dealing with deletion of + * children. + *

+ * If the following is true: + *

    + *
  • runSequentialOnMultiFailure is true + *
+ * on calling multi, we get a ZooKeeper exception that can be handled by a + * sequential call(*), we retry the operations one-by-one (sequentially). + * + * @param zkw + * - zk reference + * @param runSequentialOnMultiFailure + * - if true when we get a ZooKeeper exception that could retry the + * operations one-by-one (sequentially) + * @param pathRoots + * - path of the parent node(s) + * @throws KeeperException.NotEmptyException + * if node has children while deleting + * @throws KeeperException + * if unexpected ZooKeeper exception + * @throws IllegalArgumentException + * if an invalid path is specified + */ + public static void deleteNodeRecursivelyMultiOrSequential(ZKWatcher zkw, + boolean runSequentialOnMultiFailure, String... pathRoots) throws KeeperException { + if (pathRoots == null || pathRoots.length <= 0) { + LOG.warn("Given path is not valid!"); + return; + } + List ops = new ArrayList<>(); + for (String eachRoot : pathRoots) { + // ZooKeeper Watches are one time triggers; When children of parent nodes are deleted + // recursively, must set another watch, get notified of delete node + List children = listChildrenBFSAndWatchThem(zkw, eachRoot); + // Delete the leaves first and eventually get rid of the root + for (int i = children.size() - 1; i >= 0; --i) { + ops.add(ZKUtilOp.deleteNodeFailSilent(children.get(i))); + } + try { + if (zkw.getRecoverableZK().exists(eachRoot, zkw) != null) { + ops.add(ZKUtilOp.deleteNodeFailSilent(eachRoot)); + } + } catch (InterruptedException e) { + zkw.interruptedException(e); + } + } + // atleast one element should exist + if (ops.size() > 0) { + multiOrSequential(zkw, ops, runSequentialOnMultiFailure); + } + } + + /** + * BFS Traversal of all the children under path, with the entries in the list, + * in the same order as that of the traversal. Lists all the children without + * setting any watches. 
+ * + * @param zkw + * - zk reference + * @param znode + * - path of node + * @return list of children znodes under the path + * @throws KeeperException + * if unexpected ZooKeeper exception + */ + private static List listChildrenBFSNoWatch(ZKWatcher zkw, + final String znode) throws KeeperException { + Deque queue = new LinkedList<>(); + List tree = new ArrayList<>(); + queue.add(znode); + while (true) { + String node = queue.pollFirst(); + if (node == null) { + break; + } + List children = listChildrenNoWatch(zkw, node); + if (children == null) { + continue; + } + for (final String child : children) { + final String childPath = node + "/" + child; + queue.add(childPath); + tree.add(childPath); + } + } + return tree; + } + + /** + * BFS Traversal of all the children under path, with the entries in the list, + * in the same order as that of the traversal. + * Lists all the children and set watches on to them. + * + * @param zkw + * - zk reference + * @param znode + * - path of node + * @return list of children znodes under the path + * @throws KeeperException + * if unexpected ZooKeeper exception + */ + private static List listChildrenBFSAndWatchThem(ZKWatcher zkw, final String znode) + throws KeeperException { + Deque queue = new LinkedList<>(); + List tree = new ArrayList<>(); + queue.add(znode); + while (true) { + String node = queue.pollFirst(); + if (node == null) { + break; + } + List children = listChildrenAndWatchThem(zkw, node); + if (children == null) { + continue; + } + for (final String child : children) { + final String childPath = node + "/" + child; + queue.add(childPath); + tree.add(childPath); + } + } + return tree; + } + + /** + * Represents an action taken by ZKUtil, e.g. createAndFailSilent. + * These actions are higher-level than ZKOp actions, which represent + * individual actions in the ZooKeeper API, like create. 
+ */ + public abstract static class ZKUtilOp { + private String path; + + private ZKUtilOp(String path) { + this.path = path; + } + + /** + * @return a createAndFailSilent ZKUtilOp + */ + public static ZKUtilOp createAndFailSilent(String path, byte[] data) { + return new CreateAndFailSilent(path, data); + } + + /** + * @return a deleteNodeFailSilent ZKUtilOP + */ + public static ZKUtilOp deleteNodeFailSilent(String path) { + return new DeleteNodeFailSilent(path); + } + + /** + * @return a setData ZKUtilOp + */ + public static ZKUtilOp setData(String path, byte [] data) { + return new SetData(path, data); + } + + /** + * @return path to znode where the ZKOp will occur + */ + public String getPath() { + return path; + } + + /** + * ZKUtilOp representing createAndFailSilent in ZooKeeper + * (attempt to create node, ignore error if already exists) + */ + public static class CreateAndFailSilent extends ZKUtilOp { + private byte [] data; + + private CreateAndFailSilent(String path, byte [] data) { + super(path); + this.data = data; + } + + public byte[] getData() { + return data; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (!(o instanceof CreateAndFailSilent)) return false; + + CreateAndFailSilent op = (CreateAndFailSilent) o; + return getPath().equals(op.getPath()) && Arrays.equals(data, op.data); + } + + @Override + public int hashCode() { + int ret = 17 + getPath().hashCode() * 31; + return ret * 31 + Bytes.hashCode(data); + } + } + + /** + * ZKUtilOp representing deleteNodeFailSilent in ZooKeeper + * (attempt to delete node, ignore error if node doesn't exist) + */ + public static class DeleteNodeFailSilent extends ZKUtilOp { + private DeleteNodeFailSilent(String path) { + super(path); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (!(o instanceof DeleteNodeFailSilent)) return false; + + return super.equals(o); + } + + @Override + public int hashCode() { + return 
getPath().hashCode(); + } + } + + /** + * ZKUtilOp representing setData in ZooKeeper + */ + public static class SetData extends ZKUtilOp { + private byte [] data; + + private SetData(String path, byte [] data) { + super(path); + this.data = data; + } + + public byte[] getData() { + return data; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (!(o instanceof SetData)) return false; + + SetData op = (SetData) o; + return getPath().equals(op.getPath()) && Arrays.equals(data, op.data); + } + + @Override + public int hashCode() { + int ret = getPath().hashCode(); + return ret * 31 + Bytes.hashCode(data); + } + } + } + + /** + * Convert from ZKUtilOp to ZKOp + */ + private static Op toZooKeeperOp(ZKWatcher zkw, ZKUtilOp op) + throws UnsupportedOperationException { + if(op == null) return null; + + if (op instanceof CreateAndFailSilent) { + CreateAndFailSilent cafs = (CreateAndFailSilent)op; + return Op.create(cafs.getPath(), cafs.getData(), createACL(zkw, cafs.getPath()), + CreateMode.PERSISTENT); + } else if (op instanceof DeleteNodeFailSilent) { + DeleteNodeFailSilent dnfs = (DeleteNodeFailSilent)op; + return Op.delete(dnfs.getPath(), -1); + } else if (op instanceof SetData) { + SetData sd = (SetData)op; + return Op.setData(sd.getPath(), sd.getData(), -1); + } else { + throw new UnsupportedOperationException("Unexpected ZKUtilOp type: " + + op.getClass().getName()); + } + } + + /** + * Use ZooKeeper's multi-update functionality. + * + * If all of the following are true: + * - runSequentialOnMultiFailure is true + * - on calling multi, we get a ZooKeeper exception that can be handled by a sequential call(*) + * Then: + * - we retry the operations one-by-one (sequentially) + * + * Note *: an example is receiving a NodeExistsException from a "create" call. Without multi, + * a user could call "createAndFailSilent" to ensure that a node exists if they don't care who + * actually created the node (i.e. 
the NodeExistsException from ZooKeeper is caught). + * This will cause all operations in the multi to fail, however, because + * the NodeExistsException that zk.create throws will fail the multi transaction. + * In this case, if the previous conditions hold, the commands are run sequentially, which should + * result in the correct final state, but means that the operations will not run atomically. + * + * @throws KeeperException + */ + public static void multiOrSequential(ZKWatcher zkw, List ops, + boolean runSequentialOnMultiFailure) throws KeeperException { + if (zkw.getConfiguration().get("hbase.zookeeper.useMulti") != null) { + LOG.warn("hbase.zookeeper.useMulti is deprecated. Default to true always."); + } + if (ops == null) return; + + List zkOps = new LinkedList<>(); + for (ZKUtilOp op : ops) { + zkOps.add(toZooKeeperOp(zkw, op)); + } + try { + zkw.getRecoverableZK().multi(zkOps); + } catch (KeeperException ke) { + switch (ke.code()) { + case NODEEXISTS: + case NONODE: + case BADVERSION: + case NOAUTH: + // if we get an exception that could be solved by running sequentially + // (and the client asked us to), then break out and run sequentially + if (runSequentialOnMultiFailure) { + LOG.info("On call to ZK.multi, received exception: " + ke.toString() + "." 
+ + " Attempting to run operations sequentially because" + + " runSequentialOnMultiFailure is: " + runSequentialOnMultiFailure + "."); + processSequentially(zkw, ops); + break; + } + default: + throw ke; + } + } catch (InterruptedException ie) { + zkw.interruptedException(ie); + } + } + + private static void processSequentially(ZKWatcher zkw, List ops) + throws KeeperException, NoNodeException { + for (ZKUtilOp op : ops) { + if (op instanceof CreateAndFailSilent) { + createAndFailSilent(zkw, (CreateAndFailSilent) op); + } else if (op instanceof DeleteNodeFailSilent) { + deleteNodeFailSilent(zkw, (DeleteNodeFailSilent) op); + } else if (op instanceof SetData) { + setData(zkw, (SetData) op); + } else { + throw new UnsupportedOperationException("Unexpected ZKUtilOp type: " + + op.getClass().getName()); + } + } + } + + + // + // ZooKeeper cluster information + // + + static void appendHFileRefsZnodes(ZKWatcher zkw, String hfileRefsZnode, StringBuilder sb) throws KeeperException { + sb.append("\n").append(hfileRefsZnode).append(": "); + for (String peerIdZnode : listChildrenNoWatch(zkw, hfileRefsZnode)) { + String znodeToProcess = joinZNode(hfileRefsZnode, peerIdZnode); + sb.append("\n").append(znodeToProcess).append(": "); + List peerHFileRefsZnodes = listChildrenNoWatch(zkw, znodeToProcess); + int size = peerHFileRefsZnodes.size(); + for (int i = 0; i < size; i++) { + sb.append(peerHFileRefsZnodes.get(i)); + if (i != size - 1) { + sb.append(", "); + } + } + } + } + + /** + * Gets the statistics from the given server. + * + * @param server The server to get the statistics from. + * @param timeout The socket timeout to use. + * @return The array of response strings. + * @throws IOException When the socket communication fails. + */ + public static String[] getServerStats(String server, int timeout) + throws IOException { + String[] sp = server.split(":"); + if (sp == null || sp.length == 0) { + return null; + } + + String host = sp[0]; + int port = sp.length > 1 ? 
Integer.parseInt(sp[1]) + : HConstants.DEFAULT_ZOOKEPER_CLIENT_PORT; + + InetSocketAddress sockAddr = new InetSocketAddress(host, port); + try (Socket socket = new Socket()) { + socket.connect(sockAddr, timeout); + + socket.setSoTimeout(timeout); + try (PrintWriter out = new PrintWriter(socket.getOutputStream(), true); + BufferedReader in = new BufferedReader(new InputStreamReader(socket.getInputStream()))) { + out.println("stat"); + out.flush(); + ArrayList res = new ArrayList<>(); + while (true) { + String line = in.readLine(); + if (line != null) { + res.add(line); + } else { + break; + } + } + return res.toArray(new String[res.size()]); + } + } + } + + private static void logRetrievedMsg(final ZKWatcher zkw, + final String znode, final byte [] data, final boolean watcherSet) { + if (!LOG.isTraceEnabled()) return; + LOG.trace(zkw.prefix("Retrieved " + ((data == null)? 0: data.length) + + " byte(s) of data from znode " + znode + + (watcherSet? " and set watcher; ": "; data=") + + (data == null? "null": data.length == 0? "empty": ( + znode.startsWith(zkw.znodePaths.metaZNodePrefix)? + getServerNameOrEmptyString(data): + znode.startsWith(zkw.znodePaths.backupMasterAddressesZNode)? + getServerNameOrEmptyString(data): + StringUtils.abbreviate(Bytes.toStringBinary(data), 32))))); + } + + private static String getServerNameOrEmptyString(final byte [] data) { + try { + return parseServerNameFrom(data).toString(); + } catch (DeserializationException e) { + return ""; + } + } + + /** + * Waits for HBase installation's base (parent) znode to become available. 
+ * @throws IOException on ZK errors + */ + public static void waitForBaseZNode(Configuration conf) throws IOException { + LOG.info("Waiting until the base znode is available"); + String parentZNode = conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT, + HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT); + ZooKeeper zk = new ZooKeeper(ZKConfig.getZKQuorumServersString(conf), + conf.getInt(HConstants.ZK_SESSION_TIMEOUT, + HConstants.DEFAULT_ZK_SESSION_TIMEOUT), EmptyWatcher.instance); + + final int maxTimeMs = 10000; + final int maxNumAttempts = maxTimeMs / HConstants.SOCKET_RETRY_WAIT_MS; + + KeeperException keeperEx = null; + try { + try { + for (int attempt = 0; attempt < maxNumAttempts; ++attempt) { + try { + if (zk.exists(parentZNode, false) != null) { + LOG.info("Parent znode exists: " + parentZNode); + keeperEx = null; + break; + } + } catch (KeeperException e) { + keeperEx = e; + } + Threads.sleepWithoutInterrupt(HConstants.SOCKET_RETRY_WAIT_MS); + } + } finally { + zk.close(); + } + } catch (InterruptedException ex) { + Thread.currentThread().interrupt(); + } + + if (keeperEx != null) { + throw new IOException(keeperEx); + } + } + + /** + * Convert a {@link DeserializationException} to a more palatable {@link KeeperException}. + * Used when can't let a {@link DeserializationException} out w/o changing public API. 
+ * @param e Exception to convert + * @return Converted exception + */ + public static KeeperException convert(final DeserializationException e) { + KeeperException ke = new KeeperException.DataInconsistencyException(); + ke.initCause(e); + return ke; + } + + /** + * Recursively print the current state of ZK (non-transactional) + * @param root name of the root directory in zk to print + */ + public static void logZKTree(ZKWatcher zkw, String root) { + if (!LOG.isDebugEnabled()) return; + LOG.debug("Current zk system:"); + String prefix = "|-"; + LOG.debug(prefix + root); + try { + logZKTree(zkw, root, prefix); + } catch (KeeperException e) { + throw new RuntimeException(e); + } + } + + /** + * Helper method to print the current state of the ZK tree. + * @see #logZKTree(ZKWatcher, String) + * @throws KeeperException if an unexpected exception occurs + */ + protected static void logZKTree(ZKWatcher zkw, String root, String prefix) + throws KeeperException { + List children = listChildrenNoWatch(zkw, root); + if (children == null) return; + for (String child : children) { + LOG.debug(prefix + child); + String node = joinZNode(root.equals("/") ? "" : root, child); + logZKTree(zkw, node, prefix + "---"); + } + } + + + /** + * NOTE: This is copy of ProtobufUtil#parseServerNameFrom. + * This one function was preventing move of many functions from hbase-client#ZKUtil to this + * class which in turn was blocking larger hbase-zookeeper dependency untangle. + * + * Get a ServerName from the passed in data bytes. + * @param data Data with a serialize server name in it; can handle the old style + * servername where servername was host and port. Works too with data that + * begins w/ the pb 'PBUF' magic and that is then followed by a protobuf that + * has a serialized {@link ServerName} in it. + * @return Returns null if data is null else converts passed data + * to a ServerName instance. 
+ * @throws DeserializationException + */ + static ServerName parseServerNameFrom(final byte [] data) throws DeserializationException { + if (data == null || data.length <= 0) return null; + if (ProtobufMagic.isPBMagicPrefix(data)) { + int prefixLen = ProtobufMagic.lengthOfPBMagic(); + try { + ZooKeeperProtos.Master rss = + ZooKeeperProtos.Master.PARSER.parseFrom(data, prefixLen, data.length - prefixLen); + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName sn = + rss.getMaster(); + return ServerName.valueOf(sn.getHostName(), sn.getPort(), sn.getStartCode()); + } catch (/*InvalidProtocolBufferException*/IOException e) { + // A failed parse of the znode is pretty catastrophic. Rather than loop + // retrying hoping the bad bytes will changes, and rather than change + // the signature on this method to add an IOE which will send ripples all + // over the code base, throw a RuntimeException. This should "never" happen. + // Fail fast if it does. + throw new DeserializationException(e); + } + } + // The str returned could be old style -- pre hbase-1502 -- which was + // hostname and port seperated by a colon rather than hostname, port and + // startcode delimited by a ','. + String str = Bytes.toString(data); + int index = str.indexOf(ServerName.SERVERNAME_SEPARATOR); + if (index != -1) { + // Presume its ServerName serialized with versioned bytes. + return ServerName.parseVersionedServerName(data); + } + // Presume it a hostname:port format. 
+ String hostname = Addressing.parseHostname(str); + int port = Addressing.parsePort(str); + return ServerName.valueOf(hostname, port, -1L); + } +} diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/zookeeper/TestInstancePending.java b/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestInstancePending.java similarity index 100% rename from hbase-client/src/test/java/org/apache/hadoop/hbase/zookeeper/TestInstancePending.java rename to hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestInstancePending.java diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/zookeeper/TestMetricsZooKeeper.java b/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKMetrics.java similarity index 70% rename from hbase-client/src/test/java/org/apache/hadoop/hbase/zookeeper/TestMetricsZooKeeper.java rename to hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKMetrics.java index a987bec629..9d941370ae 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/zookeeper/TestMetricsZooKeeper.java +++ b/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKMetrics.java @@ -24,25 +24,25 @@ import org.junit.experimental.categories.Category; import static org.mockito.Mockito.*; @Category(SmallTests.class) -public class TestMetricsZooKeeper { +public class TestZKMetrics { @Test public void testRegisterExceptions() { MetricsZooKeeperSource zkSource = mock(MetricsZooKeeperSourceImpl.class); - MetricsZooKeeper metricsZK = new MetricsZooKeeper(zkSource); - metricsZK.registerAuthFailedException(); - metricsZK.registerConnectionLossException(); - metricsZK.registerConnectionLossException(); - metricsZK.registerDataInconsistencyException(); - metricsZK.registerInvalidACLException(); - metricsZK.registerNoAuthException(); - metricsZK.registerOperationTimeoutException(); - metricsZK.registerOperationTimeoutException(); - metricsZK.registerRuntimeInconsistencyException(); - 
metricsZK.registerSessionExpiredException(); - metricsZK.registerSystemErrorException(); - metricsZK.registerSystemErrorException(); - metricsZK.registerFailedZKCall(); + ZKMetrics ZKMetrics = new ZKMetrics(zkSource); + ZKMetrics.registerAuthFailedException(); + ZKMetrics.registerConnectionLossException(); + ZKMetrics.registerConnectionLossException(); + ZKMetrics.registerDataInconsistencyException(); + ZKMetrics.registerInvalidACLException(); + ZKMetrics.registerNoAuthException(); + ZKMetrics.registerOperationTimeoutException(); + ZKMetrics.registerOperationTimeoutException(); + ZKMetrics.registerRuntimeInconsistencyException(); + ZKMetrics.registerSessionExpiredException(); + ZKMetrics.registerSystemErrorException(); + ZKMetrics.registerSystemErrorException(); + ZKMetrics.registerFailedZKCall(); verify(zkSource, times(1)).incrementAuthFailedCount(); // ConnectionLoss Exception was registered twice. @@ -62,13 +62,13 @@ public class TestMetricsZooKeeper { @Test public void testLatencyHistogramUpdates() { MetricsZooKeeperSource zkSource = mock(MetricsZooKeeperSourceImpl.class); - MetricsZooKeeper metricsZK = new MetricsZooKeeper(zkSource); + ZKMetrics ZKMetrics = new ZKMetrics(zkSource); long latency = 100; - metricsZK.registerReadOperationLatency(latency); - metricsZK.registerReadOperationLatency(latency); - metricsZK.registerWriteOperationLatency(latency); - metricsZK.registerSyncOperationLatency(latency); + ZKMetrics.registerReadOperationLatency(latency); + ZKMetrics.registerReadOperationLatency(latency); + ZKMetrics.registerWriteOperationLatency(latency); + ZKMetrics.registerSyncOperationLatency(latency); // Read Operation Latency update was registered twice. 
verify(zkSource, times(2)).recordReadOperationLatency(latency); verify(zkSource, times(1)).recordWriteOperationLatency(latency); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKUtil.java b/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKUtil.java similarity index 82% rename from hbase-client/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKUtil.java rename to hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKUtil.java index 53dcdbc50c..b78927c13a 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKUtil.java +++ b/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKUtil.java @@ -23,7 +23,6 @@ import java.util.List; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; -import org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.hadoop.hbase.security.Superusers; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.security.UserGroupInformation; @@ -48,8 +47,8 @@ public class TestZKUtil { Configuration conf = HBaseConfiguration.create(); conf.set(Superusers.SUPERUSER_CONF_KEY, "user1"); String node = "/hbase/testUnsecure"; - ZooKeeperWatcher watcher = new ZooKeeperWatcher(conf, node, null, false); - List aclList = ZKUtil.createACL(watcher, node, false); + ZKWatcher watcher = new ZKWatcher(conf, node, null, false); + List aclList = ZooKeeperUtil.createACL(watcher, node, false); Assert.assertEquals(aclList.size(), 1); Assert.assertTrue(aclList.contains(Ids.OPEN_ACL_UNSAFE.iterator().next())); } @@ -59,8 +58,8 @@ public class TestZKUtil { Configuration conf = HBaseConfiguration.create(); conf.set(Superusers.SUPERUSER_CONF_KEY, "user1"); String node = "/hbase/testSecuritySingleSuperuser"; - ZooKeeperWatcher watcher = new ZooKeeperWatcher(conf, node, null, false); - List aclList = ZKUtil.createACL(watcher, node, true); + ZKWatcher watcher = new 
ZKWatcher(conf, node, null, false); + List aclList = ZooKeeperUtil.createACL(watcher, node, true); Assert.assertEquals(aclList.size(), 2); // 1+1, since ACL will be set for the creator by default Assert.assertTrue(aclList.contains(new ACL(Perms.ALL, new Id("sasl", "user1")))); Assert.assertTrue(aclList.contains(Ids.CREATOR_ALL_ACL.iterator().next())); @@ -71,8 +70,8 @@ public class TestZKUtil { Configuration conf = HBaseConfiguration.create(); conf.set(Superusers.SUPERUSER_CONF_KEY, "user1,@group1,user2,@group2,user3"); String node = "/hbase/testCreateACL"; - ZooKeeperWatcher watcher = new ZooKeeperWatcher(conf, node, null, false); - List aclList = ZKUtil.createACL(watcher, node, true); + ZKWatcher watcher = new ZKWatcher(conf, node, null, false); + List aclList = ZooKeeperUtil.createACL(watcher, node, true); Assert.assertEquals(aclList.size(), 4); // 3+1, since ACL will be set for the creator by default Assert.assertFalse(aclList.contains(new ACL(Perms.ALL, new Id("sasl", "@group1")))); Assert.assertFalse(aclList.contains(new ACL(Perms.ALL, new Id("sasl", "@group2")))); @@ -87,8 +86,8 @@ public class TestZKUtil { conf.set(Superusers.SUPERUSER_CONF_KEY, "user4,@group1,user5,user6"); UserGroupInformation.setLoginUser(UserGroupInformation.createRemoteUser("user4")); String node = "/hbase/testCreateACL"; - ZooKeeperWatcher watcher = new ZooKeeperWatcher(conf, node, null, false); - List aclList = ZKUtil.createACL(watcher, node, true); + ZKWatcher watcher = new ZKWatcher(conf, node, null, false); + List aclList = ZooKeeperUtil.createACL(watcher, node, true); Assert.assertEquals(aclList.size(), 3); // 3, since service user the same as one of superuser Assert.assertFalse(aclList.contains(new ACL(Perms.ALL, new Id("sasl", "@group1")))); Assert.assertTrue(aclList.contains(new ACL(Perms.ALL, new Id("auth", "")))); @@ -98,17 +97,17 @@ public class TestZKUtil { public void testInterruptedDuringAction() throws ZooKeeperConnectionException, IOException, KeeperException, 
InterruptedException { - final RecoverableZooKeeper recoverableZk = Mockito.mock(RecoverableZooKeeper.class); - ZooKeeperWatcher zkw = new ZooKeeperWatcher(HBaseConfiguration.create(), "unittest", null) { + final RecoverableZK recoverableZk = Mockito.mock(RecoverableZK.class); + ZKWatcher zkw = new ZKWatcher(HBaseConfiguration.create(), "unittest", null) { @Override - public RecoverableZooKeeper getRecoverableZooKeeper() { + public RecoverableZK getRecoverableZK() { return recoverableZk; } }; Mockito.doThrow(new InterruptedException()).when(recoverableZk) .getChildren(zkw.znodePaths.baseZNode, null); try { - ZKUtil.listChildrenNoWatch(zkw, zkw.znodePaths.baseZNode); + ZooKeeperUtil.listChildrenNoWatch(zkw, zkw.znodePaths.baseZNode); } catch (KeeperException.SystemErrorException e) { // expected return; diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZooKeeperWatcher.java b/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKWatcher.java similarity index 89% rename from hbase-client/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZooKeeperWatcher.java rename to hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKWatcher.java index de2ec2a236..1a26c53456 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZooKeeperWatcher.java +++ b/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKWatcher.java @@ -24,17 +24,16 @@ import static org.junit.Assert.assertTrue; import java.io.IOException; import org.apache.hadoop.hbase.HBaseConfiguration; -import org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.junit.Test; import org.junit.experimental.categories.Category; @Category({ SmallTests.class }) -public class TestZooKeeperWatcher { +public class TestZKWatcher { @Test public void testIsClientReadable() throws ZooKeeperConnectionException, IOException { - ZooKeeperWatcher watcher = new 
ZooKeeperWatcher(HBaseConfiguration.create(), + ZKWatcher watcher = new ZKWatcher(HBaseConfiguration.create(), "testIsClientReadable", null, false); assertTrue(watcher.isClientReadable(watcher.znodePaths.baseZNode)); @@ -42,7 +41,7 @@ public class TestZooKeeperWatcher { assertTrue(watcher.isClientReadable(watcher.znodePaths.masterAddressZNode)); assertTrue(watcher.isClientReadable(watcher.znodePaths.clusterIdZNode)); assertTrue(watcher.isClientReadable(watcher.znodePaths.tableZNode)); - assertTrue(watcher.isClientReadable(ZKUtil.joinZNode(watcher.znodePaths.tableZNode, "foo"))); + assertTrue(watcher.isClientReadable(ZooKeeperUtil.joinZNode(watcher.znodePaths.tableZNode, "foo"))); assertTrue(watcher.isClientReadable(watcher.znodePaths.rsZNode)); assertFalse(watcher.isClientReadable(watcher.znodePaths.tableLockZNode)); diff --git a/pom.xml b/pom.xml index 2a9b8c9944..45cb602d5b 100755 --- a/pom.xml +++ b/pom.xml @@ -68,6 +68,7 @@ hbase-resource-bundle hbase-http hbase-server + hbase-zookeeper hbase-thrift hbase-shell hbase-protocol-shaded @@ -1657,6 +1658,18 @@ test-jar test
+ + hbase-zookeeper + org.apache.hbase + ${project.version} + + + hbase-zookeeper + org.apache.hbase + ${project.version} + test-jar + test + hbase-server org.apache.hbase -- 2.14.1