From 8a59e418ed1509bc080e69c7dfc58da35ed5b1f5 Mon Sep 17 00:00:00 2001 From: Michael Stack Date: Tue, 3 Oct 2017 09:26:19 -0700 Subject: [PATCH] HBASE-12260 MasterServices - remove from coprocessor API (Discuss) Marks MasterServices as Coprocessor conduit to a subset of Master facility. Compensates for the removals by using Admin (Admin calls from Master short-circuit RPC). Added at least getServers to Admin Interface and made ServerListener LP.COPROCs to help with the transition. RSGroups needs work still. It is currently being fixed up to work on AMv2 concurrently. Will come back here when that work is done to finish. Meantime a few corners have been disabled. Purges from Server: getClusterConnection, getMetaTableLocator, getCoordinatedStateManager. Purges from MasterServices: getServerManager, getMasterFileSystem, getMasterWALManager, getTableStateManager, getMasterMetrics (may put this back later), getRegionNormalizer, mergeRegions, splitRegion, createTable, deleteTable, etc. (for these latter table manipulations, go via the Admin Interface), getMasterCoprocessorHost, getMasterQuotaManager, getAssignmentManager, getCatalogJanitor, getSnapshotManager, getMasterProcedureManagerHost, getClusterSchema, abortProcedure, getProcedures, getLocks, getLoadBalancer, all to do w/ replication changes and draining servers. Adds registering of ServerListener and unregistering to MasterServices. 
--- .../master/LogRollMasterProcedureManager.java | 9 +- .../LogRollRegionServerProcedureManager.java | 35 +- .../org/apache/hadoop/hbase/ClusterStatus.java | 3 + .../java/org/apache/hadoop/hbase/client/Admin.java | 6 + .../org/apache/hadoop/hbase/client/HBaseAdmin.java | 8 +- .../hbase/client/ShortCircuitMasterConnection.java | 2 +- .../hadoop/hbase/shaded/protobuf/ProtobufUtil.java | 5 +- .../hbase/shaded/protobuf/RequestConverter.java | 5 +- .../hadoop/hbase/zookeeper/ZooKeeperWatcher.java | 2 - .../hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java | 11 +- .../hadoop/hbase/rsgroup/RSGroupAdminServer.java | 313 +++++++------- .../hbase/rsgroup/RSGroupBasedLoadBalancer.java | 20 +- .../hbase/rsgroup/RSGroupInfoManagerImpl.java | 279 ++++++------ .../org/apache/hadoop/hbase/rsgroup/Utility.java | 15 +- .../balancer/TestRSGroupBasedLoadBalancer.java | 12 +- .../main/java/org/apache/hadoop/hbase/Server.java | 27 +- .../coordination/BaseCoordinatedStateManager.java | 2 +- .../coordination/SplitLogManagerCoordination.java | 36 +- .../ZKSplitLogManagerCoordination.java | 11 +- .../coprocessor/MasterCoprocessorEnvironment.java | 3 +- .../hadoop/hbase/coprocessor/MasterObserver.java | 13 +- .../hbase/favored/FavoredNodeLoadBalancer.java | 14 +- .../org/apache/hadoop/hbase/ipc/RpcServer.java | 8 +- .../apache/hadoop/hbase/master/CatalogJanitor.java | 54 +-- .../apache/hadoop/hbase/master/ClusterSchema.java | 2 +- .../hbase/master/ClusterSchemaServiceImpl.java | 19 +- .../org/apache/hadoop/hbase/master/HMaster.java | 193 ++++----- .../apache/hadoop/hbase/master/LoadBalancer.java | 4 +- .../hadoop/hbase/master/MasterCoprocessorHost.java | 12 +- .../hadoop/hbase/master/MasterRpcServices.java | 11 +- .../apache/hadoop/hbase/master/MasterServices.java | 434 +------------------ .../hadoop/hbase/master/MasterWalManager.java | 24 +- .../apache/hadoop/hbase/master/ServerListener.java | 5 +- .../apache/hadoop/hbase/master/ServerManager.java | 14 +- 
.../hadoop/hbase/master/SplitLogManager.java | 18 +- .../hadoop/hbase/master/TableNamespaceManager.java | 36 +- .../hadoop/hbase/master/TableStateManager.java | 4 +- .../hbase/master/assignment/AssignProcedure.java | 2 +- .../hbase/master/assignment/AssignmentManager.java | 8 +- .../assignment/GCMergedRegionsProcedure.java | 2 +- .../hbase/master/assignment/GCRegionProcedure.java | 17 +- .../assignment/MergeTableRegionsProcedure.java | 18 +- .../hbase/master/assignment/RegionStateStore.java | 5 +- .../assignment/RegionTransitionProcedure.java | 2 +- .../assignment/SplitTableRegionProcedure.java | 16 +- .../hbase/master/assignment/UnassignProcedure.java | 2 +- .../hadoop/hbase/master/assignment/Util.java | 4 +- .../hbase/master/balancer/BaseLoadBalancer.java | 23 +- .../master/balancer/FavoredStochasticBalancer.java | 16 +- .../master/balancer/RegionLocationFinder.java | 16 +- .../master/balancer/StochasticLoadBalancer.java | 53 +-- .../master/cleaner/ReplicationMetaCleaner.java | 6 +- .../hadoop/hbase/master/locking/LockManager.java | 2 +- .../hbase/master/normalizer/RegionNormalizer.java | 6 +- .../master/normalizer/SimpleRegionNormalizer.java | 34 +- .../AbstractStateMachineRegionProcedure.java | 2 +- .../AbstractStateMachineTableProcedure.java | 2 +- .../master/procedure/AddColumnFamilyProcedure.java | 10 +- .../master/procedure/CloneSnapshotProcedure.java | 14 +- .../master/procedure/CreateNamespaceProcedure.java | 10 +- .../master/procedure/CreateTableProcedure.java | 37 +- .../procedure/DeleteColumnFamilyProcedure.java | 8 +- .../master/procedure/DeleteNamespaceProcedure.java | 8 +- .../master/procedure/DeleteTableProcedure.java | 18 +- .../master/procedure/DisableTableProcedure.java | 8 +- .../master/procedure/EnableTableProcedure.java | 13 +- .../master/procedure/MasterDDLOperationHelper.java | 2 +- .../hbase/master/procedure/MasterProcedureEnv.java | 13 +- .../master/procedure/MasterProcedureUtil.java | 15 +- .../procedure/ModifyColumnFamilyProcedure.java 
| 8 +- .../master/procedure/ModifyNamespaceProcedure.java | 2 +- .../master/procedure/ModifyTableProcedure.java | 16 +- .../hbase/master/procedure/ProcedureSyncWait.java | 6 +- .../master/procedure/RSProcedureDispatcher.java | 13 +- .../master/procedure/RecoverMetaProcedure.java | 2 +- .../master/procedure/RestoreSnapshotProcedure.java | 24 +- .../master/procedure/ServerCrashProcedure.java | 20 +- .../master/procedure/TruncateTableProcedure.java | 4 +- .../snapshot/DisabledTableSnapshotHandler.java | 7 +- .../snapshot/EnabledTableSnapshotHandler.java | 4 +- .../master/snapshot/MasterSnapshotVerifier.java | 20 +- .../master/snapshot/SnapshotHFileCleaner.java | 4 +- .../hbase/master/snapshot/SnapshotManager.java | 15 +- .../hbase/master/snapshot/TakeSnapshotHandler.java | 22 +- .../hadoop/hbase/namespace/NamespaceAuditor.java | 12 +- .../hbase/namespace/NamespaceStateManager.java | 10 +- .../hbase/procedure/MasterProcedureManager.java | 7 +- .../procedure/MasterProcedureManagerHost.java | 10 +- .../hadoop/hbase/procedure/ProcedureManager.java | 3 +- .../procedure/RegionServerProcedureManager.java | 5 +- .../RegionServerProcedureManagerHost.java | 8 +- .../flush/MasterFlushTableProcedureManager.java | 6 +- .../RegionServerFlushTableProcedureManager.java | 38 +- .../hadoop/hbase/quotas/MasterQuotaManager.java | 73 ++-- .../quotas/RegionServerSpaceQuotaManager.java | 2 +- .../DisableTableViolationPolicyEnforcement.java | 4 +- .../hadoop/hbase/regionserver/HRegionServer.java | 10 +- .../apache/hadoop/hbase/regionserver/Region.java | 4 + .../hbase/regionserver/RegionServerServices.java | 3 + .../hbase/regionserver/ReplicationService.java | 2 +- .../hadoop/hbase/regionserver/SplitLogWorker.java | 21 +- .../apache/hadoop/hbase/regionserver/Store.java | 3 + .../snapshot/RegionServerSnapshotManager.java | 34 +- .../replication/regionserver/Replication.java | 5 +- .../regionserver/ReplicationSyncUp.java | 18 +- .../hbase/security/access/AccessControlLists.java | 49 ++- 
.../hbase/security/access/AccessController.java | 13 +- .../security/visibility/VisibilityController.java | 5 +- .../hbase/zookeeper/RegionServerTracker.java | 26 +- .../apache/hadoop/hbase/HBaseTestingUtility.java | 2 +- .../hadoop/hbase/MockRegionServerServices.java | 18 +- .../hadoop/hbase/client/TestFromClientSide3.java | 1 + .../coprocessor/TestCoprocessorConfiguration.java | 5 +- .../hadoop/hbase/master/MockNoopHMaster.java | 126 ++++++ .../hbase/master/MockNoopMasterServices.java | 472 --------------------- .../hadoop/hbase/master/MockRegionServer.java | 18 +- .../hbase/master/TestActiveMasterManager.java | 24 +- .../hbase/master/TestAssignmentListener.java | 21 +- .../hadoop/hbase/master/TestCatalogJanitor.java | 70 +-- .../hbase/master/TestClockSkewDetection.java | 10 +- .../hadoop/hbase/master/TestMasterNoCluster.java | 10 +- .../hadoop/hbase/master/TestRegionPlacement2.java | 6 +- .../hadoop/hbase/master/TestSplitLogManager.java | 22 +- .../{MockMasterServices.java => MockHMaster.java} | 41 +- .../master/assignment/TestAssignmentManager.java | 12 +- .../master/balancer/TestBaseLoadBalancer.java | 14 +- .../TestFavoredStochasticBalancerPickers.java | 2 +- .../master/balancer/TestRegionLocationFinder.java | 2 +- .../balancer/TestStochasticLoadBalancer.java | 6 +- .../hbase/master/cleaner/TestHFileCleaner.java | 20 +- .../hbase/master/cleaner/TestHFileLinkCleaner.java | 20 +- .../hbase/master/cleaner/TestLogsCleaner.java | 21 +- .../cleaner/TestReplicationHFileCleaner.java | 18 - .../hbase/master/locking/TestLockManager.java | 18 +- .../normalizer/TestSimpleRegionNormalizer.java | 28 +- .../procedure/MasterProcedureTestingUtility.java | 7 +- .../hbase/master/snapshot/TestSnapshotManager.java | 14 +- .../procedure/SimpleMasterProcedureManager.java | 6 +- .../hbase/procedure/SimpleRSProcedureManager.java | 21 +- .../hbase/quotas/TestMasterQuotaManager.java | 9 +- .../hbase/regionserver/TestHeapMemoryManager.java | 20 +- 
.../hbase/regionserver/TestSplitLogWorker.java | 65 +-- .../hadoop/hbase/regionserver/TestWALLockup.java | 20 +- .../replication/TestReplicationStateHBaseImpl.java | 19 +- .../replication/TestReplicationStateZKImpl.java | 20 +- .../replication/TestReplicationTrackerZKImpl.java | 18 - .../regionserver/TestReplicationSourceManager.java | 15 - .../security/token/TestTokenAuthentication.java | 19 - .../org/apache/hadoop/hbase/util/MockServer.java | 136 ------ 149 files changed, 1380 insertions(+), 2615 deletions(-) create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopHMaster.java delete mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java rename hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/{MockMasterServices.java => MockHMaster.java} (96%) delete mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/master/LogRollMasterProcedureManager.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/master/LogRollMasterProcedureManager.java index 62b2df7119..6cf6696294 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/master/LogRollMasterProcedureManager.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/master/LogRollMasterProcedureManager.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -30,11 +30,11 @@ import org.apache.hadoop.hbase.CoordinatedStateManagerFactory; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.backup.BackupRestoreConstants; import org.apache.hadoop.hbase.backup.impl.BackupManager; +import org.apache.hadoop.hbase.master.HMaster; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.coordination.BaseCoordinatedStateManager; import org.apache.hadoop.hbase.errorhandling.ForeignException; import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher; -import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.master.MetricsMaster; import org.apache.hadoop.hbase.procedure.MasterProcedureManager; import org.apache.hadoop.hbase.procedure.Procedure; @@ -62,7 +62,7 @@ public class LogRollMasterProcedureManager extends MasterProcedureManager { public static final int BACKUP_WAKE_MILLIS_DEFAULT = 500; public static final int BACKUP_TIMEOUT_MILLIS_DEFAULT = 180000; public static final int BACKUP_POOL_THREAD_NUMBER_DEFAULT = 8; - private MasterServices master; + private HMaster master; private ProcedureCoordinator coordinator; private boolean done; @@ -77,7 +77,7 @@ public class LogRollMasterProcedureManager extends MasterProcedureManager { } @Override - public void initialize(MasterServices master, MetricsMaster metricsMaster) + public void initialize(HMaster master, MetricsMaster metricsMaster) throws KeeperException, IOException, UnsupportedOperationException { this.master = master; this.done = false; @@ -167,5 +167,4 @@ public class LogRollMasterProcedureManager extends MasterProcedureManager { public boolean isProcedureDone(ProcedureDescription desc) throws IOException { return done; } - } diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollRegionServerProcedureManager.java 
b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollRegionServerProcedureManager.java index 5ab7facf9b..04c5576a67 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollRegionServerProcedureManager.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollRegionServerProcedureManager.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -28,6 +28,7 @@ import org.apache.hadoop.hbase.CoordinatedStateManagerFactory; import org.apache.hadoop.hbase.backup.BackupRestoreConstants; import org.apache.hadoop.hbase.backup.impl.BackupManager; import org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager; +import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.coordination.BaseCoordinatedStateManager; import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher; @@ -36,7 +37,6 @@ import org.apache.hadoop.hbase.procedure.ProcedureMemberRpcs; import org.apache.hadoop.hbase.procedure.RegionServerProcedureManager; import org.apache.hadoop.hbase.procedure.Subprocedure; import org.apache.hadoop.hbase.procedure.SubprocedureFactory; -import org.apache.hadoop.hbase.regionserver.RegionServerServices; import org.apache.zookeeper.KeeperException; /** @@ -68,7 +68,7 @@ public class LogRollRegionServerProcedureManager extends RegionServerProcedureMa /** Default amount of time to check for errors while regions finish backup work */ private static final long BACKUP_REQUEST_WAKE_MILLIS_DEFAULT = 500; - private RegionServerServices rss; + private HRegionServer hrs; private ProcedureMemberRpcs memberRpcs; private ProcedureMember member; private boolean started = false; @@ -84,12 +84,12 @@ public class LogRollRegionServerProcedureManager 
extends RegionServerProcedureMa */ @Override public void start() { - if (!BackupManager.isBackupEnabled(rss.getConfiguration())) { + if (!BackupManager.isBackupEnabled(hrs.getConfiguration())) { LOG.warn("Backup is not enabled. Check your " + BackupRestoreConstants.BACKUP_ENABLE_KEY + " setting"); return; } - this.memberRpcs.start(rss.getServerName().toString(), member); + this.memberRpcs.start(hrs.getServerName().toString(), member); started = true; LOG.info("Started region server backup manager."); } @@ -121,21 +121,21 @@ public class LogRollRegionServerProcedureManager extends RegionServerProcedureMa public Subprocedure buildSubprocedure(byte[] data) { // don't run a backup if the parent is stop(ping) - if (rss.isStopping() || rss.isStopped()) { - throw new IllegalStateException("Can't start backup procedure on RS: " + rss.getServerName() + if (hrs.isStopping() || hrs.isStopped()) { + throw new IllegalStateException("Can't start backup procedure on RS: " + hrs.getServerName() + ", because stopping/stopped!"); } LOG.info("Attempting to run a roll log procedure for backup."); ForeignExceptionDispatcher errorDispatcher = new ForeignExceptionDispatcher(); - Configuration conf = rss.getConfiguration(); + Configuration conf = hrs.getConfiguration(); long timeoutMillis = conf.getLong(BACKUP_TIMEOUT_MILLIS_KEY, BACKUP_TIMEOUT_MILLIS_DEFAULT); long wakeMillis = conf.getLong(BACKUP_REQUEST_WAKE_MILLIS_KEY, BACKUP_REQUEST_WAKE_MILLIS_DEFAULT); LogRollBackupSubprocedurePool taskManager = - new LogRollBackupSubprocedurePool(rss.getServerName().toString(), conf); - return new LogRollBackupSubprocedure(rss, member, errorDispatcher, wakeMillis, timeoutMillis, + new LogRollBackupSubprocedurePool(hrs.getServerName().toString(), conf); + return new LogRollBackupSubprocedure(hrs, member, errorDispatcher, wakeMillis, timeoutMillis, taskManager, data); } @@ -152,28 +152,28 @@ public class LogRollRegionServerProcedureManager extends RegionServerProcedureMa } @Override - public void 
initialize(RegionServerServices rss) throws KeeperException { - this.rss = rss; - if (!BackupManager.isBackupEnabled(rss.getConfiguration())) { + public void initialize(HRegionServer hrs) throws KeeperException { + this.hrs = hrs; + if (!BackupManager.isBackupEnabled(hrs.getConfiguration())) { LOG.warn("Backup is not enabled. Check your " + BackupRestoreConstants.BACKUP_ENABLE_KEY + " setting"); return; } BaseCoordinatedStateManager coordManager = (BaseCoordinatedStateManager) CoordinatedStateManagerFactory. - getCoordinatedStateManager(rss.getConfiguration()); - coordManager.initialize(rss); + getCoordinatedStateManager(hrs.getConfiguration()); + coordManager.initialize(hrs); this.memberRpcs = coordManager .getProcedureMemberRpcs(LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_SIGNATURE); // read in the backup handler configuration properties - Configuration conf = rss.getConfiguration(); + Configuration conf = hrs.getConfiguration(); long keepAlive = conf.getLong(BACKUP_TIMEOUT_MILLIS_KEY, BACKUP_TIMEOUT_MILLIS_DEFAULT); int opThreads = conf.getInt(BACKUP_REQUEST_THREADS_KEY, BACKUP_REQUEST_THREADS_DEFAULT); // create the actual cohort member ThreadPoolExecutor pool = - ProcedureMember.defaultPool(rss.getServerName().toString(), opThreads, keepAlive); + ProcedureMember.defaultPool(hrs.getServerName().toString(), opThreads, keepAlive); this.member = new ProcedureMember(memberRpcs, pool, new BackupSubprocedureBuilder()); } @@ -181,5 +181,4 @@ public class LogRollRegionServerProcedureManager extends RegionServerProcedureMa public String getProcedureSignature() { return "backup-proc"; } - } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java index 1c2224710e..a4b7b36b71 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java @@ -229,6 +229,9 @@ public class 
ClusterStatus { // Getters // + /** + * @return currently registered RegionServers + */ public Collection getServers() { if (liveServers == null) { return Collections.emptyList(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java index 64d5e5306c..001d2cd8be 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java @@ -2485,4 +2485,10 @@ public interface Admin extends Abortable, Closeable { * @return List of servers that are not cleared */ List clearDeadServers(final List servers) throws IOException; + + /** + * @return currently online RegionServers in cluster. + * @throws IOException + */ + Collection getServers() throws IOException; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java index 8665e84646..3e2e29e685 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java @@ -1,4 +1,4 @@ -/** +/* * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file @@ -4383,4 +4383,10 @@ public class HBaseAdmin implements Admin { } }); } + + @Override + public Collection getServers() throws IOException { + // The Collection returned out of ClusterStatus is unmodifiable which is what we want. 
+ return getClusterStatus(EnumSet.of(Option.LIVE_SERVERS)).getServers(); + } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ShortCircuitMasterConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ShortCircuitMasterConnection.java index 826e6decce..e1fb5cedeb 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ShortCircuitMasterConnection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ShortCircuitMasterConnection.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java index e566704a32..4e556a5aeb 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -43,8 +43,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.ByteBufferCell; import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.CellBuilder; -import org.apache.hadoop.hbase.CellBuilderFactory; import org.apache.hadoop.hbase.CellBuilderType; import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.CellUtil; @@ -81,7 +79,6 @@ import org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.client.RegionLoadStats; -import org.apache.hadoop.hbase.client.RegionReplicaUtil; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.SnapshotDescription; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java index 2fbbc3fc88..e8fedb2601 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -1560,9 +1560,6 @@ public final class RequestConverter { return IS_CATALOG_JANITOR_ENABLED_REQUEST; } - /** - * @see {@link #buildCleanerChoreRequest} - */ private static final RunCleanerChoreRequest CLEANER_CHORE_REQUEST = RunCleanerChoreRequest.newBuilder().build(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java index 30913dc2eb..acd54e7b9f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java @@ -34,8 +34,6 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.AuthUtil; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.security.Superusers; diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java index ae49253986..54e85a4016 100644 --- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java +++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.SnapshotDescription; import org.apache.hadoop.hbase.client.TableDescriptor; @@ -286,9 +287,11 @@ public class RSGroupAdminEndpoint implements MasterCoprocessor, MasterObserver { } void assignTableToGroup(TableDescriptor desc) throws IOException { - String groupName = - master.getClusterSchema().getNamespace(desc.getTableName().getNamespaceAsString()) - .getConfigurationValue(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP); + String groupName; + try (Admin admin = this.master.getConnection().getAdmin()) { + groupName = admin.getNamespaceDescriptor(desc.getTableName().getNamespaceAsString()). + getConfigurationValue(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP); + } if (groupName == null) { groupName = RSGroupInfo.DEFAULT_GROUP; } diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java index b13dafd195..1b4289ac40 100644 --- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java +++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -23,10 +23,10 @@ import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; -import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Set; +import java.util.SortedSet; import org.apache.commons.lang3.StringUtils; import org.apache.commons.logging.Log; @@ -34,19 +34,12 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.constraint.ConstraintException; -import org.apache.hadoop.hbase.master.HMaster; -import org.apache.hadoop.hbase.master.LoadBalancer; import org.apache.hadoop.hbase.master.MasterServices; -import org.apache.hadoop.hbase.master.RegionPlan; import org.apache.hadoop.hbase.master.RegionState; -import org.apache.hadoop.hbase.master.ServerManager; -import org.apache.hadoop.hbase.master.assignment.AssignmentManager; -import org.apache.hadoop.hbase.master.assignment.RegionStates.RegionStateNode; -import org.apache.hadoop.hbase.master.locking.LockManager; import org.apache.hadoop.hbase.net.Address; -import org.apache.hadoop.hbase.procedure2.LockType; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists; @@ -81,17 +74,13 @@ public class RSGroupAdminServer implements RSGroupAdmin { return groupName == null? null: rsGroupInfoManager.getRSGroup(groupName); } - private void checkOnlineServersOnly(Set
servers) throws ConstraintException { + private void checkOnlineServersOnly(Set
servers) throws IOException { // This uglyness is because we only have Address, not ServerName. // Online servers are keyed by ServerName. - Set
onlineServers = new HashSet<>(); - for(ServerName server: master.getServerManager().getOnlineServers().keySet()) { - onlineServers.add(server.getAddress()); - } + Set
onlineAddresses = Utility.getOnlineAddresses(this.master); for (Address el: servers) { - if (!onlineServers.contains(el)) { - throw new ConstraintException( - "Server " + el + " is not an online server in 'default' RSGroup."); + if (!onlineAddresses.contains(el)) { + throw new ConstraintException("Server " + el + " is not online in 'default' RSGroup."); } } } @@ -113,31 +102,17 @@ public class RSGroupAdminServer implements RSGroupAdmin { } /** - * @return List of Regions associated with this server. + * @return List of Regions associated with this server or the empty list if none. */ - private List getRegions(final Address server) { - LinkedList regions = new LinkedList<>(); - for (Map.Entry el : - master.getAssignmentManager().getRegionStates().getRegionAssignments().entrySet()) { - if (el.getValue() == null) continue; - if (el.getValue().getAddress().equals(server)) { - addRegion(regions, el.getKey()); - } - } - for (RegionStateNode state : master.getAssignmentManager().getRegionsInTransition()) { - if (state.getRegionLocation().getAddress().equals(server)) { - addRegion(regions, state.getRegionInfo()); + private List getRegions(final Address server) throws IOException { + try (Admin admin = this.master.getConnection().getAdmin()) { + for (ServerName serverName: admin.getServers()) { + if (serverName.getAddress().equals(server)) { + return admin.getRegions(serverName); + } } } - return regions; - } - - private void addRegion(final LinkedList regions, RegionInfo hri) { - // If meta, move it last otherwise other unassigns fail because meta is not - // online for them to update state in. This is dodgy. Needs to be made more - // robust. See TODO below. 
- if (hri.isMetaRegion()) regions.addLast(hri); - else regions.addFirst(hri); + return Collections.emptyList(); } /** @@ -220,11 +195,14 @@ public class RSGroupAdminServer implements RSGroupAdmin { for (RegionInfo region: regions) { // Regions might get assigned from tables of target group so we need to filter if (!targetGrp.containsTable(region.getTable())) { - this.master.getAssignmentManager().unassign(region); - if (master.getAssignmentManager().getRegionStates(). - getRegionState(region).isFailedOpen()) { - continue; - } +// // TODO!!!!!!!!! COMMENTED OUT FOR NOW!!!! BALAZS IS FIXING THIS CURRENTLY!!! +// /* +// this.master.getAssignmentManager().unassign(region); +// if (master.getAssignmentManager().getRegionStates(). +// getRegionState(region).isFailedOpen()) { +// continue; +// } +// */ foundRegionsToUnassign = true; } } @@ -248,28 +226,28 @@ public class RSGroupAdminServer implements RSGroupAdmin { * @param servers the regions of tables assigned to these servers will not unassign * @throws IOException */ - private void unassignRegionFromTables(Set tables, String targetGroupName, - Set
servers) throws IOException { + private void unassignRegionFromTables(Set tables, String targetGroupName, Set
servers) throws IOException { for (TableName table: tables) { LOG.info("Unassigning region(s) from " + table + " for table move to " + targetGroupName); - LockManager.MasterLock lock = master.getLockManager().createMasterLock(table, - LockType.EXCLUSIVE, this.getClass().getName() + ": RSGroup: table move"); - try { - try { - lock.acquire(); - } catch (InterruptedException e) { - throw new IOException("Interrupted when waiting for table lock", e); - } - for (RegionInfo region : - master.getAssignmentManager().getRegionStates().getRegionsOfTable(table)) { - ServerName sn = master.getAssignmentManager().getRegionStates().getRegionServerOfRegion(region); - if (!servers.contains(sn.getAddress())) { - master.getAssignmentManager().unassign(region); - } - } - } finally { - lock.release(); - } + // BALAZS IS WORKING ON THIS!!! TODO. NEED TO FIGURE HOW TO GIVE OUT TABLE LOCK +// LockManager.MasterLock lock = master.getLockManager().createMasterLock(table, +// LockType.EXCLUSIVE, this.getClass().getName() + ": RSGroup: table move"); +// try { +// try { +// lock.acquire(); +// } catch (InterruptedException e) { +// throw new IOException("Interrupted when waiting for table lock", e); +// } +// for (RegionInfo region : +// master.getAssignmentManager().getRegionStates().getRegionsOfTable(table)) { +// ServerName sn = master.getAssignmentManager().getRegionStates().getRegionServerOfRegion(region); +// if (!servers.contains(sn.getAddress())) { +// master.getAssignmentManager().unassign(region); +// } +// } +// } finally { +// lock.release(); +// } } } @@ -292,9 +270,11 @@ public class RSGroupAdminServer implements RSGroupAdmin { // Hold a lock on the manager instance while moving servers to prevent // another writer changing our state while we are working. synchronized (rsGroupInfoManager) { + /** TODO THIS AIN'T ALLOWED!!! 
if (master.getMasterCoprocessorHost() != null) { master.getMasterCoprocessorHost().preMoveServers(servers, targetGroupName); } + */ // Presume first server's source group. Later ensure all servers are from this group. Address firstServer = servers.iterator().next(); RSGroupInfo srcGrp = rsGroupInfoManager.getRSGroupOfServer(firstServer); @@ -349,6 +329,7 @@ public class RSGroupAdminServer implements RSGroupAdmin { for (RegionInfo region: regions) { // Regions might get assigned from tables of target group so we need to filter if (!targetGrp.containsTable(region.getTable())) { + /** BALAZS FIXING THIS UPDATING. TODO this.master.getAssignmentManager().unassign(region); if (master.getAssignmentManager().getRegionStates(). getRegionState(region).isFailedOpen()) { @@ -357,6 +338,7 @@ public class RSGroupAdminServer implements RSGroupAdmin { // than mark region as 'foundRegionsToUnassign'. continue; } + */ foundRegionsToUnassign = true; } } @@ -372,9 +354,11 @@ public class RSGroupAdminServer implements RSGroupAdmin { Thread.currentThread().interrupt(); } } while (foundRegionsToUnassign); + /** TODO: NOT ALLOWED if (master.getMasterCoprocessorHost() != null) { master.getMasterCoprocessorHost().postMoveServers(servers, targetGroupName); } + */ LOG.info("Move server done: " + srcGrp.getName() + "=>" + targetGroupName); } } @@ -392,9 +376,11 @@ public class RSGroupAdminServer implements RSGroupAdmin { // Hold a lock on the manager instance while moving servers to prevent // another writer changing our state while we are working. 
synchronized (rsGroupInfoManager) { + /** TODO NOT ALLOWED if (master.getMasterCoprocessorHost() != null) { master.getMasterCoprocessorHost().preMoveTables(tables, targetGroup); } + **/ if(targetGroup != null) { RSGroupInfo destGroup = rsGroupInfoManager.getRSGroup(targetGroup); if(destGroup == null) { @@ -414,11 +400,14 @@ public class RSGroupAdminServer implements RSGroupAdmin { } } rsGroupInfoManager.moveTables(tables, targetGroup); + /** TODO: NOT ALLOWED if (master.getMasterCoprocessorHost() != null) { master.getMasterCoprocessorHost().postMoveTables(tables, targetGroup); } + */ } for (TableName table: tables) { + /** TODO: BALAZS LockManager.MasterLock lock = master.getLockManager().createMasterLock(table, LockType.EXCLUSIVE, this.getClass().getName() + ": RSGroup: table move"); try { @@ -434,18 +423,23 @@ public class RSGroupAdminServer implements RSGroupAdmin { } finally { lock.release(); } + */ } } @Override public void addRSGroup(String name) throws IOException { + /** TODO NOT ALLOWED if (master.getMasterCoprocessorHost() != null) { master.getMasterCoprocessorHost().preAddRSGroup(name); } + */ rsGroupInfoManager.addRSGroup(new RSGroupInfo(name)); + /** TODO NOT ALLOWED if (master.getMasterCoprocessorHost() != null) { master.getMasterCoprocessorHost().postAddRSGroup(name); } + */ } @Override @@ -453,9 +447,11 @@ public class RSGroupAdminServer implements RSGroupAdmin { // Hold a lock on the manager instance while moving servers to prevent // another writer changing our state while we are working. 
synchronized (rsGroupInfoManager) { + /** TODO: NOT ALLOWED if (master.getMasterCoprocessorHost() != null) { master.getMasterCoprocessorHost().preRemoveRSGroup(name); } + */ RSGroupInfo rsGroupInfo = rsGroupInfoManager.getRSGroup(name); if (rsGroupInfo == null) { throw new ConstraintException("RSGroup " + name + " does not exist"); @@ -472,78 +468,90 @@ public class RSGroupAdminServer implements RSGroupAdmin { " servers; you must remove these servers from the RSGroup before" + "the RSGroup can be removed."); } - for (NamespaceDescriptor ns: master.getClusterSchema().getNamespaces()) { - String nsGroup = ns.getConfigurationValue(rsGroupInfo.NAMESPACE_DESC_PROP_GROUP); - if (nsGroup != null && nsGroup.equals(name)) { - throw new ConstraintException("RSGroup " + name + " is referenced by namespace: " + - ns.getName()); + try (Admin admin = this.master.getConnection().getAdmin()) { + for (NamespaceDescriptor ns: admin.listNamespaceDescriptors()) { + String nsGroup = ns.getConfigurationValue(rsGroupInfo.NAMESPACE_DESC_PROP_GROUP); + if (nsGroup != null && nsGroup.equals(name)) { + throw new ConstraintException("RSGroup " + name + " is referenced by namespace: " + + ns.getName()); + } } } rsGroupInfoManager.removeRSGroup(name); + /** TODO NOT ALLOWED if (master.getMasterCoprocessorHost() != null) { master.getMasterCoprocessorHost().postRemoveRSGroup(name); } + */ } } @Override public boolean balanceRSGroup(String groupName) throws IOException { + /* ServerManager serverManager = master.getServerManager(); AssignmentManager assignmentManager = master.getAssignmentManager(); LoadBalancer balancer = master.getLoadBalancer(); - - synchronized (balancer) { - // If balance not true, don't run balancer. 
- if (!((HMaster) master).isBalancerOn()) return false; - if (master.getMasterCoprocessorHost() != null) { - master.getMasterCoprocessorHost().preBalanceRSGroup(groupName); - } - if (getRSGroupInfo(groupName) == null) { - throw new ConstraintException("RSGroup does not exist: "+groupName); - } - // Only allow one balance run at at time. - Map groupRIT = rsGroupGetRegionsInTransition(groupName); - if (groupRIT.size() > 0) { - LOG.debug("Not running balancer because " + groupRIT.size() + " region(s) in transition: " + - StringUtils.abbreviate( - master.getAssignmentManager().getRegionStates().getRegionsInTransition().toString(), - 256)); - return false; - } - if (serverManager.areDeadServersInProgress()) { - LOG.debug("Not running balancer because processing dead regionserver(s): " + - serverManager.getDeadServers()); - return false; - } - - //We balance per group instead of per table - List plans = new ArrayList<>(); - for(Map.Entry>> tableMap: - getRSGroupAssignmentsByTable(groupName).entrySet()) { - LOG.info("Creating partial plan for table " + tableMap.getKey() + ": " - + tableMap.getValue()); - List partialPlans = balancer.balanceCluster(tableMap.getValue()); - LOG.info("Partial plan for table " + tableMap.getKey() + ": " + partialPlans); - if (partialPlans != null) { - plans.addAll(partialPlans); - } - } - long startTime = System.currentTimeMillis(); - boolean balancerRan = !plans.isEmpty(); - if (balancerRan) { - LOG.info("RSGroup balance " + groupName + " starting with plan count: " + plans.size()); - for (RegionPlan plan: plans) { - LOG.info("balance " + plan); - assignmentManager.moveAsync(plan); - } - LOG.info("RSGroup balance " + groupName + " completed after " + - (System.currentTimeMillis()-startTime) + " seconds"); - } - if (master.getMasterCoprocessorHost() != null) { - master.getMasterCoprocessorHost().postBalanceRSGroup(groupName, balancerRan); - } - return balancerRan; - } + */ + + // TODO: FIX +// synchronized (balancer) { +// // If balance not 
true, don't run balancer. +// if (!((HMaster) master).isBalancerOn()) return false; +// /** TODO: Not ALLOWED +// if (master.getMasterCoprocessorHost() != null) { +// master.getMasterCoprocessorHost().preBalanceRSGroup(groupName); +// } +// */ +// if (getRSGroupInfo(groupName) == null) { +// throw new ConstraintException("RSGroup does not exist: "+groupName); +// } +// // Only allow one balance run at at time. +// if (isRSGroupRegionsInTransition(groupName)) { +// try (Admin admin = this.master.getConnection().getAdmin()) { +// LOG.debug("Not running balancer because " + groupName + +// " has region(s) in transition: " + +// StringUtils.abbreviate(admin.getClusterStatus().getRegionsInTransition().toString(), 256)); +// } +// return false; +// } +// if (serverManager.areDeadServersInProgress()) { +// LOG.debug("Not running balancer because processing dead regionserver(s): " + +// serverManager.getDeadServers()); +// return false; +// } +// +// //We balance per group instead of per table +// List plans = new ArrayList<>(); +// for(Map.Entry>> tableMap: +// getRSGroupAssignmentsByTable(groupName).entrySet()) { +// LOG.info("Creating partial plan for table " + tableMap.getKey() + ": " +// + tableMap.getValue()); +// List partialPlans = balancer.balanceCluster(tableMap.getValue()); +// LOG.info("Partial plan for table " + tableMap.getKey() + ": " + partialPlans); +// if (partialPlans != null) { +// plans.addAll(partialPlans); +// } +// } +// long startTime = System.currentTimeMillis(); +// boolean balancerRan = !plans.isEmpty(); +// if (balancerRan) { +// LOG.info("RSGroup balance " + groupName + " starting with plan count: " + plans.size()); +// for (RegionPlan plan: plans) { +// LOG.info("balance " + plan); +// assignmentManager.moveAsync(plan); +// } +// LOG.info("RSGroup balance " + groupName + " completed after " + +// (System.currentTimeMillis()-startTime) + " seconds"); +// } +// /** NOT ALLOWED +// if (master.getMasterCoprocessorHost() != null) { +// 
master.getMasterCoprocessorHost().postBalanceRSGroup(groupName, balancerRan); +// } +// */ +// return true; // TODO:!!!!! balancerRan; +// } + return true; // REMOVE!!! } @Override @@ -572,9 +580,11 @@ public class RSGroupAdminServer implements RSGroupAdmin { // Hold a lock on the manager instance while moving servers and tables to prevent // another writer changing our state while we are working. synchronized (rsGroupInfoManager) { + /** TODO: NOT ALLOWED if (master.getMasterCoprocessorHost() != null) { master.getMasterCoprocessorHost().preMoveServersAndTables(servers, tables, targetGroup); } + */ //check servers and tables status checkServersAndTables(servers, tables, targetGroup); @@ -587,27 +597,35 @@ public class RSGroupAdminServer implements RSGroupAdmin { //unassign regions which not assigned to these servers unassignRegionFromTables(tables, targetGroup, servers); + /** TODO NOT ALLOWED if (master.getMasterCoprocessorHost() != null) { master.getMasterCoprocessorHost().postMoveServersAndTables(servers, tables, targetGroup); } + */ } LOG.info("Move servers and tables done. Severs :" + servers + " , Tables : " + tables + " => " + targetGroup); } - private Map rsGroupGetRegionsInTransition(String groupName) + /** + * @param groupName + * @return True if any region from groupName is in transition. 
+ * @throws IOException + */ + private boolean isRSGroupRegionsInTransition(String groupName) throws IOException { - Map rit = Maps.newTreeMap(); - AssignmentManager am = master.getAssignmentManager(); - for(TableName tableName : getRSGroupInfo(groupName).getTables()) { - for(RegionInfo regionInfo: am.getRegionStates().getRegionsOfTable(tableName)) { - RegionState state = am.getRegionStates().getRegionTransitionState(regionInfo); - if(state != null) { - rit.put(regionInfo.getEncodedName(), state); + boolean result = false; + try (Admin admin = this.master.getConnection().getAdmin()) { + List rits = admin.getClusterStatus().getRegionsInTransition(); + SortedSet tables = getRSGroupInfo(groupName).getTables(); + for (RegionState rs: rits) { + if (tables.contains(rs.getRegion().getTable())) { + result = true; + break; } } } - return rit; + return result; } private Map>> @@ -615,26 +633,24 @@ public class RSGroupAdminServer implements RSGroupAdmin { Map>> result = Maps.newHashMap(); RSGroupInfo rsGroupInfo = getRSGroupInfo(groupName); Map>> assignments = Maps.newHashMap(); - for(Map.Entry entry: - master.getAssignmentManager().getRegionStates().getRegionAssignments().entrySet()) { - TableName currTable = entry.getKey().getTable(); - ServerName currServer = entry.getValue(); - RegionInfo currRegion = entry.getKey(); - if (rsGroupInfo.getTables().contains(currTable)) { - assignments.putIfAbsent(currTable, new HashMap<>()); - assignments.get(currTable).putIfAbsent(currServer, new ArrayList<>()); - assignments.get(currTable).get(currServer).add(currRegion); - } - } - Map> serverMap = Maps.newHashMap(); - for(ServerName serverName: master.getServerManager().getOnlineServers().keySet()) { - if(rsGroupInfo.getServers().contains(serverName.getAddress())) { - serverMap.put(serverName, Collections.emptyList()); + try (Admin admin = this.master.getConnection().getAdmin()) { + for (ServerName serverName: admin.getServers()) { + if 
(rsGroupInfo.getServers().contains(serverName.getAddress())) { + serverMap.put(serverName, Collections.emptyList()); + for (RegionInfo regionInfo : admin.getRegions(serverName)) { + TableName currTable = regionInfo.getTable(); + if (rsGroupInfo.getTables().contains(currTable)) { + assignments.putIfAbsent(currTable, new HashMap<>()); + assignments.get(currTable).putIfAbsent(serverName, new ArrayList<>()); + assignments.get(currTable).get(serverName).add(regionInfo); + } + } + } } } - // add all tables that are members of the group + // Add all tables that are members of the group for(TableName tableName : rsGroupInfo.getTables()) { if(assignments.containsKey(tableName)) { result.put(tableName, new HashMap<>()); @@ -643,7 +659,6 @@ public class RSGroupAdminServer implements RSGroupAdmin { LOG.debug("Adding assignments for " + tableName + ": " + assignments.get(tableName)); } } - return result; } } diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java index 3f1373f3cb..91b69b30a8 100644 --- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java +++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -40,8 +40,8 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.constraint.ConstraintException; +import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.LoadBalancer; -import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.master.RegionPlan; import org.apache.hadoop.hbase.master.balancer.StochasticLoadBalancer; import org.apache.hadoop.hbase.net.Address; @@ -56,7 +56,7 @@ import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists; import org.apache.hadoop.hbase.shaded.com.google.common.collect.Maps; /** - * GroupBasedLoadBalancer, used when Region Server Grouping is configured (HBase-6721) + * GroupBasedLoadBalancer, used when Region Server Grouping is configured (HBase-6721). * It does region balance based on a table's group membership. 
* * Most assignment methods contain two exclusive code paths: Online - when the group @@ -75,7 +75,7 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer { private Configuration config; private ClusterStatus clusterStatus; - private MasterServices masterServices; + private HMaster master; private volatile RSGroupInfoManager rsGroupInfoManager; private LoadBalancer internalBalancer; @@ -101,8 +101,8 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer { } @Override - public void setMasterServices(MasterServices masterServices) { - this.masterServices = masterServices; + public void setMaster(HMaster master) { + this.master = master; } @Override @@ -327,7 +327,7 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer { Map> existingAssignments) throws HBaseIOException{ Map> correctAssignments = new TreeMap<>(); - List misplacedRegions = new LinkedList<>(); + // TODO List misplacedRegions = new LinkedList<>(); correctAssignments.put(LoadBalancer.BOGUS_SERVER_NAME, new LinkedList<>()); for (Map.Entry> assignments : existingAssignments.entrySet()){ ServerName sName = assignments.getKey(); @@ -352,6 +352,7 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer { //TODO bulk unassign? //unassign misplaced regions, so that they are assigned to correct groups. 
+ /** TODO: BALAZS for(RegionInfo info: misplacedRegions) { try { this.masterServices.getAssignmentManager().unassign(info); @@ -359,6 +360,7 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer { throw new HBaseIOException(e); } } + */ return correctAssignments; } @@ -367,7 +369,7 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer { try { if (rsGroupInfoManager == null) { List cps = - masterServices.getMasterCoprocessorHost().findCoprocessors(RSGroupAdminEndpoint.class); + master.getMasterCoprocessorHost().findCoprocessors(RSGroupAdminEndpoint.class); if (cps.size() != 1) { String msg = "Expected one implementation of GroupAdminEndpoint but found " + cps.size(); LOG.error(msg); @@ -383,7 +385,7 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer { Class balancerKlass = config.getClass(HBASE_RSGROUP_LOADBALANCER_CLASS, StochasticLoadBalancer.class, LoadBalancer.class); internalBalancer = ReflectionUtils.newInstance(balancerKlass, config); - internalBalancer.setMasterServices(masterServices); + internalBalancer.setMaster(master); internalBalancer.setClusterStatus(clusterStatus); internalBalancer.setConf(config); internalBalancer.initialize(); diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java index e116f58dd5..8269ef6483 100644 --- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java +++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java @@ -22,6 +22,7 @@ import java.io.ByteArrayInputStream; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; @@ -36,37 +37,29 @@ import java.util.concurrent.atomic.AtomicBoolean; import org.apache.commons.logging.Log; import 
org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.Coprocessor; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HColumnDescriptor; -import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.MetaTableAccessor; -import org.apache.hadoop.hbase.MetaTableAccessor.DefaultVisitorBase; import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.ClusterConnection; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.Delete; -import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.Put; -import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; -import org.apache.hadoop.hbase.client.TableState; +import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.constraint.ConstraintException; import org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.master.ServerListener; -import org.apache.hadoop.hbase.master.TableStateManager; import org.apache.hadoop.hbase.net.Address; -import org.apache.hadoop.hbase.procedure2.Procedure; import org.apache.hadoop.hbase.protobuf.ProtobufMagic; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos; @@ -82,8 +75,6 @@ import 
org.apache.zookeeper.KeeperException; import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists; import org.apache.hadoop.hbase.shaded.com.google.common.collect.Maps; import org.apache.hadoop.hbase.shaded.com.google.common.collect.Sets; -import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; import com.google.protobuf.ServiceException; @@ -137,7 +128,7 @@ class RSGroupInfoManagerImpl implements RSGroupInfoManager { private final MasterServices masterServices; private Table rsGroupTable; - private final ClusterConnection conn; + private final Connection conn; private final ZooKeeperWatcher watcher; private final RSGroupStartupWorker rsGroupStartupWorker = new RSGroupStartupWorker(); // contains list of groups that were last flushed to persistent store @@ -148,14 +139,14 @@ class RSGroupInfoManagerImpl implements RSGroupInfoManager { private RSGroupInfoManagerImpl(MasterServices masterServices) throws IOException { this.masterServices = masterServices; this.watcher = masterServices.getZooKeeper(); - this.conn = masterServices.getClusterConnection(); + this.conn = masterServices.getConnection(); } private synchronized void init() throws IOException{ refresh(); rsGroupStartupWorker.start(); serverEventsListenerThread.start(); - masterServices.getServerManager().registerListener(serverEventsListenerThread); + masterServices.registerListener(serverEventsListenerThread); } static RSGroupInfoManager getInstance(MasterServices master) throws IOException { @@ -193,7 +184,7 @@ class RSGroupInfoManagerImpl implements RSGroupInfoManager { // it. If not 'default' group, add server to 'dst' rsgroup EVEN IF IT IS NOT online (could be a // rsgroup of dead servers that are to come back later). Set
onlineServers = dst.getName().equals(RSGroupInfo.DEFAULT_GROUP)? - Utility.getOnlineServers(this.masterServices): null; + Utility.getOnlineAddresses(this.masterServices): null; for (Address el: servers) { src.removeServer(el); if (onlineServers != null) { @@ -363,8 +354,10 @@ class RSGroupInfoManagerImpl implements RSGroupInfoManager { // refresh default group, prune NavigableSet orphanTables = new TreeSet<>(); - for(String entry: masterServices.getTableDescriptors().getAll().keySet()) { - orphanTables.add(TableName.valueOf(entry)); + try (Admin admin = this.masterServices.getConnection().getAdmin()) { + for (TableDescriptor td: admin.listTableDescriptors()) { + orphanTables.add(td.getTableName()); + } } final List specialTables; @@ -372,8 +365,10 @@ class RSGroupInfoManagerImpl implements RSGroupInfoManager { specialTables = Arrays.asList(AccessControlLists.ACL_TABLE_NAME, TableName.META_TABLE_NAME, TableName.NAMESPACE_TABLE_NAME, RSGROUP_TABLE_NAME); } else { - specialTables = - masterServices.listTableNamesByNamespace(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR); + try (Admin admin = this.masterServices.getConnection().getAdmin()) { + specialTables = Lists.newArrayList( + admin.listTableNamesByNamespace(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR)); + } } for (TableName table : specialTables) { @@ -433,10 +428,11 @@ class RSGroupInfoManagerImpl implements RSGroupInfoManager { return newTableMap; } - private synchronized void flushConfig() - throws IOException { - flushConfig(this.rsGroupMap); - } +// TODO +// private synchronized void flushConfig() +// throws IOException { +// flushConfig(this.rsGroupMap); +// } private synchronized void flushConfig(Map newGroupMap) throws IOException { @@ -517,9 +513,11 @@ class RSGroupInfoManagerImpl implements RSGroupInfoManager { } // Called by getDefaultServers. Presume it has lock in place. 
- private List getOnlineRS() throws IOException { + private Collection getOnlineRS() throws IOException { if (masterServices != null) { - return masterServices.getServerManager().getOnlineServersList(); + try (Admin admin = this.masterServices.getConnection().getAdmin()) { + return admin.getServers(); + } } LOG.debug("Reading online RS from zookeeper"); List servers = new LinkedList<>(); @@ -640,134 +638,115 @@ class RSGroupInfoManagerImpl implements RSGroupInfoManager { } private boolean waitForGroupTableOnline() { - final List foundRegions = new LinkedList<>(); - final List assignedRegions = new LinkedList<>(); final AtomicBoolean found = new AtomicBoolean(false); - final TableStateManager tsm = masterServices.getTableStateManager(); - boolean createSent = false; - while (!found.get() && isMasterRunning(masterServices)) { - foundRegions.clear(); - assignedRegions.clear(); - found.set(true); - try { - conn.getTable(TableName.NAMESPACE_TABLE_NAME); - conn.getTable(RSGROUP_TABLE_NAME); - boolean rootMetaFound = - masterServices.getMetaTableLocator().verifyMetaRegionLocation( - conn, masterServices.getZooKeeper(), 1); - final AtomicBoolean nsFound = new AtomicBoolean(false); - if (rootMetaFound) { - MetaTableAccessor.Visitor visitor = new DefaultVisitorBase() { - @Override - public boolean visitInternal(Result row) throws IOException { - RegionInfo info = MetaTableAccessor.getRegionInfo(row); - if (info != null) { - Cell serverCell = - row.getColumnLatestCell(HConstants.CATALOG_FAMILY, - HConstants.SERVER_QUALIFIER); - if (RSGROUP_TABLE_NAME.equals(info.getTable()) && serverCell != null) { - ServerName sn = - ServerName.parseVersionedServerName(CellUtil.cloneValue(serverCell)); - if (sn == null) { - found.set(false); - } else if (tsm.isTableState(RSGROUP_TABLE_NAME, TableState.State.ENABLED)) { - try { - ClientProtos.ClientService.BlockingInterface rs = conn.getClient(sn); - ClientProtos.GetRequest request = - RequestConverter.buildGetRequest(info.getRegionName(), - 
new Get(ROW_KEY)); - rs.get(null, request); - assignedRegions.add(info); - } catch(Exception ex) { - LOG.debug("Caught exception while verifying group region", ex); - } - } - foundRegions.add(info); - } - if (TableName.NAMESPACE_TABLE_NAME.equals(info.getTable())) { - Cell cell = row.getColumnLatestCell(HConstants.CATALOG_FAMILY, - HConstants.SERVER_QUALIFIER); - ServerName sn = null; - if(cell != null) { - sn = ServerName.parseVersionedServerName(CellUtil.cloneValue(cell)); - } - if (tsm.isTableState(TableName.NAMESPACE_TABLE_NAME, - TableState.State.ENABLED)) { - try { - ClientProtos.ClientService.BlockingInterface rs = conn.getClient(sn); - ClientProtos.GetRequest request = - RequestConverter.buildGetRequest(info.getRegionName(), - new Get(ROW_KEY)); - rs.get(null, request); - nsFound.set(true); - } catch(Exception ex) { - LOG.debug("Caught exception while verifying group region", ex); - } - } - } - } - return true; - } - }; - MetaTableAccessor.fullScanRegions(conn, visitor); - // if no regions in meta then we have to create the table - if (foundRegions.size() < 1 && rootMetaFound && !createSent && nsFound.get()) { - createRSGroupTable(); - createSent = true; - } - LOG.info("RSGroup table=" + RSGROUP_TABLE_NAME + " isOnline=" + found.get() - + ", regionCount=" + foundRegions.size() + ", assignCount=" - + assignedRegions.size() + ", rootMetaFound=" + rootMetaFound); - found.set(found.get() && assignedRegions.size() == foundRegions.size() - && foundRegions.size() > 0); - } else { - LOG.info("Waiting for catalog tables to come online"); - found.set(false); - } - if (found.get()) { - LOG.debug("With group table online, refreshing cached information."); - RSGroupInfoManagerImpl.this.refresh(true); - online = true; - //flush any inconsistencies between ZK and HTable - RSGroupInfoManagerImpl.this.flushConfig(); - } - } catch (RuntimeException e) { - throw e; - } catch(Exception e) { - found.set(false); - LOG.warn("Failed to perform check", e); - } - try { - 
Thread.sleep(100); - } catch (InterruptedException e) { - LOG.info("Sleep interrupted", e); - } - } +// final List foundRegions = new LinkedList<>(); +// final List assignedRegions = new LinkedList<>(); +// boolean createSent = false; +// while (!found.get() && isMasterRunning(masterServices)) { +// foundRegions.clear(); +// assignedRegions.clear(); +// found.set(true); +// try { +// conn.getTable(TableName.NAMESPACE_TABLE_NAME); +// conn.getTable(RSGROUP_TABLE_NAME); +// /** TODO: ADD MEANS OF ALLOWING VERIFICATION OF META AND NS DEPLOY +// boolean rootMetaFound = +// masterServices.getMetaTableLocator().verifyMetaRegionLocation(conn, masterServices.getZooKeeper(), 1); +// final AtomicBoolean nsFound = new AtomicBoolean(false); +// if (rootMetaFound) { +// MetaTableAccessor.Visitor visitor = new DefaultVisitorBase() { +// @Override +// public boolean visitInternal(Result row) throws IOException { +// RegionInfo info = MetaTableAccessor.getRegionInfo(row); +// if (info != null) { +// Cell serverCell = +// row.getColumnLatestCell(HConstants.CATALOG_FAMILY, +// HConstants.SERVER_QUALIFIER); +// if (RSGROUP_TABLE_NAME.equals(info.getTable()) && serverCell != null) { +// ServerName sn = +// ServerName.parseVersionedServerName(CellUtil.cloneValue(serverCell)); +// if (sn == null) { +// found.set(false); +// } else if (tsm.isTableState(RSGROUP_TABLE_NAME, TableState.State.ENABLED)) { +// try { +// ClientProtos.ClientService.BlockingInterface rs = conn.getClient(sn); +// ClientProtos.GetRequest request = +// RequestConverter.buildGetRequest(info.getRegionName(), +// new Get(ROW_KEY)); +// rs.get(null, request); +// assignedRegions.add(info); +// } catch(Exception ex) { +// LOG.debug("Caught exception while verifying group region", ex); +// } +// } +// foundRegions.add(info); +// } +// if (TableName.NAMESPACE_TABLE_NAME.equals(info.getTable())) { +// Cell cell = row.getColumnLatestCell(HConstants.CATALOG_FAMILY, +// HConstants.SERVER_QUALIFIER); +// ServerName sn = null; 
+// if(cell != null) { +// sn = ServerName.parseVersionedServerName(CellUtil.cloneValue(cell)); +// } +// if (tsm.isTableState(TableName.NAMESPACE_TABLE_NAME, +// TableState.State.ENABLED)) { +// try { +// ClientProtos.ClientService.BlockingInterface rs = conn.getClient(sn); +// ClientProtos.GetRequest request = +// RequestConverter.buildGetRequest(info.getRegionName(), +// new Get(ROW_KEY)); +// rs.get(null, request); +// nsFound.set(true); +// } catch(Exception ex) { +// LOG.debug("Caught exception while verifying group region", ex); +// } +// } +// } +// } +// return true; +// } +// }; +// MetaTableAccessor.fullScanRegions(conn, visitor); +// // if no regions in meta then we have to create the table +// if (foundRegions.size() < 1 && rootMetaFound && !createSent && nsFound.get()) { +// createRSGroupTable(); +// createSent = true; +// } +// LOG.info("RSGroup table=" + RSGROUP_TABLE_NAME + " isOnline=" + found.get() +// + ", regionCount=" + foundRegions.size() + ", assignCount=" +// + assignedRegions.size() + ", rootMetaFound=" + rootMetaFound); +// found.set(found.get() && assignedRegions.size() == foundRegions.size() +// && foundRegions.size() > 0); +// } else { +// LOG.info("Waiting for catalog tables to come online"); +// found.set(false); +// } +// if (found.get()) { +// LOG.debug("With group table online, refreshing cached information."); +// RSGroupInfoManagerImpl.this.refresh(true); +// online = true; +// //flush any inconsistencies between ZK and HTable +// RSGroupInfoManagerImpl.this.flushConfig(); +// } +// */ +// } catch (RuntimeException e) { +// throw e; +// } catch(Exception e) { +// found.set(false); +// LOG.warn("Failed to perform check", e); +// } +// try { +// Thread.sleep(100); +// } catch (InterruptedException e) { +// LOG.info("Sleep interrupted", e); +// } +// } return found.get(); } private void createRSGroupTable() throws IOException { - Long procId = masterServices.createSystemTable(RSGROUP_TABLE_DESC); - // wait for region to be online - 
int tries = 600; - while (!(masterServices.getMasterProcedureExecutor().isFinished(procId)) - && masterServices.getMasterProcedureExecutor().isRunning() - && tries > 0) { - try { - Thread.sleep(100); - } catch (InterruptedException e) { - throw new IOException("Wait interrupted ", e); - } - tries--; - } - if(tries <= 0) { - throw new IOException("Failed to create group table in a given time."); - } else { - Procedure result = masterServices.getMasterProcedureExecutor().getResult(procId); - if (result != null && result.isFailed()) { - throw new IOException("Failed to create group table. " + - result.getException().unwrapRemoteIOException()); - } + try (Admin admin = masterServices.getConnection().getAdmin()) { + admin.createTable(RSGROUP_TABLE_DESC); } } diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/Utility.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/Utility.java index f0c699828a..f05f342c44 100644 --- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/Utility.java +++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/Utility.java @@ -1,4 +1,4 @@ -/** +/* * Copyright The Apache Software Foundation * * Licensed to the Apache Software Foundation (ASF) under one @@ -19,10 +19,12 @@ */ package org.apache.hadoop.hbase.rsgroup; +import java.io.IOException; import java.util.HashSet; import java.util.Set; import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.client.Admin; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.net.Address; @@ -33,14 +35,15 @@ import org.apache.hadoop.hbase.net.Address; @InterfaceAudience.Private class Utility { /** - * @param master * @return Set of online Servers named for their hostname and port (not ServerName). */ - static Set
getOnlineServers(final MasterServices master) { - Set
onlineServers = new HashSet
(); + static Set
getOnlineAddresses(final MasterServices master) throws IOException { + Set
onlineServers = new HashSet<>(); if (master == null) return onlineServers; - for(ServerName server: master.getServerManager().getOnlineServers().keySet()) { - onlineServers.add(server.getAddress()); + try (Admin admin = master.getConnection().getAdmin()) { + for (ServerName server: admin.getServers()) { + onlineServers.add(server.getAddress()); + } } return onlineServers; } diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancer.java b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancer.java index db7cf4d649..bb9b4f684b 100644 --- a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancer.java +++ b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancer.java @@ -101,7 +101,7 @@ public class TestRSGroupBasedLoadBalancer { conf.set("hbase.rsgroup.grouploadbalancer.class", SimpleLoadBalancer.class.getCanonicalName()); loadBalancer = new RSGroupBasedLoadBalancer(); loadBalancer.setRsGroupInfoManager(getMockedGroupInfoManager()); - loadBalancer.setMasterServices(getMockedMaster()); + loadBalancer.setMaster(getMockedHMaster()); loadBalancer.setConf(conf); loadBalancer.initialize(); } @@ -556,17 +556,17 @@ public class TestRSGroupBasedLoadBalancer { return tds; } - private static MasterServices getMockedMaster() throws IOException { + private static HMaster getMockedHMaster() throws IOException { TableDescriptors tds = Mockito.mock(TableDescriptors.class); Mockito.when(tds.get(tables[0])).thenReturn(tableDescs.get(0)); Mockito.when(tds.get(tables[1])).thenReturn(tableDescs.get(1)); Mockito.when(tds.get(tables[2])).thenReturn(tableDescs.get(2)); Mockito.when(tds.get(tables[3])).thenReturn(tableDescs.get(3)); - MasterServices services = Mockito.mock(HMaster.class); - Mockito.when(services.getTableDescriptors()).thenReturn(tds); + HMaster master = Mockito.mock(HMaster.class); + 
Mockito.when(master.getTableDescriptors()).thenReturn(tds); AssignmentManager am = Mockito.mock(AssignmentManager.class); - Mockito.when(services.getAssignmentManager()).thenReturn(am); - return services; + Mockito.when(master.getAssignmentManager()).thenReturn(am); + return master; } private static RSGroupInfoManager getMockedGroupInfoManager() throws IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java index f9cb3bede6..a1bdbd79f9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java @@ -19,9 +19,7 @@ package org.apache.hadoop.hbase; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.Connection; -import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.apache.yetus.audience.InterfaceAudience; @@ -29,7 +27,9 @@ import org.apache.yetus.audience.InterfaceAudience; * Defines the set of shared functions implemented by HBase servers (Masters * and RegionServers). */ -@InterfaceAudience.Private +// This Interface shines through MasterServices and RegionServerServices. They subclass it. +// Be careful what you add here. Make sure it ok for CPs to have access else add elsewhere. +@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) public interface Server extends Abortable, Stoppable { /** * Gets the configuration object for this server. @@ -50,32 +50,11 @@ public interface Server extends Abortable, Stoppable { Connection getConnection(); /** - * Returns a reference to the servers' cluster connection. Prefer {@link #getConnection()}. - * - * Important note: this method returns a reference to Connection which is managed - * by Server itself, so callers must NOT attempt to close connection obtained. 
- */ - ClusterConnection getClusterConnection(); - - /** - * Returns instance of {@link org.apache.hadoop.hbase.zookeeper.MetaTableLocator} - * running inside this server. This MetaServerLocator is started and stopped by server, clients - * shouldn't manage it's lifecycle. - * @return instance of {@link MetaTableLocator} associated with this server. - */ - MetaTableLocator getMetaTableLocator(); - - /** * @return The unique server name for this server. */ ServerName getServerName(); /** - * Get CoordinatedStateManager instance for this server. - */ - CoordinatedStateManager getCoordinatedStateManager(); - - /** * @return The {@link ChoreService} instance for this server */ ChoreService getChoreService(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/BaseCoordinatedStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/BaseCoordinatedStateManager.java index 0727375e18..7a8c826386 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/BaseCoordinatedStateManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/BaseCoordinatedStateManager.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogManagerCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogManagerCoordination.java index df8103b31b..0204518025 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogManagerCoordination.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogManagerCoordination.java @@ -1,4 +1,4 @@ -/** +/* * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file @@ -26,7 +26,7 @@ import java.util.concurrent.ConcurrentMap; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.RegionInfo; -import org.apache.hadoop.hbase.master.MasterServices; +import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.SplitLogManager.ResubmitDirective; import org.apache.hadoop.hbase.master.SplitLogManager.Task; import org.apache.yetus.audience.InterfaceAudience; @@ -54,33 +54,43 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.SplitLo */ @InterfaceAudience.Private public interface SplitLogManagerCoordination { - /** - * Detail class that shares data between coordination and split log manager + * Class that lets out detail on state of splitting. */ public static class SplitLogManagerDetails { final private ConcurrentMap tasks; - final private MasterServices master; + final private HMaster master; final private Set failedDeletions; - public SplitLogManagerDetails(ConcurrentMap tasks, MasterServices master, + public SplitLogManagerDetails(ConcurrentMap tasks, HMaster master, Set failedDeletions) { this.tasks = tasks; this.master = master; this.failedDeletions = failedDeletions; } - /** - * @return the master value - */ - public MasterServices getMaster() { - return master; + boolean isInitialized() { + return this.master.isInitialized(); + } + + boolean areDeadServersInProgress() { + return this.master.getServerManager().areDeadServersInProgress(); + } + + long getLastFlushedSequenceId(String regionEncodedName) { + return this.master.getServerManager().getLastFlushedSequenceId(regionEncodedName.getBytes()). 
+ getLastFlushedSequenceId(); + } + + boolean isServerOnline(ServerName serverName) { + return this.master.getServerManager() != null && + this.master.getServerManager().isServerOnline(serverName); } /** * @return map of tasks */ - public ConcurrentMap getTasks() { + ConcurrentMap getTasks() { return tasks; } @@ -94,7 +104,7 @@ public interface SplitLogManagerCoordination { /** * @return server name */ - public ServerName getServerName() { + ServerName getServerName() { return master.getServerName(); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java index 5fd20e82c4..06325aa745 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java @@ -195,9 +195,7 @@ public class ZKSplitLogManagerCoordination extends ZooKeeperListener implements // finished the task. This allows to continue if the worker cannot actually handle it, // for any reason. final long time = EnvironmentEdgeManager.currentTime() - task.last_update; - final boolean alive = - details.getMaster().getServerManager() != null ? 
details.getMaster().getServerManager() - .isServerOnline(task.cur_worker_name) : true; + final boolean alive = details.isServerOnline(task.cur_worker_name); if (alive && time < timeout) { LOG.trace("Skipping the resubmit of " + task.toString() + " because the server " + task.cur_worker_name + " is not marked as dead, we waited for " + time @@ -301,8 +299,7 @@ public class ZKSplitLogManagerCoordination extends ZooKeeperListener implements } } } - if (count == 0 && this.details.getMaster().isInitialized() - && !this.details.getMaster().getServerManager().areDeadServersInProgress()) { + if (count == 0 && this.details.isInitialized() && !this.details.areDeadServersInProgress()) { // No splitting work items left ZKSplitLog.deleteRecoveringRegionZNodes(watcher, null); // reset lastRecoveringNodeCreationTime because we cleared all recovering znodes at @@ -608,9 +605,7 @@ public class ZKSplitLogManagerCoordination extends ZooKeeperListener implements regionEncodeName); long lastRecordedFlushedSequenceId = -1; try { - long lastSequenceId = - this.details.getMaster().getServerManager() - .getLastFlushedSequenceId(regionEncodeName.getBytes()).getLastFlushedSequenceId(); + long lastSequenceId = this.details.getLastFlushedSequenceId(regionEncodeName); /* * znode layout: .../region_id[last known flushed sequence id]/failed server[last known diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterCoprocessorEnvironment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterCoprocessorEnvironment.java index 1668b69f2d..cd5e71c0b9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterCoprocessorEnvironment.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterCoprocessorEnvironment.java @@ -29,7 +29,8 @@ import org.apache.hadoop.hbase.metrics.MetricRegistry; @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) @InterfaceStability.Evolving public interface 
MasterCoprocessorEnvironment extends CoprocessorEnvironment { - /** @return reference to the HMaster services */ + /** @return reference to the HMaster services, a severe subset of HMaster facility that + * we have carefully curated as safe to expose to Coprocessors. */ MasterServices getMasterServices(); /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java index 85da610a8e..0cd9b9b760 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java @@ -23,7 +23,6 @@ import java.io.IOException; import java.util.List; import java.util.Set; -import org.apache.hadoop.hbase.Coprocessor; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.MetaMutationAnnotation; import org.apache.hadoop.hbase.NamespaceDescriptor; @@ -1461,4 +1460,16 @@ public interface MasterObserver { */ default void postClearDeadServers(ObserverContext ctx) throws IOException {} + + /** + * Called before get region servers. + */ + default void preGetServers(ObserverContext ctx) + throws IOException {} + + /** + * Called after get region servers. 
+ */ + default void postGetServers(ObserverContext ctx, + List servers) throws IOException {} } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeLoadBalancer.java index 68e5e897d3..2be7c607e8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeLoadBalancer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeLoadBalancer.java @@ -82,7 +82,7 @@ public class FavoredNodeLoadBalancer extends BaseLoadBalancer implements Favored public synchronized void initialize() throws HBaseIOException { super.initialize(); super.setConf(conf); - this.fnm = services.getFavoredNodesManager(); + this.fnm = getMaster().getFavoredNodesManager(); this.rackManager = new RackManager(conf); super.setConf(conf); } @@ -93,7 +93,7 @@ public class FavoredNodeLoadBalancer extends BaseLoadBalancer implements Favored List plans = new ArrayList<>(); //perform a scan of the meta to get the latest updates (if any) SnapshotOfRegionAssignmentFromMeta snaphotOfRegionAssignment = - new SnapshotOfRegionAssignmentFromMeta(super.services.getConnection()); + new SnapshotOfRegionAssignmentFromMeta(getMaster().getConnection()); try { snaphotOfRegionAssignment.initialize(); } catch (IOException ie) { @@ -102,7 +102,7 @@ public class FavoredNodeLoadBalancer extends BaseLoadBalancer implements Favored } // This is not used? Findbugs says so: Map serverNameToServerNameWithoutCode = new HashMap<>(); Map serverNameWithoutCodeToServerName = new HashMap<>(); - ServerManager serverMgr = super.services.getServerManager(); + ServerManager serverMgr = getMaster().getServerManager(); for (ServerName sn: serverMgr.getOnlineServersList()) { ServerName s = ServerName.valueOf(sn.getHostname(), sn.getPort(), ServerName.NON_STARTCODE); // FindBugs complains about useless store! 
serverNameToServerNameWithoutCode.put(sn, s); @@ -133,9 +133,9 @@ public class FavoredNodeLoadBalancer extends BaseLoadBalancer implements Favored } //the region is currently on none of the favored nodes //get it on one of them if possible - ServerLoad l1 = super.services.getServerManager().getLoad( + ServerLoad l1 = getMaster().getServerManager().getLoad( serverNameWithoutCodeToServerName.get(favoredNodes.get(1))); - ServerLoad l2 = super.services.getServerManager().getLoad( + ServerLoad l2 = getMaster().getServerManager().getLoad( serverNameWithoutCodeToServerName.get(favoredNodes.get(2))); if (l1 != null && l2 != null) { if (l1.getLoad() > l2.getLoad()) { @@ -297,8 +297,8 @@ public class FavoredNodeLoadBalancer extends BaseLoadBalancer implements Favored // assign the region to the one with a lower load // (both have the desired hdfs blocks) ServerName s; - ServerLoad tertiaryLoad = super.services.getServerManager().getLoad(tertiaryHost); - ServerLoad secondaryLoad = super.services.getServerManager().getLoad(secondaryHost); + ServerLoad tertiaryLoad = getMaster().getServerManager().getLoad(tertiaryHost); + ServerLoad secondaryLoad = getMaster().getServerManager().getLoad(secondaryHost); if (secondaryLoad.getLoad() < tertiaryLoad.getLoad()) { s = secondaryHost; } else { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java index 36d383a3e4..851be7c109 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -24,7 +24,6 @@ import java.io.IOException; import java.net.InetAddress; import java.net.InetSocketAddress; import java.nio.ByteBuffer; -import java.nio.channels.GatheringByteChannel; import java.nio.channels.ReadableByteChannel; import java.nio.channels.WritableByteChannel; import java.util.ArrayList; @@ -372,7 +371,7 @@ public abstract class RpcServer implements RpcServerInterface, conf.getLong("hbase.auth.key.update.interval", 24*60*60*1000); long maxAge = conf.getLong("hbase.auth.token.max.lifetime", 7*24*60*60*1000); - return new AuthenticationTokenSecretManager(conf, server.getZooKeeper(), + return new AuthenticationTokenSecretManager(conf, this.server.getZooKeeper(), server.getServerName().toString(), keyUpdateInterval, maxAge); } @@ -585,7 +584,7 @@ public abstract class RpcServer implements RpcServerInterface, /** * Helper for {@link #channelRead(java.nio.channels.ReadableByteChannel, java.nio.ByteBuffer)} - * and {@link #channelWrite(GatheringByteChannel, BufferChain)}. Only + * and #channelWrite(GatheringByteChannel, BufferChain). Only * one of readCh or writeCh should be non-null. 
* * @param readCh read channel @@ -594,7 +593,6 @@ public abstract class RpcServer implements RpcServerInterface, * @return bytes written * @throws java.io.IOException e * @see #channelRead(java.nio.channels.ReadableByteChannel, java.nio.ByteBuffer) - * @see #channelWrite(GatheringByteChannel, BufferChain) */ private static int channelIO(ReadableByteChannel readCh, WritableByteChannel writeCh, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java index d3ba231b15..4e0114630c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java @@ -64,15 +64,15 @@ public class CatalogJanitor extends ScheduledChore { private final AtomicBoolean alreadyRunning = new AtomicBoolean(false); private final AtomicBoolean enabled = new AtomicBoolean(true); - private final MasterServices services; private final Connection connection; + private final HMaster master; // PID of the last Procedure launched herein. Keep around for Tests. 
- CatalogJanitor(final MasterServices services) { - super("CatalogJanitor-" + services.getServerName().toShortString(), services, - services.getConfiguration().getInt("hbase.catalogjanitor.interval", 300000)); - this.services = services; - this.connection = services.getConnection(); + CatalogJanitor(HMaster master) { + super("CatalogJanitor-" + master.getServerName().toShortString(), master, + master.getConfiguration().getInt("hbase.catalogjanitor.interval", 300000)); + this.master = master; + this.connection = this.master.getConnection(); } @Override @@ -110,16 +110,16 @@ public class CatalogJanitor extends ScheduledChore { @Override protected void chore() { try { - AssignmentManager am = this.services.getAssignmentManager(); + AssignmentManager am = this.master.getAssignmentManager(); if (this.enabled.get() - && !this.services.isInMaintenanceMode() + && !this.master.isInMaintenanceMode() && am != null && am.isFailoverCleanupDone() && !am.hasRegionsInTransition()) { scan(); } else { LOG.warn("CatalogJanitor is disabled! 
Enabled=" + this.enabled.get() + - ", maintenanceMode=" + this.services.isInMaintenanceMode() + + ", maintenanceMode=" + this.master.isInMaintenanceMode() + ", am=" + am + ", failoverCleanupDone=" + (am != null && am.isFailoverCleanupDone()) + ", hasRIT=" + (am != null && am.hasRegionsInTransition())); } @@ -200,14 +200,14 @@ public class CatalogJanitor extends ScheduledChore { */ boolean cleanMergeRegion(final RegionInfo mergedRegion, final RegionInfo regionA, final RegionInfo regionB) throws IOException { - FileSystem fs = this.services.getMasterFileSystem().getFileSystem(); - Path rootdir = this.services.getMasterFileSystem().getRootDir(); + FileSystem fs = this.master.getMasterFileSystem().getFileSystem(); + Path rootdir = this.master.getMasterFileSystem().getRootDir(); Path tabledir = FSUtils.getTableDir(rootdir, mergedRegion.getTable()); TableDescriptor htd = getTableDescriptor(mergedRegion.getTable()); HRegionFileSystem regionFs = null; try { regionFs = HRegionFileSystem.openRegionFromFileSystem( - this.services.getConfiguration(), fs, tabledir, mergedRegion, true); + this.master.getConfiguration(), fs, tabledir, mergedRegion, true); } catch (IOException e) { LOG.warn("Merged region does not exist: " + mergedRegion.getEncodedName()); } @@ -215,14 +215,14 @@ public class CatalogJanitor extends ScheduledChore { LOG.debug("Deleting region " + regionA.getShortNameToLog() + " and " + regionB.getShortNameToLog() + " from fs because merged region no longer holds references"); - ProcedureExecutor pe = this.services.getMasterProcedureExecutor(); + ProcedureExecutor pe = this.master.getMasterProcedureExecutor(); pe.submitProcedure(new GCMergedRegionsProcedure(pe.getEnvironment(), mergedRegion, regionA, regionB)); // Remove from in-memory states - this.services.getAssignmentManager().getRegionStates().deleteRegion(regionA); - this.services.getAssignmentManager().getRegionStates().deleteRegion(regionB); - this.services.getServerManager().removeRegion(regionA); - 
this.services.getServerManager().removeRegion(regionB); + this.master.getAssignmentManager().getRegionStates().deleteRegion(regionA); + this.master.getAssignmentManager().getRegionStates().deleteRegion(regionB); + this.master.getServerManager().removeRegion(regionA); + this.master.getServerManager().removeRegion(regionB); return true; } return false; @@ -249,7 +249,7 @@ public class CatalogJanitor extends ScheduledChore { */ Map mergedRegions = scanTriple.getSecond(); for (Map.Entry e : mergedRegions.entrySet()) { - if (this.services.isInMaintenanceMode()) { + if (this.master.isInMaintenanceMode()) { // Stop cleaning if the master is in maintenance mode break; } @@ -278,7 +278,7 @@ public class CatalogJanitor extends ScheduledChore { // regions whose parents are still around HashSet parentNotCleaned = new HashSet<>(); for (Map.Entry e : splitParents.entrySet()) { - if (this.services.isInMaintenanceMode()) { + if (this.master.isInMaintenanceMode()) { // Stop cleaning if the master is in maintenance mode break; } @@ -356,11 +356,11 @@ public class CatalogJanitor extends ScheduledChore { LOG.debug("Deleting region " + parent.getShortNameToLog() + " because daughters -- " + daughterA + ", " + daughterB + " -- no longer hold references"); - ProcedureExecutor pe = this.services.getMasterProcedureExecutor(); + ProcedureExecutor pe = this.master.getMasterProcedureExecutor(); pe.submitProcedure(new GCRegionProcedure(pe.getEnvironment(), parent)); // Remove from in-memory states - this.services.getAssignmentManager().getRegionStates().deleteRegion(parent); - this.services.getServerManager().removeRegion(parent); + this.master.getAssignmentManager().getRegionStates().deleteRegion(parent); + this.master.getServerManager().removeRegion(parent); return true; } return false; @@ -392,8 +392,8 @@ public class CatalogJanitor extends ScheduledChore { return new Pair<>(Boolean.FALSE, Boolean.FALSE); } - FileSystem fs = this.services.getMasterFileSystem().getFileSystem(); - Path 
rootdir = this.services.getMasterFileSystem().getRootDir(); + FileSystem fs = this.master.getMasterFileSystem().getFileSystem(); + Path rootdir = this.master.getMasterFileSystem().getRootDir(); Path tabledir = FSUtils.getTableDir(rootdir, daughter.getTable()); Path daughterRegionDir = new Path(tabledir, daughter.getEncodedName()); @@ -414,7 +414,7 @@ public class CatalogJanitor extends ScheduledChore { TableDescriptor parentDescriptor = getTableDescriptor(parent.getTable()); try { regionFs = HRegionFileSystem.openRegionFromFileSystem( - this.services.getConfiguration(), fs, tabledir, daughter, true); + this.master.getConfiguration(), fs, tabledir, daughter, true); for (ColumnFamilyDescriptor family: parentDescriptor.getColumnFamilies()) { if ((references = regionFs.hasReferences(family.getNameAsString()))) { @@ -431,7 +431,7 @@ public class CatalogJanitor extends ScheduledChore { private TableDescriptor getTableDescriptor(final TableName tableName) throws FileNotFoundException, IOException { - return this.services.getTableDescriptors().get(tableName); + return this.master.getTableDescriptors().get(tableName); } /** @@ -446,7 +446,7 @@ public class CatalogJanitor extends ScheduledChore { // Get merge regions if it is a merged region and already has merge // qualifier Pair mergeRegions = MetaTableAccessor - .getRegionsFromMergeQualifier(this.services.getConnection(), + .getRegionsFromMergeQualifier(this.master.getConnection(), region.getRegionName()); if (mergeRegions == null || (mergeRegions.getFirst() == null && mergeRegions.getSecond() == null)) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchema.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchema.java index 746f9ad62c..dd8f8babeb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchema.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchema.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache 
Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchemaServiceImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchemaServiceImpl.java index f12ec3952c..67ed809834 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchemaServiceImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchemaServiceImpl.java @@ -39,13 +39,13 @@ import org.apache.hadoop.hbase.util.NonceKey; @InterfaceAudience.Private class ClusterSchemaServiceImpl extends AbstractService implements ClusterSchemaService { private final TableNamespaceManager tableNamespaceManager; - private final MasterServices masterServices; + private final HMaster master; private final static List EMPTY_NAMESPACE_LIST = Collections.unmodifiableList(new ArrayList(0)); - ClusterSchemaServiceImpl(final MasterServices masterServices) { - this.masterServices = masterServices; - this.tableNamespaceManager = new TableNamespaceManager(masterServices); + ClusterSchemaServiceImpl(HMaster master) { + this.master = master; + this.tableNamespaceManager = new TableNamespaceManager(master); } // All below are synchronized so consistent view on whether running or not. 
@@ -79,7 +79,7 @@ class ClusterSchemaServiceImpl extends AbstractService implements ClusterSchemaS private long submitProcedure(final Procedure procedure, final NonceKey nonceKey) throws ServiceNotRunningException { checkIsRunning(); - ProcedureExecutor pe = this.masterServices.getMasterProcedureExecutor(); + ProcedureExecutor pe = this.master.getMasterProcedureExecutor(); return pe.submitProcedure(procedure, nonceKey); } @@ -87,24 +87,21 @@ class ClusterSchemaServiceImpl extends AbstractService implements ClusterSchemaS public long createNamespace(NamespaceDescriptor namespaceDescriptor, final NonceKey nonceKey) throws IOException { return submitProcedure(new CreateNamespaceProcedure( - this.masterServices.getMasterProcedureExecutor().getEnvironment(), namespaceDescriptor), - nonceKey); + this.master.getMasterProcedureExecutor().getEnvironment(), namespaceDescriptor), nonceKey); } @Override public long modifyNamespace(NamespaceDescriptor namespaceDescriptor, final NonceKey nonceKey) throws IOException { return submitProcedure(new ModifyNamespaceProcedure( - this.masterServices.getMasterProcedureExecutor().getEnvironment(), namespaceDescriptor), - nonceKey); + this.master.getMasterProcedureExecutor().getEnvironment(), namespaceDescriptor), nonceKey); } @Override public long deleteNamespace(String name, final NonceKey nonceKey) throws IOException { return submitProcedure(new DeleteNamespaceProcedure( - this.masterServices.getMasterProcedureExecutor().getEnvironment(), name), - nonceKey); + this.master.getMasterProcedureExecutor().getEnvironment(), name), nonceKey); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index 9111f94182..41894d276a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -1,4 +1,4 @@ -/** +/* * * Licensed to the 
Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file @@ -675,7 +675,6 @@ public class HMaster extends HRegionServer implements MasterServices { return MasterDumpServlet.class; } - @Override public MetricsMaster getMasterMetrics() { return metricsMaster; } @@ -687,7 +686,7 @@ public class HMaster extends HRegionServer implements MasterServices { InterruptedException, KeeperException, CoordinatedStateException { this.balancer = LoadBalancerFactory.getLoadBalancer(conf); this.normalizer = RegionNormalizerFactory.getRegionNormalizer(conf); - this.normalizer.setMasterServices(this); + this.normalizer.setMaster(this); this.normalizer.setMasterRpcServices((MasterRpcServices)rpcServices); this.loadBalancerTracker = new LoadBalancerTracker(zooKeeper, this); this.loadBalancerTracker.start(); @@ -704,7 +703,7 @@ public class HMaster extends HRegionServer implements MasterServices { this.replicationManager = new ReplicationManager(conf, zooKeeper, this); - this.regionServerTracker = new RegionServerTracker(zooKeeper, this, this.serverManager); + this.regionServerTracker = new RegionServerTracker(this); this.regionServerTracker.start(); this.drainingServerTracker = new DrainingServerTracker(zooKeeper, this, this.serverManager); @@ -724,7 +723,8 @@ public class HMaster extends HRegionServer implements MasterServices { ", setting cluster-up flag (Was=" + wasUp + ")"); // create/initialize the snapshot manager and other procedure managers - this.snapshotManager = new SnapshotManager(); + this.snapshotManager = + new SnapshotManager(this, null, this.getExecutorService()); this.mpmHost = new MasterProcedureManagerHost(); this.mpmHost.register(this.snapshotManager); this.mpmHost.register(new MasterFlushTableProcedureManager()); @@ -784,7 +784,7 @@ public class HMaster extends HRegionServer implements MasterServices { ZKClusterId.setClusterId(this.zooKeeper, fileSystemManager.getClusterId()); this.initLatch.countDown(); - 
this.serverManager = createServerManager(this); + this.serverManager = createServerManager(); this.tableStateManager = new TableStateManager(this); @@ -829,7 +829,7 @@ public class HMaster extends HRegionServer implements MasterServices { } //initialize load balancer - this.balancer.setMasterServices(this); + this.balancer.setMaster(this); this.balancer.setClusterStatus(getClusterStatus()); this.balancer.initialize(); @@ -1003,11 +1003,13 @@ public class HMaster extends HRegionServer implements MasterServices { /** * Create a {@link ServerManager} instance. */ - ServerManager createServerManager(final MasterServices master) throws IOException { + // Used in tests to intercede. + @VisibleForTesting + protected ServerManager createServerManager() throws IOException { // We put this out here in a method so can do a Mockito.spy and stub it out // w/ a mocked up ServerManager. setupClusterConnection(); - return new ServerManager(master); + return new ServerManager(this); } private void waitForRegionServers(final MonitoredTask status) @@ -1072,22 +1074,18 @@ public class HMaster extends HRegionServer implements MasterServices { return this.tableDescriptors; } - @Override public ServerManager getServerManager() { return this.serverManager; } - @Override public MasterFileSystem getMasterFileSystem() { return this.fileSystemManager; } - @Override public MasterWalManager getMasterWalManager() { return this.walManager; } - @Override public TableStateManager getTableStateManager() { return tableStateManager; } @@ -1474,7 +1472,6 @@ public class HMaster extends HRegionServer implements MasterServices { return true; } - @Override @VisibleForTesting public RegionNormalizer getRegionNormalizer() { return this.normalizer; @@ -1558,7 +1555,6 @@ public class HMaster extends HRegionServer implements MasterServices { this.catalogJanitorChore.setEnabled(b); } - @Override public long mergeRegions( final RegionInfo[] regionsToMerge, final boolean forcible, @@ -1584,11 +1580,11 @@ public 
class HMaster extends HRegionServer implements MasterServices { "Cannot merge a region to itself " + regionsToMerge[0] + ", " + regionsToMerge[1]); } - return MasterProcedureUtil.submitProcedure( - new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { + return MasterProcedureUtil.submitProcedure( new MasterProcedureUtil. + NonceProcedureRunnable(this.getMasterProcedureExecutor(), nonceGroup, nonce) { @Override protected void run() throws IOException { - getMaster().getMasterCoprocessorHost().preMergeRegions(regionsToMerge); + getMasterCoprocessorHost().preMergeRegions(regionsToMerge); LOG.info(getClientIdAuditPrefix() + " Merge regions " + regionsToMerge[0].getEncodedName() + " and " + regionsToMerge[1].getEncodedName()); @@ -1596,7 +1592,7 @@ public class HMaster extends HRegionServer implements MasterServices { submitProcedure(new MergeTableRegionsProcedure(procedureExecutor.getEnvironment(), regionsToMerge, forcible)); - getMaster().getMasterCoprocessorHost().postMergeRegions(regionsToMerge); + getMasterCoprocessorHost().postMergeRegions(regionsToMerge); } @Override @@ -1606,16 +1602,15 @@ public class HMaster extends HRegionServer implements MasterServices { }); } - @Override public long splitRegion(final RegionInfo regionInfo, final byte[] splitRow, final long nonceGroup, final long nonce) throws IOException { checkInitialized(); - return MasterProcedureUtil.submitProcedure( - new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { + return MasterProcedureUtil.submitProcedure(new MasterProcedureUtil. 
+ NonceProcedureRunnable(this.getMasterProcedureExecutor(), nonceGroup, nonce) { @Override protected void run() throws IOException { - getMaster().getMasterCoprocessorHost().preSplitRegion(regionInfo.getTable(), splitRow); + getMasterCoprocessorHost().preSplitRegion(regionInfo.getTable(), splitRow); LOG.info(getClientIdAuditPrefix() + " split " + regionInfo.getRegionNameAsString()); // Execute the operation asynchronously @@ -1725,7 +1720,6 @@ public class HMaster extends HRegionServer implements MasterServices { } } - @Override public long createTable( final TableDescriptor tableDescriptor, final byte [][] splitKeys, @@ -1739,11 +1733,11 @@ public class HMaster extends HRegionServer implements MasterServices { RegionInfo[] newRegions = ModifyRegionUtils.createRegionInfos(tableDescriptor, splitKeys); sanityCheckTableDescriptor(tableDescriptor); - return MasterProcedureUtil.submitProcedure( - new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { + return MasterProcedureUtil.submitProcedure(new MasterProcedureUtil. 
+ NonceProcedureRunnable(this.getMasterProcedureExecutor(), nonceGroup, nonce) { @Override protected void run() throws IOException { - getMaster().getMasterCoprocessorHost().preCreateTable(tableDescriptor, newRegions); + getMasterCoprocessorHost().preCreateTable(tableDescriptor, newRegions); LOG.info(getClientIdAuditPrefix() + " create " + tableDescriptor); @@ -1754,7 +1748,7 @@ public class HMaster extends HRegionServer implements MasterServices { procedureExecutor.getEnvironment(), tableDescriptor, newRegions, latch)); latch.await(); - getMaster().getMasterCoprocessorHost().postCreateTable(tableDescriptor, newRegions); + getMasterCoprocessorHost().postCreateTable(tableDescriptor, newRegions); } @Override @@ -1764,7 +1758,6 @@ public class HMaster extends HRegionServer implements MasterServices { }); } - @Override public long createSystemTable(final TableDescriptor tableDescriptor) throws IOException { if (isStopped()) { throw new MasterNotRunningException(); @@ -2093,18 +2086,17 @@ public class HMaster extends HRegionServer implements MasterServices { return tableName.equals(TableName.META_TABLE_NAME); } - @Override public long deleteTable( final TableName tableName, final long nonceGroup, final long nonce) throws IOException { checkInitialized(); - return MasterProcedureUtil.submitProcedure( - new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { + return MasterProcedureUtil.submitProcedure(new MasterProcedureUtil. 
+ NonceProcedureRunnable(this.getMasterProcedureExecutor(), nonceGroup, nonce) { @Override protected void run() throws IOException { - getMaster().getMasterCoprocessorHost().preDeleteTable(tableName); + getMasterCoprocessorHost().preDeleteTable(tableName); LOG.info(getClientIdAuditPrefix() + " delete " + tableName); @@ -2114,7 +2106,7 @@ public class HMaster extends HRegionServer implements MasterServices { tableName, latch)); latch.await(); - getMaster().getMasterCoprocessorHost().postDeleteTable(tableName); + getMasterCoprocessorHost().postDeleteTable(tableName); } @Override @@ -2124,7 +2116,6 @@ public class HMaster extends HRegionServer implements MasterServices { }); } - @Override public long truncateTable( final TableName tableName, final boolean preserveSplits, @@ -2132,11 +2123,11 @@ public class HMaster extends HRegionServer implements MasterServices { final long nonce) throws IOException { checkInitialized(); - return MasterProcedureUtil.submitProcedure( - new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { + return MasterProcedureUtil.submitProcedure(new MasterProcedureUtil. 
+ NonceProcedureRunnable(this.getMasterProcedureExecutor(), nonceGroup, nonce) { @Override protected void run() throws IOException { - getMaster().getMasterCoprocessorHost().preTruncateTable(tableName); + getMasterCoprocessorHost().preTruncateTable(tableName); LOG.info(getClientIdAuditPrefix() + " truncate " + tableName); ProcedurePrepareLatch latch = ProcedurePrepareLatch.createLatch(2, 0); @@ -2144,7 +2135,7 @@ public class HMaster extends HRegionServer implements MasterServices { tableName, preserveSplits, latch)); latch.await(); - getMaster().getMasterCoprocessorHost().postTruncateTable(tableName); + getMasterCoprocessorHost().postTruncateTable(tableName); } @Override @@ -2154,7 +2145,6 @@ public class HMaster extends HRegionServer implements MasterServices { }); } - @Override public long addColumn( final TableName tableName, final ColumnFamilyDescriptor columnDescriptor, @@ -2166,11 +2156,11 @@ public class HMaster extends HRegionServer implements MasterServices { checkEncryption(conf, columnDescriptor); checkReplicationScope(columnDescriptor); - return MasterProcedureUtil.submitProcedure( - new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { + return MasterProcedureUtil.submitProcedure(new MasterProcedureUtil. 
+ NonceProcedureRunnable(this.getMasterProcedureExecutor(), nonceGroup, nonce) { @Override protected void run() throws IOException { - if (getMaster().getMasterCoprocessorHost().preAddColumn(tableName, columnDescriptor)) { + if (getMasterCoprocessorHost().preAddColumn(tableName, columnDescriptor)) { return; } @@ -2180,7 +2170,7 @@ public class HMaster extends HRegionServer implements MasterServices { tableName, columnDescriptor, latch)); latch.await(); - getMaster().getMasterCoprocessorHost().postAddColumn(tableName, columnDescriptor); + getMasterCoprocessorHost().postAddColumn(tableName, columnDescriptor); } @Override @@ -2190,7 +2180,6 @@ public class HMaster extends HRegionServer implements MasterServices { }); } - @Override public long modifyColumn( final TableName tableName, final ColumnFamilyDescriptor descriptor, @@ -2202,11 +2191,11 @@ public class HMaster extends HRegionServer implements MasterServices { checkEncryption(conf, descriptor); checkReplicationScope(descriptor); - return MasterProcedureUtil.submitProcedure( - new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { + return MasterProcedureUtil.submitProcedure(new MasterProcedureUtil. 
+ NonceProcedureRunnable(this.getMasterProcedureExecutor(), nonceGroup, nonce) { @Override protected void run() throws IOException { - if (getMaster().getMasterCoprocessorHost().preModifyColumn(tableName, descriptor)) { + if (getMasterCoprocessorHost().preModifyColumn(tableName, descriptor)) { return; } @@ -2219,7 +2208,7 @@ public class HMaster extends HRegionServer implements MasterServices { tableName, descriptor, latch)); latch.await(); - getMaster().getMasterCoprocessorHost().postModifyColumn(tableName, descriptor); + getMasterCoprocessorHost().postModifyColumn(tableName, descriptor); } @Override @@ -2229,7 +2218,6 @@ public class HMaster extends HRegionServer implements MasterServices { }); } - @Override public long deleteColumn( final TableName tableName, final byte[] columnName, @@ -2238,11 +2226,11 @@ public class HMaster extends HRegionServer implements MasterServices { throws IOException { checkInitialized(); - return MasterProcedureUtil.submitProcedure( - new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { + return MasterProcedureUtil.submitProcedure(new MasterProcedureUtil. 
+ NonceProcedureRunnable(this.getMasterProcedureExecutor(), nonceGroup, nonce) { @Override protected void run() throws IOException { - if (getMaster().getMasterCoprocessorHost().preDeleteColumn(tableName, columnName)) { + if (getMasterCoprocessorHost().preDeleteColumn(tableName, columnName)) { return; } @@ -2254,7 +2242,7 @@ public class HMaster extends HRegionServer implements MasterServices { tableName, columnName, latch)); latch.await(); - getMaster().getMasterCoprocessorHost().postDeleteColumn(tableName, columnName); + getMasterCoprocessorHost().postDeleteColumn(tableName, columnName); } @Override @@ -2264,16 +2252,15 @@ public class HMaster extends HRegionServer implements MasterServices { }); } - @Override public long enableTable(final TableName tableName, final long nonceGroup, final long nonce) throws IOException { checkInitialized(); - return MasterProcedureUtil.submitProcedure( - new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { + return MasterProcedureUtil.submitProcedure(new MasterProcedureUtil. 
+ NonceProcedureRunnable(this.getMasterProcedureExecutor(), nonceGroup, nonce) { @Override protected void run() throws IOException { - getMaster().getMasterCoprocessorHost().preEnableTable(tableName); + getMasterCoprocessorHost().preEnableTable(tableName); // Normally, it would make sense for this authorization check to exist inside // AccessController, but because the authorization check is done based on internal state @@ -2307,7 +2294,7 @@ public class HMaster extends HRegionServer implements MasterServices { tableName, false, prepareLatch)); prepareLatch.await(); - getMaster().getMasterCoprocessorHost().postEnableTable(tableName); + getMasterCoprocessorHost().postEnableTable(tableName); } @Override @@ -2317,16 +2304,15 @@ public class HMaster extends HRegionServer implements MasterServices { }); } - @Override public long disableTable(final TableName tableName, final long nonceGroup, final long nonce) throws IOException { checkInitialized(); - return MasterProcedureUtil.submitProcedure( - new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { + return MasterProcedureUtil.submitProcedure(new MasterProcedureUtil. 
+ NonceProcedureRunnable(this.getMasterProcedureExecutor(), nonceGroup, nonce) { @Override protected void run() throws IOException { - getMaster().getMasterCoprocessorHost().preDisableTable(tableName); + getMasterCoprocessorHost().preDisableTable(tableName); LOG.info(getClientIdAuditPrefix() + " disable " + tableName); @@ -2340,7 +2326,7 @@ public class HMaster extends HRegionServer implements MasterServices { tableName, false, prepareLatch)); prepareLatch.await(); - getMaster().getMasterCoprocessorHost().postDisableTable(tableName); + getMasterCoprocessorHost().postDisableTable(tableName); } @Override @@ -2386,17 +2372,16 @@ public class HMaster extends HRegionServer implements MasterServices { return result.get(); } - @Override public long modifyTable(final TableName tableName, final TableDescriptor descriptor, final long nonceGroup, final long nonce) throws IOException { checkInitialized(); sanityCheckTableDescriptor(descriptor); - return MasterProcedureUtil.submitProcedure( - new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { + return MasterProcedureUtil.submitProcedure(new MasterProcedureUtil. 
+ NonceProcedureRunnable(this.getMasterProcedureExecutor(), nonceGroup, nonce) { @Override protected void run() throws IOException { - getMaster().getMasterCoprocessorHost().preModifyTable(tableName, descriptor); + getMasterCoprocessorHost().preModifyTable(tableName, descriptor); LOG.info(getClientIdAuditPrefix() + " modify " + tableName); @@ -2406,7 +2391,7 @@ public class HMaster extends HRegionServer implements MasterServices { descriptor, latch)); latch.await(); - getMaster().getMasterCoprocessorHost().postModifyTable(tableName, descriptor); + getMasterCoprocessorHost().postModifyTable(tableName, descriptor); } @Override @@ -2425,8 +2410,8 @@ public class HMaster extends HRegionServer implements MasterServices { final TableName dstTable = TableName.valueOf(snapshotDesc.getTable()); getClusterSchema().getNamespace(dstTable.getNamespaceAsString()); - return MasterProcedureUtil.submitProcedure( - new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { + return MasterProcedureUtil.submitProcedure(new MasterProcedureUtil. + NonceProcedureRunnable(this.getMasterProcedureExecutor(), nonceGroup, nonce) { @Override protected void run() throws IOException { setProcId( @@ -2440,7 +2425,6 @@ public class HMaster extends HRegionServer implements MasterServices { }); } - @Override public void checkTableModifiable(final TableName tableName) throws IOException, TableNotFoundException, TableNotDisabledException { if (isCatalogTable(tableName)) { @@ -2611,7 +2595,6 @@ public class HMaster extends HRegionServer implements MasterServices { return "0.0.0"; //Lowest version to prevent move system region to unknown version RS. 
} - @Override public void checkIfShouldMoveSystemRegionAsync() { assignmentManager.checkIfShouldMoveSystemRegionAsync(); } @@ -2652,17 +2635,14 @@ public class HMaster extends HRegionServer implements MasterServices { return zooKeeper; } - @Override public MasterCoprocessorHost getMasterCoprocessorHost() { return cpHost; } - @Override public MasterQuotaManager getMasterQuotaManager() { return quotaManager; } - @Override public ProcedureExecutor getMasterProcedureExecutor() { return procedureExecutor; } @@ -2672,12 +2652,10 @@ public class HMaster extends HRegionServer implements MasterServices { return this.serverName; } - @Override public AssignmentManager getAssignmentManager() { return this.assignmentManager; } - @Override public CatalogJanitor getCatalogJanitor() { return this.catalogJanitorChore; } @@ -2765,7 +2743,6 @@ public class HMaster extends HRegionServer implements MasterServices { procedureExecutor.getEnvironment().setEventReady(initialized, isInitialized); } - @Override public ProcedureEvent getInitializedEvent() { return initialized; } @@ -2884,7 +2861,6 @@ public class HMaster extends HRegionServer implements MasterServices { /** * @return the underlying snapshot manager */ - @Override public SnapshotManager getSnapshotManager() { return this.snapshotManager; } @@ -2892,12 +2868,10 @@ public class HMaster extends HRegionServer implements MasterServices { /** * @return the underlying MasterProcedureManagerHost */ - @Override public MasterProcedureManagerHost getMasterProcedureManagerHost() { return mpmHost; } - @Override public ClusterSchema getClusterSchema() { return this.clusterSchemaService; } @@ -2916,18 +2890,18 @@ public class HMaster extends HRegionServer implements MasterServices { TableName.isLegalNamespaceName(Bytes.toBytes(namespaceDescriptor.getName())); - return MasterProcedureUtil.submitProcedure( - new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { + return MasterProcedureUtil.submitProcedure(new 
MasterProcedureUtil. + NonceProcedureRunnable(this.getMasterProcedureExecutor(), nonceGroup, nonce) { @Override protected void run() throws IOException { - if (getMaster().getMasterCoprocessorHost().preCreateNamespace(namespaceDescriptor)) { + if (getMasterCoprocessorHost().preCreateNamespace(namespaceDescriptor)) { throw new BypassCoprocessorException(); } LOG.info(getClientIdAuditPrefix() + " creating " + namespaceDescriptor); // Execute the operation synchronously - wait for the operation to complete before // continuing. setProcId(getClusterSchema().createNamespace(namespaceDescriptor, getNonceKey())); - getMaster().getMasterCoprocessorHost().postCreateNamespace(namespaceDescriptor); + getMasterCoprocessorHost().postCreateNamespace(namespaceDescriptor); } @Override @@ -2950,18 +2924,18 @@ public class HMaster extends HRegionServer implements MasterServices { TableName.isLegalNamespaceName(Bytes.toBytes(namespaceDescriptor.getName())); - return MasterProcedureUtil.submitProcedure( - new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { + return MasterProcedureUtil.submitProcedure(new MasterProcedureUtil. + NonceProcedureRunnable(this.getMasterProcedureExecutor(), nonceGroup, nonce) { @Override protected void run() throws IOException { - if (getMaster().getMasterCoprocessorHost().preModifyNamespace(namespaceDescriptor)) { + if (getMasterCoprocessorHost().preModifyNamespace(namespaceDescriptor)) { throw new BypassCoprocessorException(); } LOG.info(getClientIdAuditPrefix() + " modify " + namespaceDescriptor); // Execute the operation synchronously - wait for the operation to complete before // continuing. 
setProcId(getClusterSchema().modifyNamespace(namespaceDescriptor, getNonceKey())); - getMaster().getMasterCoprocessorHost().postModifyNamespace(namespaceDescriptor); + getMasterCoprocessorHost().postModifyNamespace(namespaceDescriptor); } @Override @@ -2982,18 +2956,18 @@ public class HMaster extends HRegionServer implements MasterServices { throws IOException { checkInitialized(); - return MasterProcedureUtil.submitProcedure( - new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { + return MasterProcedureUtil.submitProcedure(new MasterProcedureUtil. + NonceProcedureRunnable(this.getMasterProcedureExecutor(), nonceGroup, nonce) { @Override protected void run() throws IOException { - if (getMaster().getMasterCoprocessorHost().preDeleteNamespace(name)) { + if (getMasterCoprocessorHost().preDeleteNamespace(name)) { throw new BypassCoprocessorException(); } LOG.info(getClientIdAuditPrefix() + " delete " + name); // Execute the operation synchronously - wait for the operation to complete before // continuing. 
setProcId(getClusterSchema().deleteNamespace(name, getNonceKey())); - getMaster().getMasterCoprocessorHost().postDeleteNamespace(name); + getMasterCoprocessorHost().postDeleteNamespace(name); } @Override @@ -3034,19 +3008,16 @@ public class HMaster extends HRegionServer implements MasterServices { return nsds; } - @Override public List listTableNamesByNamespace(String name) throws IOException { checkInitialized(); return listTableNames(name, null, true); } - @Override public List listTableDescriptorsByNamespace(String name) throws IOException { checkInitialized(); return listTableDescriptors(name, null, null, true); } - @Override public boolean abortProcedure(final long procId, final boolean mayInterruptIfRunning) throws IOException { if (cpHost != null) { @@ -3062,7 +3033,6 @@ public class HMaster extends HRegionServer implements MasterServices { return result; } - @Override public List> getProcedures() throws IOException { if (cpHost != null) { cpHost.preGetProcedures(); @@ -3077,7 +3047,6 @@ public class HMaster extends HRegionServer implements MasterServices { return procList; } - @Override public List getLocks() throws IOException { if (cpHost != null) { cpHost.preGetLocks(); @@ -3200,12 +3169,10 @@ public class HMaster extends HRegionServer implements MasterServices { } } - @Override public long getLastMajorCompactionTimestamp(TableName table) throws IOException { return getClusterStatus().getLastMajorCompactionTsForTable(table); } - @Override public long getLastMajorCompactionTimestampForRegion(byte[] regionName) throws IOException { return getClusterStatus().getLastMajorCompactionTsForRegion(regionName); } @@ -3331,17 +3298,14 @@ public class HMaster extends HRegionServer implements MasterServices { return splitOrMergeTracker; } - @Override public LoadBalancer getLoadBalancer() { return balancer; } - @Override public FavoredNodesManager getFavoredNodesManager() { return favoredNodesManager; } - @Override public void addReplicationPeer(String peerId, 
ReplicationPeerConfig peerConfig) throws ReplicationException, IOException { if (cpHost != null) { @@ -3355,7 +3319,6 @@ public class HMaster extends HRegionServer implements MasterServices { } } - @Override public void removeReplicationPeer(String peerId) throws ReplicationException, IOException { if (cpHost != null) { cpHost.preRemoveReplicationPeer(peerId); @@ -3367,7 +3330,6 @@ public class HMaster extends HRegionServer implements MasterServices { } } - @Override public void enableReplicationPeer(String peerId) throws ReplicationException, IOException { if (cpHost != null) { cpHost.preEnableReplicationPeer(peerId); @@ -3379,7 +3341,6 @@ public class HMaster extends HRegionServer implements MasterServices { } } - @Override public void disableReplicationPeer(String peerId) throws ReplicationException, IOException { if (cpHost != null) { cpHost.preDisableReplicationPeer(peerId); @@ -3391,7 +3352,6 @@ public class HMaster extends HRegionServer implements MasterServices { } } - @Override public ReplicationPeerConfig getReplicationPeerConfig(String peerId) throws ReplicationException, IOException { if (cpHost != null) { @@ -3406,7 +3366,6 @@ public class HMaster extends HRegionServer implements MasterServices { return peerConfig; } - @Override public void updateReplicationPeerConfig(String peerId, ReplicationPeerConfig peerConfig) throws ReplicationException, IOException { if (cpHost != null) { @@ -3420,7 +3379,6 @@ public class HMaster extends HRegionServer implements MasterServices { } } - @Override public List listReplicationPeers(String regex) throws ReplicationException, IOException { if (cpHost != null) { @@ -3435,7 +3393,6 @@ public class HMaster extends HRegionServer implements MasterServices { return peers; } - @Override public void drainRegionServer(final ServerName server) { String parentZnode = getZooKeeper().znodePaths.drainingZNode; try { @@ -3447,7 +3404,6 @@ public class HMaster extends HRegionServer implements MasterServices { } } - @Override public 
List listDrainingRegionServers() { String parentZnode = getZooKeeper().znodePaths.drainingZNode; List serverNames = new ArrayList<>(); @@ -3473,7 +3429,6 @@ public class HMaster extends HRegionServer implements MasterServices { return serverNames; } - @Override public void removeDrainFromRegionServer(ServerName server) { String parentZnode = getZooKeeper().znodePaths.drainingZNode; String node = ZKUtil.joinZNode(parentZnode, server.getServerName()); @@ -3485,12 +3440,10 @@ public class HMaster extends HRegionServer implements MasterServices { } } - @Override public LockManager getLockManager() { return lockManager; } - @Override public boolean recoverMeta() throws IOException { ProcedurePrepareLatch latch = ProcedurePrepareLatch.createLatch(2, 0); long procId = procedureExecutor.submitProcedure(new RecoverMetaProcedure(null, true, latch)); @@ -3508,4 +3461,14 @@ public class HMaster extends HRegionServer implements MasterServices { public SpaceQuotaSnapshotNotifier getSpaceQuotaSnapshotNotifier() { return this.spaceQuotaSnapshotNotifier; } + + @Override + public void registerListener(ServerListener listener) { + this.getServerManager().registerListener(listener); + } + + @Override + public boolean unregisterListener(ServerListener listener) { + return this.getServerManager().unregisterListener(listener); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java index 1dad70dc4c..6a1f2eb698 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java @@ -81,9 +81,9 @@ public interface LoadBalancer extends Configurable, Stoppable, ConfigurationObse /** * Set the master service. 
- * @param masterServices + * @param master */ - void setMasterServices(MasterServices masterServices); + void setMaster(HMaster master); /** * Perform the major balance operation diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java index 72ba5ae5a4..78509a3247 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java @@ -107,12 +107,12 @@ public class MasterCoprocessorHost } } - private MasterServices masterServices; + private HMaster master; - public MasterCoprocessorHost(final MasterServices services, final Configuration conf) { - super(services); + public MasterCoprocessorHost(final HMaster master, final Configuration conf) { + super(master); this.conf = conf; - this.masterServices = services; + this.master = master; // Log the state of coprocessor loading here; should appear only once or // twice in the daemon log, depending on HBase version, because there is // only one MasterCoprocessorHost instance in the master process @@ -129,9 +129,9 @@ public class MasterCoprocessorHost final int seq, final Configuration conf) { // If coprocessor exposes any services, register them. 
for (Service service : instance.getServices()) { - masterServices.registerService(service); + master.registerService(service); } - return new MasterEnvironment(instance, priority, seq, conf, masterServices); + return new MasterEnvironment(instance, priority, seq, conf, master); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java index 740edeceee..1bc4d9422d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java @@ -1,4 +1,4 @@ -/** +/* * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file @@ -1966,7 +1966,8 @@ public class MasterRpcServices extends RSRpcServices for (int i = 0; i < request.getRegionInfoCount(); ++i) { regionInfos[i] = ProtobufUtil.toRegionInfo(request.getRegionInfo(i)); } - npr = new NonceProcedureRunnable(master, request.getNonceGroup(), request.getNonce()) { + npr = new NonceProcedureRunnable(master.getMasterProcedureExecutor(), + request.getNonceGroup(), request.getNonce()) { @Override protected void run() throws IOException { setProcId(master.getLockManager().remoteLocks().requestRegionsLock(regionInfos, @@ -1980,7 +1981,8 @@ public class MasterRpcServices extends RSRpcServices }; } else if (request.hasTableName()) { final TableName tableName = ProtobufUtil.toTableName(request.getTableName()); - npr = new NonceProcedureRunnable(master, request.getNonceGroup(), request.getNonce()) { + npr = new NonceProcedureRunnable(master.getMasterProcedureExecutor(), + request.getNonceGroup(), request.getNonce()) { @Override protected void run() throws IOException { setProcId(master.getLockManager().remoteLocks().requestTableLock(tableName, type, @@ -1993,7 +1995,8 @@ public class MasterRpcServices extends RSRpcServices } }; } else 
if (request.hasNamespace()) { - npr = new NonceProcedureRunnable(master, request.getNonceGroup(), request.getNonce()) { + npr = new NonceProcedureRunnable(master.getMasterProcedureExecutor(), + request.getNonceGroup(), request.getNonce()) { @Override protected void run() throws IOException { setProcId(master.getLockManager().remoteLocks().requestNamespaceLock( diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java index e815950ca3..8f97bb535e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java @@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.master; import java.io.IOException; import java.util.List; +import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableDescriptors; @@ -54,290 +55,19 @@ import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTe import com.google.protobuf.Service; /** - * Services Master supplies + * Services Master supplies to Coprocessors. + * Below is a carefully-curated list of Services we expose to Coprocessors. + * Be judicious adding API. This Interface is for Coprocessors only! Internally, we make use of the + * Master implementation and NOT this Interface. 
*/ -@InterfaceAudience.Private +@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) public interface MasterServices extends Server { /** - * @return the underlying snapshot manager - */ - SnapshotManager getSnapshotManager(); - - /** - * @return the underlying MasterProcedureManagerHost - */ - MasterProcedureManagerHost getMasterProcedureManagerHost(); - - /** - * @return Master's instance of {@link ClusterSchema} - */ - ClusterSchema getClusterSchema(); - - /** - * @return Master's instance of the {@link AssignmentManager} - */ - AssignmentManager getAssignmentManager(); - - /** - * @return Master's filesystem {@link MasterFileSystem} utility class. - */ - MasterFileSystem getMasterFileSystem(); - - /** - * @return Master's WALs {@link MasterWalManager} utility class. - */ - MasterWalManager getMasterWalManager(); - - /** - * @return Master's {@link ServerManager} instance. - */ - ServerManager getServerManager(); - - /** - * @return Master's instance of {@link ExecutorService} - */ - ExecutorService getExecutorService(); - - /** - * @return Master's instance of {@link TableStateManager} - */ - TableStateManager getTableStateManager(); - - /** - * @return Master's instance of {@link MasterCoprocessorHost} - */ - MasterCoprocessorHost getMasterCoprocessorHost(); - - /** - * @return Master's instance of {@link MasterQuotaManager} - */ - MasterQuotaManager getMasterQuotaManager(); - - /** - * @return Master's instance of {@link RegionNormalizer} - */ - RegionNormalizer getRegionNormalizer(); - - /** - * @return Master's instance of {@link CatalogJanitor} - */ - CatalogJanitor getCatalogJanitor(); - - /** - * @return Master's instance of {@link ProcedureExecutor} - */ - ProcedureExecutor getMasterProcedureExecutor(); - - /** - * @return Tripped when Master has finished initialization. 
- */ - @VisibleForTesting - public ProcedureEvent getInitializedEvent(); - - /** - * @return Master's instance of {@link MetricsMaster} - */ - MetricsMaster getMasterMetrics(); - - /** - * Check table is modifiable; i.e. exists and is offline. - * @param tableName Name of table to check. - * @throws TableNotDisabledException - * @throws TableNotFoundException - * @throws IOException - */ - // We actually throw the exceptions mentioned in the - void checkTableModifiable(final TableName tableName) - throws IOException, TableNotFoundException, TableNotDisabledException; - - /** - * Create a table using the given table definition. - * @param desc The table definition - * @param splitKeys Starting row keys for the initial table regions. If null - * @param nonceGroup - * @param nonce - * a single region is created. - */ - long createTable( - final TableDescriptor desc, - final byte[][] splitKeys, - final long nonceGroup, - final long nonce) throws IOException; - - /** - * Create a system table using the given table definition. - * @param tableDescriptor The system table definition - * a single region is created. 
- */ - long createSystemTable(final TableDescriptor tableDescriptor) throws IOException; - - /** - * Delete a table - * @param tableName The table name - * @param nonceGroup - * @param nonce - * @throws IOException - */ - long deleteTable( - final TableName tableName, - final long nonceGroup, - final long nonce) throws IOException; - - /** - * Truncate a table - * @param tableName The table name - * @param preserveSplits True if the splits should be preserved - * @param nonceGroup - * @param nonce - * @throws IOException - */ - public long truncateTable( - final TableName tableName, - final boolean preserveSplits, - final long nonceGroup, - final long nonce) throws IOException; - - /** - * Modify the descriptor of an existing table - * @param tableName The table name - * @param descriptor The updated table descriptor - * @param nonceGroup - * @param nonce - * @throws IOException - */ - long modifyTable( - final TableName tableName, - final TableDescriptor descriptor, - final long nonceGroup, - final long nonce) - throws IOException; - - /** - * Enable an existing table - * @param tableName The table name - * @param nonceGroup - * @param nonce - * @throws IOException - */ - long enableTable( - final TableName tableName, - final long nonceGroup, - final long nonce) throws IOException; - - /** - * Disable an existing table - * @param tableName The table name - * @param nonceGroup - * @param nonce - * @throws IOException - */ - long disableTable( - final TableName tableName, - final long nonceGroup, - final long nonce) throws IOException; - - - /** - * Add a new column to an existing table - * @param tableName The table name - * @param column The column definition - * @param nonceGroup - * @param nonce - * @throws IOException - */ - long addColumn( - final TableName tableName, - final ColumnFamilyDescriptor column, - final long nonceGroup, - final long nonce) - throws IOException; - - /** - * Modify the column descriptor of an existing column in an existing table - * 
@param tableName The table name - * @param descriptor The updated column definition - * @param nonceGroup - * @param nonce - * @throws IOException - */ - long modifyColumn( - final TableName tableName, - final ColumnFamilyDescriptor descriptor, - final long nonceGroup, - final long nonce) - throws IOException; - - /** - * Delete a column from an existing table - * @param tableName The table name - * @param columnName The column name - * @param nonceGroup - * @param nonce - * @throws IOException - */ - long deleteColumn( - final TableName tableName, - final byte[] columnName, - final long nonceGroup, - final long nonce) - throws IOException; - - /** - * Merge regions in a table. - * @param regionsToMerge daughter regions to merge - * @param forcible whether to force to merge even two regions are not adjacent - * @param nonceGroup used to detect duplicate - * @param nonce used to detect duplicate - * @return procedure Id - * @throws IOException - */ - long mergeRegions( - final RegionInfo[] regionsToMerge, - final boolean forcible, - final long nonceGroup, - final long nonce) throws IOException; - - /** - * Split a region. - * @param regionInfo region to split - * @param splitRow split point - * @param nonceGroup used to detect duplicate - * @param nonce used to detect duplicate - * @return procedure Id - * @throws IOException - */ - long splitRegion( - final RegionInfo regionInfo, - final byte [] splitRow, - final long nonceGroup, - final long nonce) throws IOException; - - /** - * @return Return table descriptors implementation. - */ - TableDescriptors getTableDescriptors(); - - /** * @return true if master enables ServerShutdownHandler; */ boolean isServerCrashProcessingEnabled(); /** - * Registers a new protocol buffer {@link Service} subclass as a master coprocessor endpoint. - * - *

- * Only a single instance may be registered for a given {@link Service} subclass (the - * instances are keyed on {@link com.google.protobuf.Descriptors.ServiceDescriptor#getFullName()}. - * After the first registration, subsequent calls with the same service name will fail with - * a return value of {@code false}. - *

- * @param instance the {@code Service} subclass instance to expose as a coprocessor endpoint - * @return {@code true} if the registration was successful, {@code false} - * otherwise - */ - boolean registerService(Service instance); - - /** * @return true if master is the active one */ boolean isActiveMaster(); @@ -353,161 +83,25 @@ public interface MasterServices extends Server { boolean isInMaintenanceMode(); /** - * Abort a procedure. - * @param procId ID of the procedure - * @param mayInterruptIfRunning if the proc completed at least one step, should it be aborted? - * @return true if aborted, false if procedure already completed or does not exist - * @throws IOException - */ - public boolean abortProcedure(final long procId, final boolean mayInterruptIfRunning) - throws IOException; - - /** - * Get procedures - * @return procedure list - * @throws IOException - */ - public List> getProcedures() throws IOException; - - /** - * Get locks - * @return lock list - * @throws IOException - */ - public List getLocks() throws IOException; - - /** - * Get list of table descriptors by namespace - * @param name namespace name - * @return descriptors - * @throws IOException - */ - public List listTableDescriptorsByNamespace(String name) throws IOException; - - /** - * Get list of table names by namespace - * @param name namespace name - * @return table names - * @throws IOException - */ - public List listTableNamesByNamespace(String name) throws IOException; - - /** - * @param table the table for which last successful major compaction time is queried - * @return the timestamp of the last successful major compaction for the passed table, - * or 0 if no HFile resulting from a major compaction exists - * @throws IOException - */ - public long getLastMajorCompactionTimestamp(TableName table) throws IOException; - - /** - * @param regionName - * @return the timestamp of the last successful major compaction for the passed region - * or 0 if no HFile resulting from a major 
compaction exists - * @throws IOException - */ - public long getLastMajorCompactionTimestampForRegion(byte[] regionName) throws IOException; - - /** - * @return load balancer - */ - public LoadBalancer getLoadBalancer(); - - /** * @return True if this master is stopping. */ boolean isStopping(); boolean isSplitOrMergeEnabled(MasterSwitchType switchType); - /** - * @return Favored Nodes Manager - */ - public FavoredNodesManager getFavoredNodesManager(); - - /** - * Add a new replication peer for replicating data to slave cluster - * @param peerId a short name that identifies the peer - * @param peerConfig configuration for the replication slave cluster - */ - void addReplicationPeer(String peerId, ReplicationPeerConfig peerConfig) - throws ReplicationException, IOException; - - /** - * Removes a peer and stops the replication - * @param peerId a short name that identifies the peer - */ - void removeReplicationPeer(String peerId) throws ReplicationException, IOException; - - /** - * Restart the replication stream to the specified peer - * @param peerId a short name that identifies the peer - */ - void enableReplicationPeer(String peerId) throws ReplicationException, IOException; - - /** - * Stop the replication stream to the specified peer - * @param peerId a short name that identifies the peer - */ - void disableReplicationPeer(String peerId) throws ReplicationException, IOException; - - /** - * Returns the configured ReplicationPeerConfig for the specified peer - * @param peerId a short name that identifies the peer - * @return ReplicationPeerConfig for the peer - */ - ReplicationPeerConfig getReplicationPeerConfig(String peerId) throws ReplicationException, - IOException; - - /** - * Update the peerConfig for the specified peer - * @param peerId a short name that identifies the peer - * @param peerConfig new config for the peer - */ - void updateReplicationPeerConfig(String peerId, ReplicationPeerConfig peerConfig) - throws ReplicationException, IOException; - - 
/** - * Return a list of replication peers. - * @param regex The regular expression to match peer id - * @return a list of replication peers description - */ - List listReplicationPeers(String regex) throws ReplicationException, - IOException; - - /** - * Mark a region server as draining to prevent additional regions from getting assigned to it. - * @param server Region servers to drain. - */ - void drainRegionServer(final ServerName server); - - /** - * List region servers marked as draining to not get additional regions assigned to them. - * @return List of draining servers. - */ - List listDrainingRegionServers(); + public String getRegionServerVersion(final ServerName sn); - /** - * Remove drain from a region server to allow additional regions assignments. - * @param server Region server to remove drain from. - */ - void removeDrainFromRegionServer(final ServerName server); + String getClientIdAuditPrefix(); /** - * @return {@link LockManager} to lock namespaces/tables/regions. + * Add the listener to the notification list. + * @param listener The ServerListener to register */ - LockManager getLockManager(); - - public String getRegionServerVersion(final ServerName sn); - - public void checkIfShouldMoveSystemRegionAsync(); + void registerListener(final ServerListener listener); /** - * Recover meta table. Will result in no-op is meta is already initialized. Any code that has - * access to master and requires to access meta during process initialization can call this - * method to make sure meta is initialized. + * Remove the listener from the notification list. 
+ * @param listener The ServerListener to unregister */ - boolean recoverMeta() throws IOException; - - String getClientIdAuditPrefix(); + boolean unregisterListener(final ServerListener listener); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java index f6074d9094..7adecdd2a0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java @@ -73,7 +73,7 @@ public class MasterWalManager { private final MetricsMasterFileSystem metricsMasterFilesystem = new MetricsMasterFileSystem(); // Keep around for convenience. - private final MasterServices services; + private final HMaster master; private final Configuration conf; private final FileSystem fs; @@ -89,18 +89,18 @@ public class MasterWalManager { // Is the fileystem ok? private volatile boolean fsOk = true; - public MasterWalManager(MasterServices services) throws IOException { - this(services.getConfiguration(), services.getMasterFileSystem().getWALFileSystem(), - services.getMasterFileSystem().getWALRootDir(), services); + public MasterWalManager(HMaster master) throws IOException { + this(master.getConfiguration(), master.getMasterFileSystem().getWALFileSystem(), + master.getMasterFileSystem().getWALRootDir(), master); } - public MasterWalManager(Configuration conf, FileSystem fs, Path rootDir, MasterServices services) + public MasterWalManager(Configuration conf, FileSystem fs, Path rootDir, HMaster master) throws IOException { this.fs = fs; this.conf = conf; this.rootDir = rootDir; - this.services = services; - this.splitLogManager = new SplitLogManager(services, conf); + this.master = master; + this.splitLogManager = new SplitLogManager(master, conf); this.distributedLogReplay = this.splitLogManager.isLogReplaying(); this.oldLogDir = new Path(rootDir, 
HConstants.HREGION_OLDLOGDIR_NAME); @@ -140,7 +140,7 @@ public class MasterWalManager { FSUtils.checkFileSystemAvailable(this.fs); FSUtils.checkDfsSafeMode(this.conf); } catch (IOException e) { - services.abort("Shutting down HBase cluster: file system not available", e); + master.abort("Shutting down HBase cluster: file system not available", e); this.fsOk = false; } } @@ -159,7 +159,7 @@ public class MasterWalManager { Path logsDirPath = new Path(this.rootDir, HConstants.HREGION_LOGDIR_NAME); do { - if (services.isStopped()) { + if (master.isStopped()) { LOG.warn("Master stopped while trying to get failed servers."); break; } @@ -168,7 +168,7 @@ public class MasterWalManager { FileStatus[] logFolders = FSUtils.listStatus(this.fs, logsDirPath, null); // Get online servers after getting log folders to avoid log folder deletion of newly // checked in region servers . see HBASE-5916 - Set onlineServers = services.getServerManager().getOnlineServers().keySet(); + Set onlineServers = master.getServerManager().getOnlineServers().keySet(); if (logFolders == null || logFolders.length == 0) { LOG.debug("No log files to split, proceeding..."); @@ -243,7 +243,7 @@ public class MasterWalManager { private List getLogDirs(final Set serverNames) throws IOException { List logDirs = new ArrayList<>(); boolean needReleaseLock = false; - if (!this.services.isInitialized()) { + if (!this.master.isInitialized()) { // during master initialization, we could have multiple places splitting a same wal this.splitLogLock.lock(); needReleaseLock = true; @@ -268,7 +268,7 @@ public class MasterWalManager { } } catch (IOException ioe) { if (!checkFileSystem()) { - this.services.abort("Aborting due to filesystem unavailable", ioe); + this.master.abort("Aborting due to filesystem unavailable", ioe); throw ioe; } } finally { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerListener.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerListener.java index 
fc38ad8c6c..4ecb53a164 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerListener.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerListener.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.master; +import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.ServerName; @@ -25,7 +26,7 @@ import org.apache.hadoop.hbase.ServerName; * Get notification of server registration events. The invocations are inline * so make sure your implementation is fast or else you'll slow hbase. */ -@InterfaceAudience.Private +@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) public interface ServerListener { /** * Started waiting on RegionServers to check-in. @@ -43,4 +44,4 @@ public interface ServerListener { * @param serverName The remote servers name. */ default void serverRemoved(final ServerName serverName) {}; -} +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java index b63d55af10..ac711b6ba2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java @@ -135,7 +135,7 @@ public class ServerManager { /** List of region servers that should not get any more new regions.
*/ private final ArrayList drainingServers = new ArrayList<>(); - private final MasterServices master; + private final HMaster master; private final ClusterConnection connection; private final DeadServer deadservers = new DeadServer(); @@ -188,11 +188,11 @@ public class ServerManager { * @param master * @throws ZooKeeperConnectionException */ - public ServerManager(final MasterServices master) { + public ServerManager(final HMaster master) { this(master, true); } - ServerManager(final MasterServices master, final boolean connect) { + ServerManager(final HMaster master, final boolean connect) { this.master = master; Configuration c = master.getConfiguration(); maxSkew = c.getLong("hbase.master.maxclockskew", 30000); @@ -770,12 +770,8 @@ public class ServerManager { AdminService.BlockingInterface admin = this.rsAdmins.get(sn); if (admin == null) { LOG.debug("New admin connection to " + sn.toString()); - if (sn.equals(master.getServerName()) && master instanceof HRegionServer) { - // A master is also a region server now, see HBASE-10569 for details - admin = ((HRegionServer)master).getRSRpcServices(); - } else { - admin = this.connection.getAdmin(sn); - } + admin = sn.equals(master.getServerName())? 
+ this.master.getRSRpcServices(): this.connection.getAdmin(sn); this.rsAdmins.put(sn, admin); } return admin; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java index 8027b6af5f..06a14cede6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java @@ -98,7 +98,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.SplitLo public class SplitLogManager { private static final Log LOG = LogFactory.getLog(SplitLogManager.class); - private final MasterServices server; + private final HMaster master; private final Configuration conf; private final ChoreService choreService; @@ -131,12 +131,12 @@ public class SplitLogManager { * @param conf the HBase configuration * @throws IOException */ - public SplitLogManager(MasterServices master, Configuration conf) + public SplitLogManager(HMaster master, Configuration conf) throws IOException { - this.server = master; + this.master = master; this.conf = conf; this.choreService = new ChoreService(master.getServerName() + "_splitLogManager_"); - if (server.getCoordinatedStateManager() != null) { + if (master.getCoordinatedStateManager() != null) { SplitLogManagerCoordination coordination = getSplitLogManagerCoordination(); Set failedDeletions = Collections.synchronizedSet(new HashSet()); SplitLogManagerDetails details = new SplitLogManagerDetails(tasks, master, failedDeletions); @@ -153,7 +153,7 @@ public class SplitLogManager { } private SplitLogManagerCoordination getSplitLogManagerCoordination() { - return ((BaseCoordinatedStateManager) server.getCoordinatedStateManager()) + return ((BaseCoordinatedStateManager)master.getCoordinatedStateManager()) .getSplitLogManagerCoordination(); } @@ -356,7 +356,7 @@ public class SplitLogManager { } } batch.wait(100); - if 
(server.isStopped()) { + if (this.master.isStopped()) { LOG.warn("Stopped while waiting for log splits to be completed"); return; } @@ -564,7 +564,7 @@ public class SplitLogManager { * @return whether log is replaying */ public boolean isLogReplaying() { - if (server.getCoordinatedStateManager() == null) return false; + if (this.master.getCoordinatedStateManager() == null) return false; return getSplitLogManagerCoordination().isReplaying(); } @@ -572,7 +572,7 @@ public class SplitLogManager { * @return whether log is splitting */ public boolean isLogSplitting() { - if (server.getCoordinatedStateManager() == null) return false; + if (this.master.getCoordinatedStateManager() == null) return false; return getSplitLogManagerCoordination().isSplitting(); } @@ -665,7 +665,7 @@ public class SplitLogManager { @Override protected void chore() { - if (server.getCoordinatedStateManager() == null) return; + if (master.getCoordinatedStateManager() == null) return; int resubmitted = 0; int unassigned = 0; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java index a2704085c9..0a1c76e8be 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java @@ -28,7 +28,6 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.DoNotRetryIOException; -import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.NamespaceDescriptor; @@ -70,7 +69,7 @@ public class TableNamespaceManager { private static final Log LOG = LogFactory.getLog(TableNamespaceManager.class); private Configuration conf; - private MasterServices 
masterServices; + private HMaster master; private Table nsTable = null; // FindBugs: IS2_INCONSISTENT_SYNC TODO: Access is not synchronized private ZKNamespaceManager zkNamespaceManager; private boolean initialized; @@ -80,16 +79,16 @@ public class TableNamespaceManager { static final String NS_INIT_TIMEOUT = "hbase.master.namespace.init.timeout"; static final int DEFAULT_NS_INIT_TIMEOUT = 300000; - TableNamespaceManager(MasterServices masterServices) { - this.masterServices = masterServices; - this.conf = masterServices.getConfiguration(); + TableNamespaceManager(HMaster master) { + this.master = master; + this.conf = master.getConfiguration(); } public void start() throws IOException { - if (!MetaTableAccessor.tableExists(masterServices.getConnection(), + if (!MetaTableAccessor.tableExists(master.getConnection(), TableName.NAMESPACE_TABLE_NAME)) { LOG.info("Namespace table not found. Creating..."); - createNamespaceTable(masterServices); + createNamespaceTable(master); } try { @@ -198,13 +197,13 @@ public class TableNamespaceManager { } private void createNamespaceTable(MasterServices masterServices) throws IOException { - masterServices.createSystemTable(HTableDescriptor.NAMESPACE_TABLEDESC); + this.master.createSystemTable(HTableDescriptor.NAMESPACE_TABLEDESC); } @SuppressWarnings("deprecation") private boolean isTableNamespaceManagerInitialized() throws IOException { if (initialized) { - this.nsTable = this.masterServices.getConnection().getTable(TableName.NAMESPACE_TABLE_NAME); + this.nsTable = this.master.getConnection().getTable(TableName.NAMESPACE_TABLE_NAME); return true; } return false; @@ -217,9 +216,9 @@ public class TableNamespaceManager { */ private void blockingCreateNamespace(final NamespaceDescriptor namespaceDescriptor) throws IOException { - ClusterSchema clusterSchema = this.masterServices.getClusterSchema(); + ClusterSchema clusterSchema = this.master.getClusterSchema(); long procId = clusterSchema.createNamespace(namespaceDescriptor, null); - 
block(this.masterServices, procId); + block(this.master, procId); } @@ -227,14 +226,13 @@ public class TableNamespaceManager { * An ugly utility to be removed when refactor TableNamespaceManager. * @throws TimeoutIOException */ - private static void block(final MasterServices services, final long procId) + private static void block(final HMaster master, final long procId) throws TimeoutIOException { - int timeoutInMillis = services.getConfiguration(). + int timeoutInMillis = master.getConfiguration(). getInt(ClusterSchema.HBASE_MASTER_CLUSTER_SCHEMA_OPERATION_TIMEOUT_KEY, ClusterSchema.DEFAULT_HBASE_MASTER_CLUSTER_SCHEMA_OPERATION_TIMEOUT); long deadlineTs = EnvironmentEdgeManager.currentTime() + timeoutInMillis; - ProcedureExecutor procedureExecutor = - services.getMasterProcedureExecutor(); + ProcedureExecutor procedureExecutor = master.getMasterProcedureExecutor(); while(EnvironmentEdgeManager.currentTime() < deadlineTs) { if (procedureExecutor.isFinished(procId)) return; // Sleep some @@ -261,8 +259,8 @@ public class TableNamespaceManager { if (isTableAssigned() && isTableEnabled()) { try { boolean initGoodSofar = true; - nsTable = this.masterServices.getConnection().getTable(TableName.NAMESPACE_TABLE_NAME); - zkNamespaceManager = new ZKNamespaceManager(masterServices.getZooKeeper()); + nsTable = this.master.getConnection().getTable(TableName.NAMESPACE_TABLE_NAME); + zkNamespaceManager = new ZKNamespaceManager(master.getZooKeeper()); zkNamespaceManager.start(); if (get(nsTable, NamespaceDescriptor.DEFAULT_NAMESPACE.getName()) == null) { @@ -305,7 +303,7 @@ public class TableNamespaceManager { } private TableState.State getTableState() throws IOException { - return masterServices.getTableStateManager().getTableState(TableName.NAMESPACE_TABLE_NAME); + return master.getTableStateManager().getTableState(TableName.NAMESPACE_TABLE_NAME); } private boolean isTableEnabled() throws IOException { @@ -314,7 +312,7 @@ public class TableNamespaceManager { private boolean 
isTableAssigned() { // TODO: we have a better way now (wait on event) - return masterServices.getAssignmentManager() + return master.getAssignmentManager() .getRegionStates().hasTableRegionStates(TableName.NAMESPACE_TABLE_NAME); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java index 330b752b5c..667e57994f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java @@ -49,9 +49,9 @@ public class TableStateManager { private static final Log LOG = LogFactory.getLog(TableStateManager.class); private final ReadWriteLock lock = new ReentrantReadWriteLock(); - private final MasterServices master; + private final HMaster master; - public TableStateManager(MasterServices master) { + public TableStateManager(HMaster master) { this.master = master; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignProcedure.java index 01a5f833f4..e7609c6e70 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignProcedure.java @@ -156,7 +156,7 @@ public class AssignProcedure extends RegionTransitionProcedure { return false; } // Don't assign if table is in disabling of disabled state. 
- TableStateManager tsm = env.getMasterServices().getTableStateManager(); + TableStateManager tsm = env.getMaster().getTableStateManager(); TableName tn = regionNode.getRegionInfo().getTable(); if (tsm.isTableState(tn, TableState.State.DISABLING, TableState.State.DISABLED)) { LOG.info("Table " + tn + " state=" + tsm.getTableState(tn) + ", skipping " + this); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java index 22f734ce75..aa97d90766 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java @@ -53,8 +53,8 @@ import org.apache.hadoop.hbase.exceptions.UnexpectedStateException; import org.apache.hadoop.hbase.favored.FavoredNodesManager; import org.apache.hadoop.hbase.favored.FavoredNodesPromoter; import org.apache.hadoop.hbase.master.AssignmentListener; +import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.LoadBalancer; -import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.master.MetricsAssignmentManager; import org.apache.hadoop.hbase.master.NoSuchProcedureException; import org.apache.hadoop.hbase.master.RegionPlan; @@ -164,7 +164,7 @@ public class AssignmentManager implements ServerListener { private final MetricsAssignmentManager metrics; private final RegionInTransitionChore ritChore; - private final MasterServices master; + private final HMaster master; private final AtomicBoolean running = new AtomicBoolean(false); private final RegionStates regionStates = new RegionStates(); @@ -179,11 +179,11 @@ public class AssignmentManager implements ServerListener { private Thread assignThread; - public AssignmentManager(final MasterServices master) { + public AssignmentManager(HMaster master) { this(master, new 
RegionStateStore(master)); } - public AssignmentManager(final MasterServices master, final RegionStateStore stateStore) { + public AssignmentManager(final HMaster master, final RegionStateStore stateStore) { this.master = master; this.regionStateStore = stateStore; this.metrics = new MetricsAssignmentManager(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/GCMergedRegionsProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/GCMergedRegionsProcedure.java index 37521cc67f..c67f4a4a15 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/GCMergedRegionsProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/GCMergedRegionsProcedure.java @@ -87,7 +87,7 @@ extends AbstractStateMachineTableProcedure { setNextState(GCMergedRegionsState.GC_REGION_EDIT_METADATA); break; case GC_REGION_EDIT_METADATA: - MetaTableAccessor.deleteMergeQualifiers(env.getMasterServices().getConnection(), mergedChild); + MetaTableAccessor.deleteMergeQualifiers(env.getMaster().getConnection(), mergedChild); return Flow.NO_MORE_STATE; default: throw new UnsupportedOperationException(this + " unhandled state=" + state); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/GCRegionProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/GCRegionProcedure.java index 805b870b38..f09fb6116e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/GCRegionProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/GCRegionProcedure.java @@ -26,6 +26,7 @@ import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.backup.HFileArchiver; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.favored.FavoredNodesManager; +import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.MasterServices; 
import org.apache.hadoop.hbase.master.procedure.AbstractStateMachineRegionProcedure; import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; @@ -69,7 +70,7 @@ public class GCRegionProcedure extends AbstractStateMachineRegionProcedure splitStoreFiles(final MasterProcedureEnv env, final HRegionFileSystem regionFs) throws IOException { - final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem(); + final MasterFileSystem mfs = env.getMaster().getMasterFileSystem(); final Configuration conf = env.getMasterConfiguration(); // The following code sets up a thread pool executor with as many slots as // there's files to split. It then fires up everything, waits for @@ -586,7 +586,7 @@ public class SplitTableRegionProcedure final List>> futures = new ArrayList>>(nbFiles); // Split each store file. - final TableDescriptor htd = env.getMasterServices().getTableDescriptors().get(getTableName()); + final TableDescriptor htd = env.getMaster().getTableDescriptors().get(getTableName()); for (Map.Entry>e: files.entrySet()) { byte [] familyName = Bytes.toBytes(e.getKey()); final ColumnFamilyDescriptor hcd = htd.getColumnFamily(familyName); @@ -760,7 +760,7 @@ public class SplitTableRegionProcedure } private ServerName getParentRegionServerName(final MasterProcedureEnv env) { - return env.getMasterServices().getAssignmentManager() + return env.getMaster().getAssignmentManager() .getRegionStates().getRegionServerOfRegion(getParentRegion()); } @@ -791,7 +791,7 @@ public class SplitTableRegionProcedure } private int getRegionReplication(final MasterProcedureEnv env) throws IOException { - final TableDescriptor htd = env.getMasterServices().getTableDescriptors().get(getTableName()); + final TableDescriptor htd = env.getMaster().getTableDescriptors().get(getTableName()); return htd.getRegionReplication(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/UnassignProcedure.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/UnassignProcedure.java index 66277bec11..4b7e6de818 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/UnassignProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/UnassignProcedure.java @@ -245,7 +245,7 @@ public class UnassignProcedure extends RegionTransitionProcedure { } else { LOG.warn("Expiring server " + this + "; " + regionNode.toShortString() + ", exception=" + exception); - env.getMasterServices().getServerManager().expireServer(regionNode.getRegionLocation()); + env.getMaster().getServerManager().expireServer(regionNode.getRegionLocation()); // Return false so this procedure stays in suspended state. It will be woken up by a // ServerCrashProcedure when it notices this RIT. // TODO: Add a SCP as a new subprocedure that we now come to depend on. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/Util.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/Util.java index e6b14955bd..cc89ac2b93 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/Util.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/Util.java @@ -53,10 +53,10 @@ class Util { final ServerName regionLocation, final RegionInfo hri, boolean includeBestSplitRow) throws IOException { // TODO: There is no timeout on this controller. Set one! - HBaseRpcController controller = env.getMasterServices().getClusterConnection(). + HBaseRpcController controller = env.getMaster().getClusterConnection(). 
getRpcControllerFactory().newController(); final AdminService.BlockingInterface admin = - env.getMasterServices().getClusterConnection().getAdmin(regionLocation); + env.getMaster().getClusterConnection().getAdmin(regionLocation); GetRegionInfoRequest request = null; if (includeBestSplitRow) { request = RequestConverter.buildGetRegionInfoRequest(hri.getRegionName(), false, true); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java index a05ad674a6..4098972f0e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java @@ -48,6 +48,7 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionReplicaUtil; +import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.LoadBalancer; import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.master.RackManager; @@ -59,6 +60,7 @@ import org.apache.hadoop.hbase.shaded.com.google.common.base.Joiner; import org.apache.hadoop.hbase.shaded.com.google.common.collect.ArrayListMultimap; import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists; import org.apache.hadoop.hbase.shaded.com.google.common.collect.Sets; +import org.apache.yetus.audience.InterfaceAudience; /** * The base class for load balancers. It provides the the functions used to by @@ -67,6 +69,7 @@ import org.apache.hadoop.hbase.shaded.com.google.common.collect.Sets; * actual balancing algorithm. 
* */ +@InterfaceAudience.Private public abstract class BaseLoadBalancer implements LoadBalancer { protected static final int MIN_SERVER_BALANCE = 2; private volatile boolean stopped = false; @@ -1008,7 +1011,7 @@ public abstract class BaseLoadBalancer implements LoadBalancer { protected MetricsBalancer metricsBalancer = null; protected ClusterStatus clusterStatus = null; protected ServerName masterServerName; - protected MasterServices services; + private HMaster master; protected boolean tablesOnMaster; protected boolean onlySystemTablesOnMaster; @@ -1038,6 +1041,10 @@ public abstract class BaseLoadBalancer implements LoadBalancer { ", systemTablesOnMaster=" + this.onlySystemTablesOnMaster); } + protected HMaster getMaster() { + return this.master; + } + protected void setSlop(Configuration conf) { this.slop = conf.getFloat("hbase.regions.slop", (float) 0.2); this.overallSlop = conf.getFloat("hbase.regions.overallSlop", slop); @@ -1141,11 +1148,11 @@ public abstract class BaseLoadBalancer implements LoadBalancer { } @Override - public void setMasterServices(MasterServices masterServices) { - masterServerName = masterServices.getServerName(); - this.services = masterServices; + public void setMaster(HMaster master) { + masterServerName = master.getServerName(); + this.master = master; if (useRegionFinder) { - this.regionFinder.setServices(masterServices); + this.regionFinder.setMaster(master); } } @@ -1307,7 +1314,7 @@ public abstract class BaseLoadBalancer implements LoadBalancer { } private List findIdleServers(List servers) { - return this.services.getServerManager() + return this.master.getServerManager() .getOnlineServersListWithPredicator(servers, IDLE_SERVER_PREDICATOR); } @@ -1549,8 +1556,8 @@ public abstract class BaseLoadBalancer implements LoadBalancer { protected Map> getRegionAssignmentsByServer( Collection regions) { - if (this.services != null && this.services.getAssignmentManager() != null) { - return 
this.services.getAssignmentManager().getSnapShotOfAssignment(regions); + if (this.master != null && this.master.getAssignmentManager() != null) { + return this.master.getAssignmentManager().getSnapShotOfAssignment(regions); } else { return new HashMap<>(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredStochasticBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredStochasticBalancer.java index a2fe9a25f2..2bf4acc3df 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredStochasticBalancer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredStochasticBalancer.java @@ -43,8 +43,8 @@ import org.apache.hadoop.hbase.favored.FavoredNodesManager; import org.apache.hadoop.hbase.favored.FavoredNodesPlan; import org.apache.hadoop.hbase.favored.FavoredNodesPlan.Position; import org.apache.hadoop.hbase.favored.FavoredNodesPromoter; +import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.LoadBalancer; -import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.master.RegionPlan; import org.apache.hadoop.hbase.util.Pair; @@ -87,9 +87,9 @@ public class FavoredStochasticBalancer extends StochasticLoadBalancer implements } @Override - public void setMasterServices(MasterServices masterServices) { - super.setMasterServices(masterServices); - fnm = masterServices.getFavoredNodesManager(); + public void setMaster(HMaster master) { + super.setMaster(master); + fnm = master.getFavoredNodesManager(); } /* @@ -275,8 +275,8 @@ public class FavoredStochasticBalancer extends StochasticLoadBalancer implements // Assign the region to the one with a lower load (both have the desired hdfs blocks) ServerName s; - ServerLoad tertiaryLoad = super.services.getServerManager().getLoad(tertiaryHost); - ServerLoad secondaryLoad = super.services.getServerManager().getLoad(secondaryHost); + ServerLoad 
tertiaryLoad = getMaster().getServerManager().getLoad(tertiaryHost); + ServerLoad secondaryLoad = getMaster().getServerManager().getLoad(secondaryHost); if (secondaryLoad != null && tertiaryLoad != null) { if (secondaryLoad.getLoad() < tertiaryLoad.getLoad()) { s = secondaryHost; @@ -695,7 +695,7 @@ public class FavoredStochasticBalancer extends StochasticLoadBalancer implements @Override public List balanceCluster(Map> clusterState) { - if (this.services != null) { + if (getMaster() != null) { List regionPlans = Lists.newArrayList(); Map> correctAssignments = new HashMap<>(); @@ -717,7 +717,7 @@ public class FavoredStochasticBalancer extends StochasticLoadBalancer implements LOG.warn("Region not on favored nodes, unassign. Region: " + hri + " current: " + current + " favored nodes: " + favoredNodes); try { - this.services.getAssignmentManager().unassign(hri); + getMaster().getAssignmentManager().unassign(hri); } catch (IOException e) { LOG.warn("Failed unassign", e); continue; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionLocationFinder.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionLocationFinder.java index f5502cc9c9..4e50ac6c3c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionLocationFinder.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionLocationFinder.java @@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.master.assignment.AssignmentManager; import org.apache.hadoop.hbase.regionserver.HRegion; @@ -66,7 +67,7 @@ class RegionLocationFinder { private static final HDFSBlocksDistribution EMPTY_BLOCK_DISTRIBUTION = new 
HDFSBlocksDistribution(); private Configuration conf; private volatile ClusterStatus status; - private MasterServices services; + private HMaster master; private final ListeningExecutorService executor; // Do not scheduleFullRefresh at master startup private long lastFullRefresh = EnvironmentEdgeManager.currentTime(); @@ -106,7 +107,6 @@ class RegionLocationFinder { /** * Create a cache for region to list of servers - * @param time time to cache the locations * @return A new Cache. */ private LoadingCache createCache() { @@ -123,8 +123,8 @@ class RegionLocationFinder { this.conf = conf; } - public void setServices(MasterServices services) { - this.services = services; + public void setMaster(HMaster master) { + this.master = master; } public void setClusterStatus(ClusterStatus status) { @@ -144,11 +144,11 @@ class RegionLocationFinder { */ private boolean scheduleFullRefresh() { // Protect from anything being null while starting up. - if (services == null) { + if (this.master == null) { return false; } - final AssignmentManager am = services.getAssignmentManager(); + final AssignmentManager am = this.master.getAssignmentManager(); if (am == null) { return false; } @@ -218,8 +218,8 @@ class RegionLocationFinder { protected TableDescriptor getTableDescriptor(TableName tableName) throws IOException { TableDescriptor tableDescriptor = null; try { - if (this.services != null && this.services.getTableDescriptors() != null) { - tableDescriptor = this.services.getTableDescriptors().get(tableName); + if (this.master != null && this.master.getTableDescriptors() != null) { + tableDescriptor = this.master.getTableDescriptors().get(tableName); } } catch (FileNotFoundException fnfe) { LOG.debug("FileNotFoundException during getTableDescriptors." 
+ " Current table name = " diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java index 23d9cb47c6..34218efc61 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java @@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.ServerLoad; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.master.RegionPlan; import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action; @@ -177,10 +178,10 @@ public class StochasticLoadBalancer extends BaseLoadBalancer { isByTable = conf.getBoolean(HConstants.HBASE_MASTER_LOADBALANCE_BYTABLE, isByTable); minCostNeedBalance = conf.getFloat(MIN_COST_NEED_BALANCE_KEY, minCostNeedBalance); if (localityCandidateGenerator == null) { - localityCandidateGenerator = new LocalityBasedCandidateGenerator(services); + localityCandidateGenerator = new LocalityBasedCandidateGenerator(getMaster()); } - localityCost = new ServerLocalityCostFunction(conf, services); - rackLocalityCost = new RackLocalityCostFunction(conf, services); + localityCost = new ServerLocalityCostFunction(conf, getMaster()); + rackLocalityCost = new RackLocalityCostFunction(conf, getMaster()); if (this.candidateGenerators == null) { candidateGenerators = Lists.newArrayList(); @@ -237,7 +238,7 @@ public class StochasticLoadBalancer extends BaseLoadBalancer { // update metrics size try { // by-table or ensemble mode - int tablesCount = isByTable ? services.getTableDescriptors().getAll().size() : 1; + int tablesCount = isByTable ? 
getMaster().getTableDescriptors().getAll().size() : 1; int functionsCount = getCostFunctionNames().length; updateMetricsSize(tablesCount * (functionsCount + 1)); // +1 for overall @@ -256,11 +257,11 @@ public class StochasticLoadBalancer extends BaseLoadBalancer { } @Override - public synchronized void setMasterServices(MasterServices masterServices) { - super.setMasterServices(masterServices); - this.localityCost.setServices(masterServices); - this.rackLocalityCost.setServices(masterServices); - this.localityCandidateGenerator.setServices(masterServices); + public synchronized void setMaster(HMaster master) { + super.setMaster(master); + this.localityCost.setMaster(master); + this.rackLocalityCost.setMaster(master); + this.localityCandidateGenerator.setMaster(master); } @Override @@ -782,15 +783,15 @@ public class StochasticLoadBalancer extends BaseLoadBalancer { static class LocalityBasedCandidateGenerator extends CandidateGenerator { - private MasterServices masterServices; + private HMaster master; - LocalityBasedCandidateGenerator(MasterServices masterServices) { - this.masterServices = masterServices; + LocalityBasedCandidateGenerator(HMaster master) { + this.master = master; } @Override Cluster.Action generate(Cluster cluster) { - if (this.masterServices == null) { + if (this.master == null) { int thisServer = pickRandomServer(cluster); // Pick the other server int otherServer = pickOtherRandomServer(cluster, thisServer); @@ -848,8 +849,8 @@ public class StochasticLoadBalancer extends BaseLoadBalancer { return cluster.getOrComputeWeightedLocality(region, server, LocalityType.SERVER); } - void setServices(MasterServices services) { - this.masterServices = services; + void setMaster(HMaster master) { + this.master = master; } } @@ -1251,17 +1252,17 @@ public class StochasticLoadBalancer extends BaseLoadBalancer { private double bestLocality; // best case locality across cluster weighted by local data size private double locality; // current locality across 
cluster weighted by local data size - private MasterServices services; + private HMaster master; LocalityBasedCostFunction(Configuration conf, - MasterServices srv, + HMaster master, LocalityType type, String localityCostKey, float defaultLocalityCost) { super(conf); this.type = type; this.setMultiplier(conf.getFloat(localityCostKey, defaultLocalityCost)); - this.services = srv; + this.master = master; this.locality = 0.0; this.bestLocality = 0.0; } @@ -1271,8 +1272,8 @@ public class StochasticLoadBalancer extends BaseLoadBalancer { */ abstract int regionIndexToEntityIndex(int region); - public void setServices(MasterServices srvc) { - this.services = srvc; + public void setMaster(HMaster master) { + this.master = master; } @Override @@ -1282,7 +1283,7 @@ public class StochasticLoadBalancer extends BaseLoadBalancer { bestLocality = 0.0; // If no master, no computation will work, so assume 0 cost - if (this.services == null) { + if (this.master == null) { return; } @@ -1301,7 +1302,7 @@ public class StochasticLoadBalancer extends BaseLoadBalancer { protected void regionMoved(int region, int oldServer, int newServer) { int oldEntity = type == LocalityType.SERVER ? oldServer : cluster.serverIndexToRackIndex[oldServer]; int newEntity = type == LocalityType.SERVER ? 
newServer : cluster.serverIndexToRackIndex[newServer]; - if (this.services == null) { + if (this.master == null) { return; } double localityDelta = getWeightedLocality(region, newEntity) - getWeightedLocality(region, oldEntity); @@ -1329,10 +1330,10 @@ public class StochasticLoadBalancer extends BaseLoadBalancer { private static final String LOCALITY_COST_KEY = "hbase.master.balancer.stochastic.localityCost"; private static final float DEFAULT_LOCALITY_COST = 25; - ServerLocalityCostFunction(Configuration conf, MasterServices srv) { + ServerLocalityCostFunction(Configuration conf, HMaster master) { super( conf, - srv, + master, LocalityType.SERVER, LOCALITY_COST_KEY, DEFAULT_LOCALITY_COST @@ -1350,10 +1351,10 @@ public class StochasticLoadBalancer extends BaseLoadBalancer { private static final String RACK_LOCALITY_COST_KEY = "hbase.master.balancer.stochastic.rackLocalityCost"; private static final float DEFAULT_RACK_LOCALITY_COST = 15; - public RackLocalityCostFunction(Configuration conf, MasterServices services) { + public RackLocalityCostFunction(Configuration conf, HMaster master) { super( conf, - services, + master, LocalityType.RACK, RACK_LOCALITY_COST_KEY, DEFAULT_RACK_LOCALITY_COST diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationMetaCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationMetaCleaner.java index 23e5a666b2..bc7bd4f0ae 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationMetaCleaner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationMetaCleaner.java @@ -38,7 +38,7 @@ import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.TableDescriptor; -import org.apache.hadoop.hbase.master.MasterServices; +import org.apache.hadoop.hbase.master.HMaster; import 
org.apache.hadoop.hbase.replication.ReplicationPeerDescription; import org.apache.hadoop.hbase.util.Bytes; @@ -51,9 +51,9 @@ public class ReplicationMetaCleaner extends ScheduledChore { private static final Log LOG = LogFactory.getLog(ReplicationMetaCleaner.class); private final Admin admin; - private final MasterServices master; + private final HMaster master; - public ReplicationMetaCleaner(MasterServices master, Stoppable stoppable, int period) + public ReplicationMetaCleaner(HMaster master, Stoppable stoppable, int period) throws IOException { super("ReplicationMetaCleaner", stoppable, period); this.master = master; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockManager.java index 883d6596ca..e081986270 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockManager.java @@ -1,4 +1,4 @@ -/** +/* * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizer.java index 74edd26d28..980ac5388c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/RegionNormalizer.java @@ -24,7 +24,7 @@ import org.apache.hadoop.hbase.HBaseIOException; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.master.MasterRpcServices; -import org.apache.hadoop.hbase.master.MasterServices; +import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.normalizer.NormalizationPlan.PlanType; import org.apache.yetus.audience.InterfaceAudience; @@ -43,9 +43,9 @@ public interface RegionNormalizer { /** * Set the master service. Must be called before first call to * {@link #computePlanForTable(TableName)}. - * @param masterServices master services to use + * @param master master services to use */ - void setMasterServices(MasterServices masterServices); + void setMaster(HMaster master); /** * Set the master RPC service. Must be called before first call to diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java index 8190f271ce..052ce9ab06 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java @@ -1,4 +1,4 @@ -/** +/* * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file @@ -32,7 +32,7 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.MasterSwitchType; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.master.MasterRpcServices; -import org.apache.hadoop.hbase.master.MasterServices; +import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.normalizer.NormalizationPlan.PlanType; import org.apache.yetus.audience.InterfaceAudience; @@ -62,17 +62,16 @@ public class SimpleRegionNormalizer implements RegionNormalizer { private static final Log LOG = LogFactory.getLog(SimpleRegionNormalizer.class); private static final int MIN_REGION_COUNT = 3; - private MasterServices masterServices; + private HMaster master; private MasterRpcServices masterRpcServices; private static long[] skippedCount = new long[NormalizationPlan.PlanType.values().length]; /** * Set the master service. - * @param masterServices inject instance of MasterServices + * @param master inject instance of MasterServices */ - @Override - public void setMasterServices(MasterServices masterServices) { - this.masterServices = masterServices; + public void setMaster(HMaster master) { + this.master = master; } @Override @@ -120,7 +119,7 @@ public class SimpleRegionNormalizer implements RegionNormalizer { } List plans = new ArrayList<>(); - List tableRegions = masterServices.getAssignmentManager().getRegionStates(). + List tableRegions = master.getAssignmentManager().getRegionStates(). getRegionsOfTable(table); //TODO: should we make min number of regions a config param? 
@@ -152,19 +151,8 @@ public class SimpleRegionNormalizer implements RegionNormalizer { LOG.debug("Table " + table + ", average region size: " + avgRegionSize); int candidateIdx = 0; - boolean splitEnabled = true, mergeEnabled = true; - try { - splitEnabled = masterRpcServices.isSplitOrMergeEnabled(null, - RequestConverter.buildIsSplitOrMergeEnabledRequest(MasterSwitchType.SPLIT)).getEnabled(); - } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException e) { - LOG.debug("Unable to determine whether split is enabled", e); - } - try { - mergeEnabled = masterRpcServices.isSplitOrMergeEnabled(null, - RequestConverter.buildIsSplitOrMergeEnabledRequest(MasterSwitchType.MERGE)).getEnabled(); - } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException e) { - LOG.debug("Unable to determine whether split is enabled", e); - } + boolean splitEnabled = this.master.isSplitOrMergeEnabled(MasterSwitchType.SPLIT); + boolean mergeEnabled = this.master.isSplitOrMergeEnabled(MasterSwitchType.MERGE); while (candidateIdx < tableRegions.size()) { RegionInfo hri = tableRegions.get(candidateIdx); long regionSize = getRegionSize(hri); @@ -203,9 +191,9 @@ public class SimpleRegionNormalizer implements RegionNormalizer { } private long getRegionSize(RegionInfo hri) { - ServerName sn = masterServices.getAssignmentManager().getRegionStates(). + ServerName sn = master.getAssignmentManager().getRegionStates(). getRegionServerOfRegion(hri); - RegionLoad regionLoad = masterServices.getServerManager().getLoad(sn). + RegionLoad regionLoad = master.getServerManager().getLoad(sn). 
getRegionsLoad().get(hri.getRegionName()); if (regionLoad == null) { LOG.debug(hri.getRegionNameAsString() + " was not found in RegionsLoad"); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineRegionProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineRegionProcedure.java index e711ca0bd3..f471286fd1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineRegionProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineRegionProcedure.java @@ -88,7 +88,7 @@ public abstract class AbstractStateMachineRegionProcedure @Override protected void checkTableModifiable(final MasterProcedureEnv env) throws IOException { // Checks whether the table exists - if (!MetaTableAccessor.tableExists(env.getMasterServices().getConnection(), getTableName())) { + if (!MetaTableAccessor.tableExists(env.getMaster().getConnection(), getTableName())) { throw new TableNotFoundException(getTableName()); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.java index d67d9f9a3a..e1930b6508 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.java @@ -110,7 +110,7 @@ public abstract class AbstractStateMachineTableProcedure */ protected void checkTableModifiable(final MasterProcedureEnv env) throws IOException { // Checks whether the table exists - if (!MetaTableAccessor.tableExists(env.getMasterServices().getConnection(), getTableName())) { + if (!MetaTableAccessor.tableExists(env.getMaster().getConnection(), getTableName())) { throw new 
TableNotFoundException(getTableName()); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AddColumnFamilyProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AddColumnFamilyProcedure.java index 835152454b..8d83c45304 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AddColumnFamilyProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AddColumnFamilyProcedure.java @@ -232,7 +232,7 @@ public class AddColumnFamilyProcedure checkTableModifiable(env); // In order to update the descriptor, we need to retrieve the old descriptor for comparison. - unmodifiedTableDescriptor = env.getMasterServices().getTableDescriptors().get(tableName); + unmodifiedTableDescriptor = env.getMaster().getTableDescriptors().get(tableName); if (unmodifiedTableDescriptor == null) { throw new IOException("TableDescriptor missing for " + tableName); } @@ -261,7 +261,7 @@ public class AddColumnFamilyProcedure // Update table descriptor LOG.info("AddColumn. 
Table = " + tableName + " HCD = " + cfDescriptor.toString()); - TableDescriptor htd = env.getMasterServices().getTableDescriptors().get(tableName); + TableDescriptor htd = env.getMaster().getTableDescriptors().get(tableName); if (htd.hasColumnFamily(cfDescriptor.getName())) { // It is possible to reach this situation, as we could already add the column family @@ -270,7 +270,7 @@ public class AddColumnFamilyProcedure return; } - env.getMasterServices().getTableDescriptors().add( + env.getMaster().getTableDescriptors().add( TableDescriptorBuilder.newBuilder(htd) .addColumnFamily(cfDescriptor).build()); } @@ -281,14 +281,14 @@ public class AddColumnFamilyProcedure * @throws IOException **/ private void restoreTableDescriptor(final MasterProcedureEnv env) throws IOException { - TableDescriptor htd = env.getMasterServices().getTableDescriptors().get(tableName); + TableDescriptor htd = env.getMaster().getTableDescriptors().get(tableName); if (htd.hasColumnFamily(cfDescriptor.getName())) { // Remove the column family from file system and update the table descriptor to // the before-add-column-family-state MasterDDLOperationHelper.deleteColumnFamilyFromFileSystem(env, tableName, getRegionInfoList(env), cfDescriptor.getName(), cfDescriptor.isMobEnabled()); - env.getMasterServices().getTableDescriptors().add(unmodifiedTableDescriptor); + env.getMaster().getTableDescriptors().add(unmodifiedTableDescriptor); // Make sure regions are opened after table descriptor is updated. 
//reOpenAllRegionsIfTableIsOnline(env); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java index 6155f1633a..67a0753329 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java @@ -118,7 +118,7 @@ public class CloneSnapshotProcedure } private void restoreSnapshotAcl(MasterProcedureEnv env) throws IOException { - Configuration conf = env.getMasterServices().getConfiguration(); + Configuration conf = env.getMaster().getConfiguration(); if (restoreAcl && snapshot.hasUsersAndPermissions() && snapshot.getUsersAndPermissions() != null && SnapshotDescriptionUtils.isSecurityAvailable(conf)) { RestoreSnapshotHelper.restoreSnapshotAcl(snapshot, tableDescriptor.getTableName(), conf); @@ -314,7 +314,7 @@ public class CloneSnapshotProcedure */ private void prepareClone(final MasterProcedureEnv env) throws IOException { final TableName tableName = getTableName(); - if (MetaTableAccessor.tableExists(env.getMasterServices().getConnection(), tableName)) { + if (MetaTableAccessor.tableExists(env.getMaster().getConnection(), tableName)) { throw new TableExistsException(getTableName()); } } @@ -329,7 +329,7 @@ public class CloneSnapshotProcedure throws IOException, InterruptedException { if (!getTableName().isSystemTable()) { // Check and update namespace quota - final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem(); + final MasterFileSystem mfs = env.getMaster().getMasterFileSystem(); SnapshotManifest manifest = SnapshotManifest.open( env.getMasterConfiguration(), @@ -379,7 +379,7 @@ public class CloneSnapshotProcedure final Path tableRootDir, final TableName tableName, final List newRegions) throws IOException { - final MasterFileSystem mfs = 
env.getMasterServices().getMasterFileSystem(); + final MasterFileSystem mfs = env.getMaster().getMasterFileSystem(); final FileSystem fs = mfs.getFileSystem(); final Path rootDir = mfs.getRootDir(); final Configuration conf = env.getMasterConfiguration(); @@ -434,13 +434,13 @@ public class CloneSnapshotProcedure final TableDescriptor tableDescriptor, List newRegions, final CreateHdfsRegions hdfsRegionHandler) throws IOException { - final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem(); + final MasterFileSystem mfs = env.getMaster().getMasterFileSystem(); final Path tempdir = mfs.getTempDir(); // 1. Create Table Descriptor // using a copy of descriptor, table will be created enabling first final Path tempTableDir = FSUtils.getTableDir(tempdir, tableDescriptor.getTableName()); - ((FSTableDescriptors)(env.getMasterServices().getTableDescriptors())) + ((FSTableDescriptors)(env.getMaster().getTableDescriptors())) .createTableDescriptorForTableDirectory(tempTableDir, TableDescriptorBuilder.newBuilder(tableDescriptor).build(), false); @@ -465,7 +465,7 @@ public class CloneSnapshotProcedure RestoreSnapshotHelper.RestoreMetaChanges metaChanges = new RestoreSnapshotHelper.RestoreMetaChanges( tableDescriptor, parentsToChildrenPairMap); - metaChanges.updateMetaParentRegions(env.getMasterServices().getConnection(), newRegions); + metaChanges.updateMetaParentRegions(env.getMaster().getConnection(), newRegions); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateNamespaceProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateNamespaceProcedure.java index fa743bdbdd..dba77b7b81 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateNamespaceProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateNamespaceProcedure.java @@ -160,7 +160,7 @@ public class CreateNamespaceProcedure @Override protected LockState 
acquireLock(final MasterProcedureEnv env) { - if (!env.getMasterServices().isInitialized()) { + if (!env.getMaster().isInitialized()) { // Namespace manager might not be ready if master is not fully initialized, // return false to reject user namespace creation; return true for default // and system namespace creation (this is part of master initialization). @@ -205,7 +205,7 @@ public class CreateNamespaceProcedure protected static void createDirectory( final MasterProcedureEnv env, final NamespaceDescriptor nsDescriptor) throws IOException { - MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem(); + MasterFileSystem mfs = env.getMaster().getMasterFileSystem(); mfs.getFileSystem().mkdirs( FSUtils.getNamespaceDir(mfs.getRootDir(), nsDescriptor.getName())); } @@ -243,8 +243,8 @@ public class CreateNamespaceProcedure protected static void setNamespaceQuota( final MasterProcedureEnv env, final NamespaceDescriptor nsDescriptor) throws IOException { - if (env.getMasterServices().isInitialized()) { - env.getMasterServices().getMasterQuotaManager().setNamespaceQuota(nsDescriptor); + if (env.getMaster().isInitialized()) { + env.getMaster().getMasterQuotaManager().setNamespaceQuota(nsDescriptor); } } @@ -263,7 +263,7 @@ public class CreateNamespaceProcedure } private static TableNamespaceManager getTableNamespaceManager(final MasterProcedureEnv env) { - return env.getMasterServices().getClusterSchema().getTableNamespaceManager(); + return env.getMaster().getClusterSchema().getTableNamespaceManager(); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java index e9804dd200..0c092696c4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java @@ -19,8 +19,10 @@ 
package org.apache.hadoop.hbase.master.procedure; import java.io.IOException; +import java.net.InetAddress; import java.util.ArrayList; import java.util.List; +import java.util.Optional; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -28,15 +30,19 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.MetaTableAccessor; +import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.TableExistsException; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionReplicaUtil; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.TableState; +import org.apache.hadoop.hbase.ipc.RpcCall; +import org.apache.hadoop.hbase.ipc.RpcServer; import org.apache.hadoop.hbase.master.MasterCoprocessorHost; import org.apache.hadoop.hbase.master.MasterFileSystem; import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer; +import org.apache.hadoop.hbase.util.Addressing; import org.apache.hadoop.hbase.util.FSTableDescriptors; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.ModifyRegionUtils; @@ -230,7 +236,7 @@ public class CreateTableProcedure private boolean prepareCreate(final MasterProcedureEnv env) throws IOException { final TableName tableName = getTableName(); - if (MetaTableAccessor.tableExists(env.getMasterServices().getConnection(), tableName)) { + if (MetaTableAccessor.tableExists(env.getMaster().getConnection(), tableName)) { setFailure("master-create-table", new TableExistsException(getTableName())); return false; } @@ -242,6 +248,21 @@ public class CreateTableProcedure return false; } + if (tableDescriptor.getTableName().getNamespaceAsString(). 
+ equals(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR)) { + // Only allow the local Master process to create tables in the system namespace. + // If no remote address, let the create go ahead. + // I tried writing a test for this but our little testing utility doesn't provide client + // address. + Optional remoteAddress = RpcServer.getRemoteAddress(); + if (remoteAddress.isPresent() && !Addressing.isLocalAddress(remoteAddress.get())) { + setFailure("master-create-table", new DoNotRetryIOException("Only Master " + + "can create tables in the system namespace; localAddress=" + Addressing.getIpAddress() + + ", remoteAddress=" + remoteAddress.get())); + return false; + } + } + + return true; } @@ -296,13 +317,13 @@ protected static List createFsLayout(final MasterProcedureEnv env, final TableDescriptor tableDescriptor, List newRegions, final CreateHdfsRegions hdfsRegionHandler) throws IOException { - final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem(); + final MasterFileSystem mfs = env.getMaster().getMasterFileSystem(); final Path tempdir = mfs.getTempDir(); // 1.
Create Table Descriptor // using a copy of descriptor, table will be created enabling first final Path tempTableDir = FSUtils.getTableDir(tempdir, tableDescriptor.getTableName()); - ((FSTableDescriptors)(env.getMasterServices().getTableDescriptors())) + ((FSTableDescriptors)(env.getMaster().getTableDescriptors())) .createTableDescriptorForTableDirectory( tempTableDir, tableDescriptor, false); @@ -320,7 +341,7 @@ public class CreateTableProcedure final MasterProcedureEnv env, final TableDescriptor tableDescriptor, final Path tempTableDir) throws IOException { - final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem(); + final MasterFileSystem mfs = env.getMaster().getMasterFileSystem(); final Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), tableDescriptor.getTableName()); FileSystem fs = mfs.getFileSystem(); if (!fs.delete(tableDir, true) && fs.exists(tableDir)) { @@ -357,14 +378,14 @@ public class CreateTableProcedure protected static void setEnablingState(final MasterProcedureEnv env, final TableName tableName) throws IOException { // Mark the table as Enabling - env.getMasterServices().getTableStateManager() + env.getMaster().getTableStateManager() .setTableState(tableName, TableState.State.ENABLING); } protected static void setEnabledState(final MasterProcedureEnv env, final TableName tableName) throws IOException { // Enable table - env.getMasterServices().getTableStateManager() + env.getMaster().getTableStateManager() .setTableState(tableName, TableState.State.ENABLED); } @@ -374,13 +395,13 @@ public class CreateTableProcedure private static void addRegionsToMeta(final MasterProcedureEnv env, final TableDescriptor tableDescriptor, final List regionInfos) throws IOException { - MetaTableAccessor.addRegionsToMeta(env.getMasterServices().getConnection(), + MetaTableAccessor.addRegionsToMeta(env.getMaster().getConnection(), regionInfos, tableDescriptor.getRegionReplication()); } protected static void updateTableDescCache(final 
MasterProcedureEnv env, final TableName tableName) throws IOException { - env.getMasterServices().getTableDescriptors().get(tableName); + env.getMaster().getTableDescriptors().get(tableName); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteColumnFamilyProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteColumnFamilyProcedure.java index fd9937834a..79431a8830 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteColumnFamilyProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteColumnFamilyProcedure.java @@ -238,7 +238,7 @@ public class DeleteColumnFamilyProcedure checkTableModifiable(env); // In order to update the descriptor, we need to retrieve the old descriptor for comparison. - unmodifiedTableDescriptor = env.getMasterServices().getTableDescriptors().get(tableName); + unmodifiedTableDescriptor = env.getMaster().getTableDescriptors().get(tableName); if (unmodifiedTableDescriptor == null) { throw new IOException("TableDescriptor missing for " + tableName); } @@ -275,7 +275,7 @@ public class DeleteColumnFamilyProcedure // Update table descriptor LOG.info("DeleteColumn. 
Table = " + tableName + " family = " + getColumnFamilyName()); - TableDescriptor htd = env.getMasterServices().getTableDescriptors().get(tableName); + TableDescriptor htd = env.getMaster().getTableDescriptors().get(tableName); if (!htd.hasColumnFamily(familyName)) { // It is possible to reach this situation, as we could already delete the column family @@ -284,7 +284,7 @@ public class DeleteColumnFamilyProcedure return; } - env.getMasterServices().getTableDescriptors().add( + env.getMaster().getTableDescriptors().add( TableDescriptorBuilder.newBuilder(htd).removeColumnFamily(familyName).build()); } @@ -294,7 +294,7 @@ public class DeleteColumnFamilyProcedure * @throws IOException **/ private void restoreTableDescriptor(final MasterProcedureEnv env) throws IOException { - env.getMasterServices().getTableDescriptors().add(unmodifiedTableDescriptor); + env.getMaster().getTableDescriptors().add(unmodifiedTableDescriptor); // Make sure regions are opened after table descriptor is updated. //reOpenAllRegionsIfTableIsOnline(env); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteNamespaceProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteNamespaceProcedure.java index 9646946e89..e44958739d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteNamespaceProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteNamespaceProcedure.java @@ -197,7 +197,7 @@ public class DeleteNamespaceProcedure int tableCount = 0; try { - tableCount = env.getMasterServices().listTableDescriptorsByNamespace(namespaceName).size(); + tableCount = env.getMaster().listTableDescriptorsByNamespace(namespaceName).size(); } catch (FileNotFoundException fnfe) { throw new NamespaceNotFoundException(namespaceName); } @@ -275,7 +275,7 @@ public class DeleteNamespaceProcedure protected static void deleteDirectory( final MasterProcedureEnv env, final String 
namespaceName) throws IOException { - MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem(); + MasterFileSystem mfs = env.getMaster().getMasterFileSystem(); FileSystem fs = mfs.getFileSystem(); Path p = FSUtils.getNamespaceDir(mfs.getRootDir(), namespaceName); @@ -317,7 +317,7 @@ public class DeleteNamespaceProcedure protected static void removeNamespaceQuota( final MasterProcedureEnv env, final String namespaceName) throws IOException { - env.getMasterServices().getMasterQuotaManager().removeNamespaceQuota(namespaceName); + env.getMaster().getMasterQuotaManager().removeNamespaceQuota(namespaceName); } /** @@ -335,7 +335,7 @@ public class DeleteNamespaceProcedure } private static TableNamespaceManager getTableNamespaceManager(final MasterProcedureEnv env) { - return env.getMasterServices().getClusterSchema().getTableNamespaceManager(); + return env.getMaster().getClusterSchema().getTableNamespaceManager(); } /** * The procedure could be restarted from a different machine. 
If the variable is null, we need to diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java index 4cc18755ec..22b8d99299 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java @@ -240,7 +240,7 @@ public class DeleteTableProcedure private boolean prepareDelete(final MasterProcedureEnv env) throws IOException { try { - env.getMasterServices().checkTableModifiable(tableName); + env.getMaster().checkTableModifiable(tableName); } catch (TableNotFoundException|TableNotDisabledException e) { setFailure("master-delete-table", e); return false; @@ -272,7 +272,7 @@ public class DeleteTableProcedure protected static void deleteFromFs(final MasterProcedureEnv env, final TableName tableName, final List regions, final boolean archive) throws IOException { - final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem(); + final MasterFileSystem mfs = env.getMaster().getMasterFileSystem(); final FileSystem fs = mfs.getFileSystem(); final Path tempdir = mfs.getTempDir(); @@ -349,7 +349,7 @@ public class DeleteTableProcedure */ private static void cleanAnyRemainingRows(final MasterProcedureEnv env, final TableName tableName) throws IOException { - Connection connection = env.getMasterServices().getConnection(); + Connection connection = env.getMaster().getConnection(); Scan tableScan = MetaTableAccessor.getScanForTableName(connection, tableName); try (Table metaTable = connection.getTable(TableName.META_TABLE_NAME)) { List deletes = new ArrayList<>(); @@ -368,16 +368,16 @@ public class DeleteTableProcedure protected static void deleteFromMeta(final MasterProcedureEnv env, final TableName tableName, List regions) throws IOException { - 
MetaTableAccessor.deleteRegions(env.getMasterServices().getConnection(), regions); + MetaTableAccessor.deleteRegions(env.getMaster().getConnection(), regions); // Clean any remaining rows for this table. cleanAnyRemainingRows(env, tableName); // clean region references from the server manager - env.getMasterServices().getServerManager().removeRegions(regions); + env.getMaster().getServerManager().removeRegions(regions); // Clear Favored Nodes for this table - FavoredNodesManager fnm = env.getMasterServices().getFavoredNodesManager(); + FavoredNodesManager fnm = env.getMaster().getFavoredNodesManager(); if (fnm != null) { fnm.deleteFavoredNodesForRegions(regions); } @@ -387,17 +387,17 @@ public class DeleteTableProcedure final TableName tableName) throws IOException { // Clean up regions of the table in RegionStates. LOG.debug("Removing '" + tableName + "' from region states."); - env.getMasterServices().getAssignmentManager().deleteTable(tableName); + env.getMaster().getAssignmentManager().deleteTable(tableName); // If entry for this table states, remove it. 
LOG.debug("Marking '" + tableName + "' as deleted."); - env.getMasterServices().getTableStateManager().setDeletedTable(tableName); + env.getMaster().getTableStateManager().setDeletedTable(tableName); } protected static void deleteTableDescriptorCache(final MasterProcedureEnv env, final TableName tableName) throws IOException { LOG.debug("Removing '" + tableName + "' descriptor."); - env.getMasterServices().getTableDescriptors().remove(tableName); + env.getMaster().getTableDescriptors().remove(tableName); } protected static void deleteTableStates(final MasterProcedureEnv env, final TableName tableName) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java index 045ee9e7e8..e86b0d11fa 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java @@ -215,7 +215,7 @@ public class DisableTableProcedure if (tableName.equals(TableName.META_TABLE_NAME)) { setFailure("master-disable-table", new ConstraintException("Cannot disable catalog table")); canTableBeDisabled = false; - } else if (!MetaTableAccessor.tableExists(env.getMasterServices().getConnection(), tableName)) { + } else if (!MetaTableAccessor.tableExists(env.getMaster().getConnection(), tableName)) { setFailure("master-disable-table", new TableNotFoundException(tableName)); canTableBeDisabled = false; } else if (!skipTableStateCheck) { @@ -228,7 +228,7 @@ public class DisableTableProcedure // the state to DISABLING from ENABLED. The implementation was done before table lock // was implemented. With table lock, there is no need to set the state here (it will // set the state later on). A quick state check should be enough for us to move forward. 
- TableStateManager tsm = env.getMasterServices().getTableStateManager(); + TableStateManager tsm = env.getMaster().getTableStateManager(); TableState.State state = tsm.getTableState(tableName); if (!state.equals(TableState.State.ENABLED)){ LOG.info("Table " + tableName + " isn't enabled;is "+state.name()+"; skipping disable"); @@ -265,7 +265,7 @@ public class DisableTableProcedure final MasterProcedureEnv env, final TableName tableName) throws IOException { // Set table disabling flag up in zk. - env.getMasterServices().getTableStateManager().setTableState( + env.getMaster().getTableStateManager().setTableState( tableName, TableState.State.DISABLING); } @@ -279,7 +279,7 @@ public class DisableTableProcedure final MasterProcedureEnv env, final TableName tableName) throws IOException { // Flip the table to disabled - env.getMasterServices().getTableStateManager().setTableState( + env.getMaster().getTableStateManager().setTableState( tableName, TableState.State.DISABLED); LOG.info("Disabled table, " + tableName + ", is completed."); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java index 6c94effd0b..51f139d54a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java @@ -111,11 +111,10 @@ public class EnableTableProcedure setNextState(EnableTableState.ENABLE_TABLE_MARK_REGIONS_ONLINE); break; case ENABLE_TABLE_MARK_REGIONS_ONLINE: - Connection connection = env.getMasterServices().getConnection(); + Connection connection = env.getMaster().getConnection(); // we will need to get the tableDescriptor here to see if there is a change in the replica // count - TableDescriptor hTableDescriptor = - env.getMasterServices().getTableDescriptors().get(tableName); + TableDescriptor 
hTableDescriptor = env.getMaster().getTableDescriptors().get(tableName); // Get the replica count int regionReplicaCount = hTableDescriptor.getRegionReplication(); @@ -318,7 +317,7 @@ public class EnableTableProcedure boolean canTableBeEnabled = true; // Check whether table exists - if (!MetaTableAccessor.tableExists(env.getMasterServices().getConnection(), tableName)) { + if (!MetaTableAccessor.tableExists(env.getMaster().getConnection(), tableName)) { setFailure("master-enable-table", new TableNotFoundException(tableName)); canTableBeEnabled = false; } else if (!skipTableStateCheck) { @@ -331,7 +330,7 @@ public class EnableTableProcedure // the state to ENABLING from DISABLED. The implementation was done before table lock // was implemented. With table lock, there is no need to set the state here (it will // set the state later on). A quick state check should be enough for us to move forward. - TableStateManager tsm = env.getMasterServices().getTableStateManager(); + TableStateManager tsm = env.getMaster().getTableStateManager(); TableState.State state = tsm.getTableState(tableName); if(!state.equals(TableState.State.DISABLED)){ LOG.info("Table " + tableName + " isn't disabled;is "+state.name()+"; skipping enable"); @@ -370,7 +369,7 @@ public class EnableTableProcedure final TableName tableName) throws IOException { // Set table disabling flag up in zk. 
LOG.info("Attempting to enable the table " + tableName); - env.getMasterServices().getTableStateManager().setTableState( + env.getMaster().getTableStateManager().setTableState( tableName, TableState.State.ENABLING); } @@ -384,7 +383,7 @@ public class EnableTableProcedure final MasterProcedureEnv env, final TableName tableName) throws IOException { // Flip the table to Enabled - env.getMasterServices().getTableStateManager().setTableState( + env.getMaster().getTableStateManager().setTableState( tableName, TableState.State.ENABLED); LOG.info("Table '" + tableName + "' was successfully enabled."); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterDDLOperationHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterDDLOperationHelper.java index 02ecdc6b3d..c628ec03ab 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterDDLOperationHelper.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterDDLOperationHelper.java @@ -50,7 +50,7 @@ public final class MasterDDLOperationHelper { final List regionInfoList, final byte[] familyName, final boolean hasMob) throws IOException { - final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem(); + final MasterFileSystem mfs = env.getMaster().getMasterFileSystem(); if (LOG.isDebugEnabled()) { LOG.debug("Removing family=" + Bytes.toString(familyName) + " from table=" + tableName); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureEnv.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureEnv.java index f294f57b62..ac598a9065 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureEnv.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureEnv.java @@ -96,13 +96,13 @@ public class MasterProcedureEnv implements ConfigurationObserver 
{ private final RSProcedureDispatcher remoteDispatcher; private final MasterProcedureScheduler procSched; - private final MasterServices master; + private final HMaster master; - public MasterProcedureEnv(final MasterServices master) { + public MasterProcedureEnv(HMaster master) { this(master, new RSProcedureDispatcher(master)); } - public MasterProcedureEnv(final MasterServices master, + public MasterProcedureEnv(HMaster master, final RSProcedureDispatcher remoteDispatcher) { this.master = master; this.procSched = new MasterProcedureScheduler(master.getConfiguration()); @@ -113,7 +113,7 @@ public class MasterProcedureEnv implements ConfigurationObserver { return RpcServer.getRequestUser().orElse(Superusers.getSystemUser()); } - public MasterServices getMasterServices() { + public HMaster getMaster() { return master; } @@ -151,10 +151,7 @@ public class MasterProcedureEnv implements ConfigurationObserver { } public boolean waitServerCrashProcessingEnabled(Procedure proc) { - if (master instanceof HMaster) { - return procSched.waitEvent(((HMaster)master).getServerCrashProcessingEnabledEvent(), proc); - } - return false; + return procSched.waitEvent(master.getServerCrashProcessingEnabledEvent(), proc); } public boolean waitFailoverCleanup(Procedure proc) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureUtil.java index 27e67b0961..384f9a2d3b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements.
See the NOTICE file * distributed with this work for additional information @@ -24,7 +24,6 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; -import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.procedure2.Procedure; import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation; @@ -67,13 +66,13 @@ public final class MasterProcedureUtil { * See submitProcedure() for an example. */ public static abstract class NonceProcedureRunnable { - private final MasterServices master; + private final ProcedureExecutor executor; private final NonceKey nonceKey; private Long procId; - public NonceProcedureRunnable(final MasterServices master, + public NonceProcedureRunnable(final ProcedureExecutor executor, final long nonceGroup, final long nonce) { - this.master = master; + this.executor = executor; this.nonceKey = getProcedureExecutor().createNonceKey(nonceGroup, nonce); } @@ -81,12 +80,8 @@ public final class MasterProcedureUtil { return nonceKey; } - protected MasterServices getMaster() { - return master; - } - protected ProcedureExecutor getProcedureExecutor() { - return master.getMasterProcedureExecutor(); + return this.executor; } protected long getProcId() { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyColumnFamilyProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyColumnFamilyProcedure.java index b6d7faa732..cdb4761613 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyColumnFamilyProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyColumnFamilyProcedure.java @@ -222,7 +222,7 @@ public class ModifyColumnFamilyProcedure // Checks whether the table is allowed to be 
modified. checkTableModifiable(env); - unmodifiedtableDescriptor = env.getMasterServices().getTableDescriptors().get(tableName); + unmodifiedtableDescriptor = env.getMaster().getTableDescriptors().get(tableName); if (unmodifiedtableDescriptor == null) { throw new IOException("TableDescriptor missing for " + tableName); } @@ -251,9 +251,9 @@ public class ModifyColumnFamilyProcedure // Update table descriptor LOG.info("ModifyColumnFamily. Table = " + tableName + " HCD = " + cfDescriptor.toString()); - TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(env.getMasterServices().getTableDescriptors().get(tableName)); + TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(env.getMaster().getTableDescriptors().get(tableName)); builder.modifyColumnFamily(cfDescriptor); - env.getMasterServices().getTableDescriptors().add(builder.build()); + env.getMaster().getTableDescriptors().add(builder.build()); } /** @@ -262,7 +262,7 @@ public class ModifyColumnFamilyProcedure * @throws IOException **/ private void restoreTableDescriptor(final MasterProcedureEnv env) throws IOException { - env.getMasterServices().getTableDescriptors().add(unmodifiedtableDescriptor); + env.getMaster().getTableDescriptors().add(unmodifiedtableDescriptor); // Make sure regions are opened after table descriptor is updated. 
//reOpenAllRegionsIfTableIsOnline(env); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyNamespaceProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyNamespaceProcedure.java index 697a2ea668..621c7f3044 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyNamespaceProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyNamespaceProcedure.java @@ -201,7 +201,7 @@ public class ModifyNamespaceProcedure } private TableNamespaceManager getTableNamespaceManager(final MasterProcedureEnv env) { - return env.getMasterServices().getClusterSchema().getTableNamespaceManager(); + return env.getMaster().getClusterSchema().getTableNamespaceManager(); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java index bda8b81e98..fc271babf4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java @@ -237,7 +237,7 @@ public class ModifyTableProcedure */ private void prepareModify(final MasterProcedureEnv env) throws IOException { // Checks whether the table exists - if (!MetaTableAccessor.tableExists(env.getMasterServices().getConnection(), getTableName())) { + if (!MetaTableAccessor.tableExists(env.getMaster().getConnection(), getTableName())) { throw new TableNotFoundException(getTableName()); } @@ -249,9 +249,9 @@ public class ModifyTableProcedure // In order to update the descriptor, we need to retrieve the old descriptor for comparison. 
this.unmodifiedTableDescriptor = - env.getMasterServices().getTableDescriptors().get(getTableName()); + env.getMaster().getTableDescriptors().get(getTableName()); - if (env.getMasterServices().getTableStateManager() + if (env.getMaster().getTableStateManager() .isTableState(getTableName(), TableState.State.ENABLED)) { if (modifiedTableDescriptor.getRegionReplication() != unmodifiedTableDescriptor .getRegionReplication()) { @@ -289,7 +289,7 @@ public class ModifyTableProcedure * @throws IOException **/ private void updateTableDescriptor(final MasterProcedureEnv env) throws IOException { - env.getMasterServices().getTableDescriptors().add(modifiedTableDescriptor); + env.getMaster().getTableDescriptors().add(modifiedTableDescriptor); } /** @@ -298,7 +298,7 @@ public class ModifyTableProcedure * @throws IOException **/ private void restoreTableDescriptor(final MasterProcedureEnv env) throws IOException { - env.getMasterServices().getTableDescriptors().add(unmodifiedTableDescriptor); + env.getMaster().getTableDescriptors().add(unmodifiedTableDescriptor); // delete any new column families from the modifiedTableDescriptor. 
deleteFromFs(env, modifiedTableDescriptor, unmodifiedTableDescriptor); @@ -343,7 +343,7 @@ public class ModifyTableProcedure if (newReplicaCount < oldReplicaCount) { Set tableRows = new HashSet<>(); - Connection connection = env.getMasterServices().getConnection(); + Connection connection = env.getMaster().getConnection(); Scan scan = MetaTableAccessor.getScanForTableName(connection, getTableName()); scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER); @@ -360,7 +360,7 @@ public class ModifyTableProcedure } } if (newReplicaCount > oldReplicaCount) { - Connection connection = env.getMasterServices().getConnection(); + Connection connection = env.getMaster().getConnection(); // Get the existing table regions List existingTableRegions = MetaTableAccessor.getTableRegions(connection, getTableName()); @@ -377,7 +377,7 @@ public class ModifyTableProcedure private static void addRegionsToMeta(final MasterProcedureEnv env, final TableDescriptor tableDescriptor, final List regionInfos) throws IOException { - MetaTableAccessor.addRegionsToMeta(env.getMasterServices().getConnection(), regionInfos, + MetaTableAccessor.addRegionsToMeta(env.getMaster().getConnection(), regionInfos, tableDescriptor.getRegionReplication()); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ProcedureSyncWait.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ProcedureSyncWait.java index 18fa91db4d..9d911c6710 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ProcedureSyncWait.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ProcedureSyncWait.java @@ -199,8 +199,8 @@ public final class ProcedureSyncWait { protected static void waitMetaRegions(final MasterProcedureEnv env) throws IOException { int timeout = env.getMasterConfiguration().getInt("hbase.client.catalog.timeout", 10000); try { - if 
(env.getMasterServices().getMetaTableLocator().waitMetaRegionLocation( - env.getMasterServices().getZooKeeper(), timeout) == null) { + if (env.getMaster().getMetaTableLocator().waitMetaRegionLocation( + env.getMaster().getZooKeeper(), timeout) == null) { throw new NotAllMetaRegionsOnlineException(); } } catch (InterruptedException e) { @@ -228,7 +228,7 @@ public final class ProcedureSyncWait { new ProcedureSyncWait.Predicate() { @Override public MasterQuotaManager evaluate() throws IOException { - return env.getMasterServices().getMasterQuotaManager(); + return env.getMaster().getMasterQuotaManager(); } }); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java index a0c06ce2b0..bb6b5c127e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -31,7 +31,7 @@ import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException; -import org.apache.hadoop.hbase.master.MasterServices; +import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.ServerListener; import org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; @@ -48,13 +48,14 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProc import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse; +import org.apache.yetus.audience.InterfaceAudience; /** * A remote procecdure dispatcher for regionservers. 
*/ +@InterfaceAudience.Private public class RSProcedureDispatcher - extends RemoteProcedureDispatcher - implements ServerListener { + extends RemoteProcedureDispatcher implements ServerListener { private static final Log LOG = LogFactory.getLog(RSProcedureDispatcher.class); public static final String RS_RPC_STARTUP_WAIT_TIME_CONF_KEY = @@ -63,10 +64,10 @@ public class RSProcedureDispatcher private static final int RS_VERSION_WITH_EXEC_PROCS = 0x0200000; // 2.0 - protected final MasterServices master; + protected final HMaster master; protected final long rsStartupWaitTime; - public RSProcedureDispatcher(final MasterServices master) { + public RSProcedureDispatcher(HMaster master) { super(master.getConfiguration()); this.master = master; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RecoverMetaProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RecoverMetaProcedure.java index fe3a445c3d..0abf56b3ed 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RecoverMetaProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RecoverMetaProcedure.java @@ -250,7 +250,7 @@ public class RecoverMetaProcedure */ private void prepare(MasterProcedureEnv env) { if (master == null) { - master = (HMaster) env.getMasterServices(); + master = (HMaster) env.getMaster(); Preconditions.checkArgument(master != null); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.java index 2cf558437a..d0bfc21fbf 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.java @@ -327,12 +327,12 @@ public class RestoreSnapshotProcedure private void prepareRestore(final 
MasterProcedureEnv env) throws IOException { final TableName tableName = getTableName(); // Checks whether the table exists - if (!MetaTableAccessor.tableExists(env.getMasterServices().getConnection(), tableName)) { + if (!MetaTableAccessor.tableExists(env.getMaster().getConnection(), tableName)) { throw new TableNotFoundException(tableName); } // Check whether table is disabled. - env.getMasterServices().checkTableModifiable(tableName); + env.getMaster().checkTableModifiable(tableName); // Check that we have at least 1 CF if (modifiedTableDescriptor.getColumnFamilyCount() == 0) { @@ -342,7 +342,7 @@ public class RestoreSnapshotProcedure if (!getTableName().isSystemTable()) { // Table already exist. Check and update the region quota for this table namespace. - final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem(); + final MasterFileSystem mfs = env.getMaster().getMasterFileSystem(); SnapshotManifest manifest = SnapshotManifest.open( env.getMasterConfiguration(), mfs.getFileSystem(), @@ -365,7 +365,7 @@ public class RestoreSnapshotProcedure * @throws IOException **/ private void updateTableDescriptor(final MasterProcedureEnv env) throws IOException { - env.getMasterServices().getTableDescriptors().add(modifiedTableDescriptor); + env.getMaster().getTableDescriptors().add(modifiedTableDescriptor); } /** @@ -374,7 +374,7 @@ public class RestoreSnapshotProcedure * @throws IOException **/ private void restoreSnapshot(final MasterProcedureEnv env) throws IOException { - MasterFileSystem fileSystemManager = env.getMasterServices().getMasterFileSystem(); + MasterFileSystem fileSystemManager = env.getMaster().getMasterFileSystem(); FileSystem fs = fileSystemManager.getFileSystem(); Path rootDir = fileSystemManager.getRootDir(); final ForeignExceptionDispatcher monitorException = new ForeignExceptionDispatcher(); @@ -383,9 +383,9 @@ public class RestoreSnapshotProcedure try { Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshot, 
rootDir); SnapshotManifest manifest = SnapshotManifest.open( - env.getMasterServices().getConfiguration(), fs, snapshotDir, snapshot); + env.getMaster().getConfiguration(), fs, snapshotDir, snapshot); RestoreSnapshotHelper restoreHelper = new RestoreSnapshotHelper( - env.getMasterServices().getConfiguration(), + env.getMaster().getConfiguration(), fs, manifest, modifiedTableDescriptor, @@ -403,7 +403,7 @@ public class RestoreSnapshotProcedure + " failed in on-disk restore. Try re-running the restore command."; LOG.error(msg, e); monitorException.receive( - new ForeignException(env.getMasterServices().getServerName().toString(), e)); + new ForeignException(env.getMaster().getServerName().toString(), e)); throw new IOException(msg, e); } } @@ -415,7 +415,7 @@ public class RestoreSnapshotProcedure **/ private void updateMETA(final MasterProcedureEnv env) throws IOException { try { - Connection conn = env.getMasterServices().getConnection(); + Connection conn = env.getMaster().getConnection(); // 1. Prepare to restore getMonitorStatus().setStatus("Preparing to restore each region"); @@ -466,7 +466,7 @@ public class RestoreSnapshotProcedure + " failed in meta update. Try re-running the restore command."; LOG.error(msg, e); monitorException.receive( - new ForeignException(env.getMasterServices().getServerName().toString(), e)); + new ForeignException(env.getMaster().getServerName().toString(), e)); throw new IOException(msg, e); } @@ -479,10 +479,10 @@ public class RestoreSnapshotProcedure private void restoreSnapshotAcl(final MasterProcedureEnv env) throws IOException { if (restoreAcl && snapshot.hasUsersAndPermissions() && snapshot.getUsersAndPermissions() != null && SnapshotDescriptionUtils - .isSecurityAvailable(env.getMasterServices().getConfiguration())) { + .isSecurityAvailable(env.getMaster().getConfiguration())) { // restore acl of snapshot to table. 
RestoreSnapshotHelper.restoreSnapshotAcl(snapshot, TableName.valueOf(snapshot.getTable()), - env.getMasterServices().getConfiguration()); + env.getMaster().getConfiguration()); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java index a0ee628257..62c83ff7c9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java @@ -28,7 +28,7 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionReplicaUtil; -import org.apache.hadoop.hbase.master.MasterServices; +import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.MasterWalManager; import org.apache.hadoop.hbase.master.assignment.AssignProcedure; import org.apache.hadoop.hbase.master.assignment.AssignmentManager; @@ -104,11 +104,11 @@ implements ServerProcedureInterface { @Override protected Flow executeFromState(MasterProcedureEnv env, ServerCrashState state) throws ProcedureSuspendedException, ProcedureYieldException { - final MasterServices services = env.getMasterServices(); + final HMaster master = env.getMaster(); // HBASE-14802 // If we have not yet notified that we are processing a dead server, we should do now. 
if (!notifiedDeadServer) { - services.getServerManager().getDeadServers().notifyServer(serverName); + master.getServerManager().getDeadServers().notifyServer(serverName); notifiedDeadServer = true; } @@ -130,7 +130,7 @@ implements ServerProcedureInterface { throw new ProcedureSuspendedException(); } - this.regionsOnCrashedServer = services.getAssignmentManager().getRegionStates() + this.regionsOnCrashedServer = master.getAssignmentManager().getRegionStates() .getServerRegionInfoSet(serverName); // Where to go next? Depends on whether we should split logs at all or // if we should do distributed log splitting. @@ -172,7 +172,7 @@ implements ServerProcedureInterface { break; case SERVER_CRASH_FINISH: - services.getServerManager().getDeadServers().finish(serverName); + master.getServerManager().getDeadServers().finish(serverName); return Flow.NO_MORE_STATE; default: @@ -193,7 +193,7 @@ implements ServerProcedureInterface { this.serverName); // Assign meta if still carrying it. Check again: region may be assigned because of RIT timeout - final AssignmentManager am = env.getMasterServices().getAssignmentManager(); + final AssignmentManager am = env.getMaster().getAssignmentManager(); for (RegionInfo hri: am.getRegionStates().getServerRegionInfoSet(serverName)) { if (!isDefaultMetaRegion(hri)) continue; @@ -217,8 +217,8 @@ implements ServerProcedureInterface { if (LOG.isDebugEnabled()) { LOG.debug("Splitting WALs " + this); } - MasterWalManager mwm = env.getMasterServices().getMasterWalManager(); - AssignmentManager am = env.getMasterServices().getAssignmentManager(); + MasterWalManager mwm = env.getMaster().getMasterWalManager(); + AssignmentManager am = env.getMaster().getAssignmentManager(); // TODO: For Matteo. Below BLOCKs!!!! Redo so can relinquish executor while it is running. // PROBLEM!!! WE BLOCK HERE. 
mwm.splitLog(this.serverName); @@ -366,7 +366,7 @@ implements ServerProcedureInterface { */ private void handleRIT(final MasterProcedureEnv env, final List regions) { if (regions == null) return; - AssignmentManager am = env.getMasterServices().getAssignmentManager(); + AssignmentManager am = env.getMaster().getAssignmentManager(); final Iterator it = regions.iterator(); ServerCrashException sce = null; while (it.hasNext()) { @@ -399,6 +399,6 @@ implements ServerProcedureInterface { @Override protected ProcedureMetrics getProcedureMetrics(MasterProcedureEnv env) { - return env.getMasterServices().getMasterMetrics().getServerCrashProcMetrics(); + return env.getMaster().getMasterMetrics().getServerCrashProcMetrics(); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.java index c82f8d1f1a..80cfcab6fe 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.java @@ -96,7 +96,7 @@ public class TruncateTableProcedure setNextState(TruncateTableState.TRUNCATE_TABLE_REMOVE_FROM_META); break; case TRUNCATE_TABLE_REMOVE_FROM_META: - tableDescriptor = env.getMasterServices().getTableDescriptors() + tableDescriptor = env.getMaster().getTableDescriptors() .get(tableName); DeleteTableProcedure.deleteFromMeta(env, getTableName(), regions); DeleteTableProcedure.deleteAssignmentState(env, getTableName()); @@ -269,7 +269,7 @@ public class TruncateTableProcedure private boolean prepareTruncate(final MasterProcedureEnv env) throws IOException { try { - env.getMasterServices().checkTableModifiable(getTableName()); + env.getMaster().checkTableModifiable(getTableName()); } catch (TableNotFoundException|TableNotDisabledException e) { setFailure("master-truncate-table", e); return false; 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/DisabledTableSnapshotHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/DisabledTableSnapshotHandler.java index 84c154f8bf..e7c0ca7bae 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/DisabledTableSnapshotHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/DisabledTableSnapshotHandler.java @@ -29,6 +29,7 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionReplicaUtil; import org.apache.hadoop.hbase.errorhandling.ForeignException; +import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.mob.MobUtils; import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils; @@ -54,11 +55,11 @@ public class DisabledTableSnapshotHandler extends TakeSnapshotHandler { /** * @param snapshot descriptor of the snapshot to take - * @param masterServices master services provider + * @param master master services provider */ public DisabledTableSnapshotHandler(SnapshotDescription snapshot, - final MasterServices masterServices, final SnapshotManager snapshotManager) { - super(snapshot, masterServices, snapshotManager); + HMaster master, final SnapshotManager snapshotManager) { + super(snapshot, master, snapshotManager); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/EnabledTableSnapshotHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/EnabledTableSnapshotHandler.java index 399a1274e4..6ee4593f81 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/EnabledTableSnapshotHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/EnabledTableSnapshotHandler.java @@ -27,7 +27,7 @@ import 
org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.errorhandling.ForeignException; -import org.apache.hadoop.hbase.master.MasterServices; +import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.mob.MobUtils; import org.apache.hadoop.hbase.procedure.Procedure; import org.apache.hadoop.hbase.procedure.ProcedureCoordinator; @@ -49,7 +49,7 @@ public class EnabledTableSnapshotHandler extends TakeSnapshotHandler { private static final Log LOG = LogFactory.getLog(EnabledTableSnapshotHandler.class); private final ProcedureCoordinator coordinator; - public EnabledTableSnapshotHandler(SnapshotDescription snapshot, MasterServices master, + public EnabledTableSnapshotHandler(SnapshotDescription snapshot, HMaster master, final SnapshotManager manager) { super(snapshot, master, manager); this.coordinator = manager.getCoordinator(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java index b698082f74..12cc79281d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java @@ -31,7 +31,7 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionReplicaUtil; import org.apache.hadoop.hbase.client.TableDescriptor; -import org.apache.hadoop.hbase.master.MasterServices; +import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.mob.MobUtils; import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils; import org.apache.hadoop.hbase.snapshot.CorruptedSnapshotException; @@ -83,16 +83,16 @@ public final class MasterSnapshotVerifier { private 
FileSystem fs; private Path rootDir; private TableName tableName; - private MasterServices services; + private HMaster master; /** - * @param services services for the master + * @param master the active master instance * @param snapshot snapshot to check * @param rootDir root directory of the hbase installation. */ - public MasterSnapshotVerifier(MasterServices services, SnapshotDescription snapshot, Path rootDir) { - this.fs = services.getMasterFileSystem().getFileSystem(); - this.services = services; + public MasterSnapshotVerifier(HMaster master, SnapshotDescription snapshot, Path rootDir) { + this.fs = master.getMasterFileSystem().getFileSystem(); + this.master = master; this.snapshot = snapshot; this.rootDir = rootDir; this.tableName = TableName.valueOf(snapshot.getTable()); @@ -108,7 +108,7 @@ public final class MasterSnapshotVerifier { */ public void verifySnapshot(Path snapshotDir, Set snapshotServers) throws CorruptedSnapshotException, IOException { - SnapshotManifest manifest = SnapshotManifest.open(services.getConfiguration(), fs, + SnapshotManifest manifest = SnapshotManifest.open(master.getConfiguration(), fs, snapshotDir, snapshot); // verify snapshot info matches verifySnapshotDescription(snapshotDir); @@ -159,9 +159,9 @@ public final class MasterSnapshotVerifier { private void verifyRegions(final SnapshotManifest manifest) throws IOException { List regions; if (TableName.META_TABLE_NAME.equals(tableName)) { - regions = new MetaTableLocator().getMetaRegions(services.getZooKeeper()); + regions = new MetaTableLocator().getMetaRegions(master.getZooKeeper()); } else { - regions = MetaTableAccessor.getTableRegions(services.getConnection(), tableName); + regions = MetaTableAccessor.getTableRegions(master.getConnection(), tableName); } // Remove the non-default regions RegionReplicaUtil.removeNonDefaultRegions(regions); @@ -207,7 +207,7 @@ public final class MasterSnapshotVerifier { } // Verify Snapshot HFiles - 
SnapshotReferenceUtil.verifySnapshot(services.getConfiguration(), fs, manifest); + SnapshotReferenceUtil.verifySnapshot(master.getConfiguration(), fs, manifest); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotHFileCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotHFileCleaner.java index f3ca993576..b0b7344f91 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotHFileCleaner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotHFileCleaner.java @@ -60,7 +60,7 @@ public class SnapshotHFileCleaner extends BaseHFileCleanerDelegate { /** File cache for HFiles in the completed and currently running snapshots */ private SnapshotFileCache cache; - private MasterServices master; + private HMaster master; @Override public synchronized Iterable getDeletableFiles(Iterable files) { @@ -77,7 +77,7 @@ public class SnapshotHFileCleaner extends BaseHFileCleanerDelegate { @Override public void init(Map params) { if (params.containsKey(HMaster.MASTER)) { - this.master = (MasterServices) params.get(HMaster.MASTER); + this.master = (HMaster)params.get(HMaster.MASTER); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java index 20a4f39935..2cc81991db 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java @@ -41,6 +41,9 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.Stoppable; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.master.HMaster; +import org.apache.yetus.audience.InterfaceAudience; +import org.apache.yetus.audience.InterfaceStability; import 
org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.TableDescriptorBuilder; import org.apache.hadoop.hbase.client.TableState; @@ -139,7 +142,8 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable private static final int SNAPSHOT_POOL_THREADS_DEFAULT = 1; private boolean stopped; - private MasterServices master; // Needed by TableEventHandlers + // Strange. Master is passed on construction and on initialization. + private HMaster master; // Needed by TableEventHandlers private ProcedureCoordinator coordinator; // Is snapshot feature enabled? @@ -171,18 +175,13 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable * */ private KeyLocker locks = new KeyLocker<>(); - - - public SnapshotManager() {} - /** * Fully specify all necessary components of a snapshot manager. Exposed for testing. * @param master services for the master where the manager is running * @param coordinator procedure coordinator instance. exposed for testing. * @param pool HBase ExecutorServcie instance, exposed for testing. 
*/ - public SnapshotManager(final MasterServices master, final MetricsMaster metricsMaster, - ProcedureCoordinator coordinator, ExecutorService pool) + public SnapshotManager(HMaster master, ProcedureCoordinator coordinator, ExecutorService pool) throws IOException, UnsupportedOperationException { this.master = master; @@ -1113,7 +1112,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable } @Override - public void initialize(MasterServices master, MetricsMaster metricsMaster) throws KeeperException, + public void initialize(HMaster master, MetricsMaster metricsMaster) throws KeeperException, IOException, UnsupportedOperationException { this.master = master; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java index 808cab5b35..4cc5bf3fd8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java @@ -40,7 +40,7 @@ import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher; import org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare; import org.apache.hadoop.hbase.executor.EventHandler; import org.apache.hadoop.hbase.executor.EventType; -import org.apache.hadoop.hbase.master.MasterServices; +import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.MetricsSnapshot; import org.apache.hadoop.hbase.master.SnapshotSentinel; import org.apache.hadoop.hbase.master.locking.LockManager; @@ -74,7 +74,7 @@ public abstract class TakeSnapshotHandler extends EventHandler implements Snapsh private volatile boolean finished; // none of these should ever be null - protected final MasterServices master; + protected final HMaster master; protected final MetricsSnapshot metricsSnapshot = new MetricsSnapshot(); protected 
final SnapshotDescription snapshot; protected final Configuration conf; @@ -94,15 +94,15 @@ public abstract class TakeSnapshotHandler extends EventHandler implements Snapsh /** * @param snapshot descriptor of the snapshot to take - * @param masterServices master services provider + * @param master master services provider */ - public TakeSnapshotHandler(SnapshotDescription snapshot, final MasterServices masterServices, + public TakeSnapshotHandler(SnapshotDescription snapshot, HMaster master, final SnapshotManager snapshotManager) { - super(masterServices, EventType.C_M_SNAPSHOT_TABLE); + super(master, EventType.C_M_SNAPSHOT_TABLE); assert snapshot != null : "SnapshotDescription must not be nul1"; - assert masterServices != null : "MasterServices must not be nul1"; + assert master != null : "MasterServices must not be nul1"; - this.master = masterServices; + this.master = master; this.snapshot = snapshot; this.snapshotManager = snapshotManager; this.snapshotTable = TableName.valueOf(snapshot.getTable()); @@ -114,12 +114,12 @@ public abstract class TakeSnapshotHandler extends EventHandler implements Snapsh this.monitor = new ForeignExceptionDispatcher(snapshot.getName()); this.snapshotManifest = SnapshotManifest.create(conf, fs, workingDir, snapshot, monitor); - this.tableLock = master.getLockManager().createMasterLock( + this.tableLock = this.master.getLockManager().createMasterLock( snapshotTable, LockType.EXCLUSIVE, this.getClass().getName() + ": take snapshot " + snapshot.getName()); // prepare the verify - this.verifier = new MasterSnapshotVerifier(masterServices, snapshot, rootDir); + this.verifier = new MasterSnapshotVerifier(master, snapshot, rootDir); // update the running tasks this.status = TaskMonitor.get().createStatus( "Taking " + snapshot.getType() + " snapshot on table: " + snapshotTable); @@ -174,10 +174,10 @@ public abstract class TakeSnapshotHandler extends EventHandler implements Snapsh List> regionsAndLocations; if 
(TableName.META_TABLE_NAME.equals(snapshotTable)) { regionsAndLocations = new MetaTableLocator().getMetaRegionsAndLocations( - server.getZooKeeper()); + this.master.getZooKeeper()); } else { regionsAndLocations = MetaTableAccessor.getTableRegionsAndLocations( - server.getConnection(), snapshotTable, false); + this.master.getConnection(), snapshotTable, false); } // run the snapshot diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/namespace/NamespaceAuditor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/namespace/NamespaceAuditor.java index efe2c1e957..2e953e7032 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/namespace/NamespaceAuditor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/namespace/NamespaceAuditor.java @@ -27,7 +27,7 @@ import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.TableExistsException; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.RegionInfo; -import org.apache.hadoop.hbase.master.MasterServices; +import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.quotas.QuotaExceededException; import org.apache.yetus.audience.InterfaceAudience; @@ -40,11 +40,11 @@ import org.apache.yetus.audience.InterfaceAudience; public class NamespaceAuditor { private static final Log LOG = LogFactory.getLog(NamespaceAuditor.class); private NamespaceStateManager stateManager; - private MasterServices masterServices; + private final HMaster master; - public NamespaceAuditor(MasterServices masterServices) { - this.masterServices = masterServices; - stateManager = new NamespaceStateManager(masterServices); + public NamespaceAuditor(HMaster master) { + this.master = master; + stateManager = new NamespaceStateManager(master); } public void start() throws IOException { @@ -66,7 +66,7 @@ public class NamespaceAuditor { public void checkQuotaToCreateTable(TableName tName, int regions) throws IOException { if (stateManager.isInitialized()) 
{ // We do this check to fail fast. - if (MetaTableAccessor.tableExists(this.masterServices.getConnection(), tName)) { + if (MetaTableAccessor.tableExists(this.master.getConnection(), tName)) { throw new TableExistsException(tName); } stateManager.checkAndUpdateNamespaceTableCount(tName, regions); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/namespace/NamespaceStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/namespace/NamespaceStateManager.java index c62594adc8..a6af22fbf3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/namespace/NamespaceStateManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/namespace/NamespaceStateManager.java @@ -28,7 +28,7 @@ import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.RegionInfo; -import org.apache.hadoop.hbase.master.MasterServices; +import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.TableNamespaceManager; import org.apache.hadoop.hbase.quotas.QuotaExceededException; import org.apache.hadoop.hbase.util.Bytes; @@ -43,12 +43,12 @@ class NamespaceStateManager { private static final Log LOG = LogFactory.getLog(NamespaceStateManager.class); private ConcurrentMap nsStateCache; - private MasterServices master; + private final HMaster master; private volatile boolean initialized = false; - public NamespaceStateManager(MasterServices masterServices) { + public NamespaceStateManager(HMaster master) { nsStateCache = new ConcurrentHashMap<>(); - master = masterServices; + this.master = master; } /** @@ -184,7 +184,7 @@ class NamespaceStateManager { /** * Delete the namespace state. 
* - * @param An instance of NamespaceTableAndRegionInfo + * @param namespace instance of NamespaceTableAndRegionInfo */ void deleteNamespace(String namespace) { this.nsStateCache.remove(namespace); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/MasterProcedureManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/MasterProcedureManager.java index 89e55d136d..f4f5efdcd1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/MasterProcedureManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/MasterProcedureManager.java @@ -19,10 +19,10 @@ package org.apache.hadoop.hbase.procedure; import java.io.IOException; +import org.apache.hadoop.hbase.master.HMaster; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; import org.apache.hadoop.hbase.Stoppable; -import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.master.MetricsMaster; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription; import org.apache.zookeeper.KeeperException; @@ -54,8 +54,7 @@ import org.apache.zookeeper.KeeperException; */ @InterfaceAudience.Private @InterfaceStability.Evolving -public abstract class MasterProcedureManager extends ProcedureManager implements - Stoppable { +public abstract class MasterProcedureManager extends ProcedureManager implements Stoppable { /** * Initialize a globally barriered procedure for master. 
* @@ -64,7 +63,7 @@ public abstract class MasterProcedureManager extends ProcedureManager implements * @throws IOException * @throws UnsupportedOperationException */ - public abstract void initialize(MasterServices master, MetricsMaster metricsMaster) + public abstract void initialize(HMaster master, MetricsMaster metricsMaster) throws KeeperException, IOException, UnsupportedOperationException; /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/MasterProcedureManagerHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/MasterProcedureManagerHost.java index 222c9334b8..dcde197717 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/MasterProcedureManagerHost.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/MasterProcedureManagerHost.java @@ -21,8 +21,9 @@ import java.io.IOException; import java.util.Hashtable; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.master.MasterServices; +import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.MetricsMaster; +import org.apache.yetus.audience.InterfaceAudience; import org.apache.zookeeper.KeeperException; /** @@ -30,9 +31,8 @@ import org.apache.zookeeper.KeeperException; * master oriented operations. {@link org.apache.hadoop.hbase.master.HMaster} * interacts with the loaded procedure manager through this class. 
*/ -public class MasterProcedureManagerHost extends - ProcedureManagerHost { - +@InterfaceAudience.Private +public class MasterProcedureManagerHost extends ProcedureManagerHost { private Hashtable procedureMgrMap = new Hashtable<>(); @Override @@ -43,7 +43,7 @@ public class MasterProcedureManagerHost extends } } - public void initialize(MasterServices master, final MetricsMaster metricsMaster) + public void initialize(HMaster master, final MetricsMaster metricsMaster) throws KeeperException, IOException, UnsupportedOperationException { for (MasterProcedureManager mpm : getProcedureManagers()) { mpm.initialize(master, metricsMaster); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureManager.java index 8627ee584a..5e5d23c038 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureManager.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -23,7 +23,6 @@ import org.apache.yetus.audience.InterfaceStability; @InterfaceAudience.Private @InterfaceStability.Evolving public abstract class ProcedureManager { - /** * Return the unique signature of the procedure. This signature uniquely * identifies the procedure. 
By default, this signature is the string used in diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/RegionServerProcedureManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/RegionServerProcedureManager.java index 81f24752cc..857a384408 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/RegionServerProcedureManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/RegionServerProcedureManager.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.procedure; import java.io.IOException; +import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; import org.apache.hadoop.hbase.regionserver.RegionServerServices; @@ -34,10 +35,10 @@ public abstract class RegionServerProcedureManager extends ProcedureManager { /** * Initialize a globally barriered procedure for region servers. * - * @param rss Region Server service interface + * @param hrs Region Server service interface * @throws KeeperException */ - public abstract void initialize(RegionServerServices rss) throws KeeperException; + public abstract void initialize(HRegionServer hrs) throws KeeperException; /** * Start accepting procedure requests. 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/RegionServerProcedureManagerHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/RegionServerProcedureManagerHost.java index 0f4ea64586..ed002ebdc6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/RegionServerProcedureManagerHost.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/RegionServerProcedureManagerHost.java @@ -23,8 +23,10 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.procedure.flush.RegionServerFlushTableProcedureManager; +import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.regionserver.RegionServerServices; import org.apache.hadoop.hbase.regionserver.snapshot.RegionServerSnapshotManager; +import org.apache.yetus.audience.InterfaceAudience; import org.apache.zookeeper.KeeperException; /** @@ -33,16 +35,17 @@ import org.apache.zookeeper.KeeperException; * {@link org.apache.hadoop.hbase.regionserver.HRegionServer} interacts * with the loaded procedure manager through this class. 
*/ +@InterfaceAudience.Private public class RegionServerProcedureManagerHost extends ProcedureManagerHost { private static final Log LOG = LogFactory .getLog(RegionServerProcedureManagerHost.class); - public void initialize(RegionServerServices rss) throws KeeperException { + public void initialize(HRegionServer hrs) throws KeeperException { for (RegionServerProcedureManager proc : procedures) { LOG.debug("Procedure " + proc.getProcedureSignature() + " is initializing"); - proc.initialize(rss); + proc.initialize(hrs); LOG.debug("Procedure " + proc.getProcedureSignature() + " is initialized"); } } @@ -74,5 +77,4 @@ public class RegionServerProcedureManagerHost extends // load the default flush region procedure manager procedures.add(new RegionServerFlushTableProcedureManager()); } - } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/MasterFlushTableProcedureManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/MasterFlushTableProcedureManager.java index 66f9240f7b..af5b0787a3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/MasterFlushTableProcedureManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/MasterFlushTableProcedureManager.java @@ -36,7 +36,7 @@ import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.errorhandling.ForeignException; import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher; import org.apache.hadoop.hbase.master.MasterCoprocessorHost; -import org.apache.hadoop.hbase.master.MasterServices; +import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.MetricsMaster; import org.apache.hadoop.hbase.procedure.MasterProcedureManager; import org.apache.hadoop.hbase.procedure.Procedure; @@ -67,7 +67,7 @@ public class MasterFlushTableProcedureManager extends MasterProcedureManager { private static final Log LOG = 
LogFactory.getLog(MasterFlushTableProcedureManager.class); - private MasterServices master; + private HMaster master; private ProcedureCoordinator coordinator; private Map procMap = new HashMap<>(); private boolean stopped; @@ -86,7 +86,7 @@ public class MasterFlushTableProcedureManager extends MasterProcedureManager { } @Override - public void initialize(MasterServices master, MetricsMaster metricsMaster) + public void initialize(HMaster master, MetricsMaster metricsMaster) throws KeeperException, IOException, UnsupportedOperationException { this.master = master; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java index 49192e18e6..4e6b221ad2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java @@ -48,14 +48,13 @@ import org.apache.hadoop.hbase.procedure.SubprocedureFactory; import org.apache.hadoop.hbase.procedure.ZKProcedureMemberRpcs; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.regionserver.Region; -import org.apache.hadoop.hbase.regionserver.RegionServerServices; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.apache.zookeeper.KeeperException; /** * This manager class handles flushing of the regions for table on a {@link HRegionServer}. 
 */ -@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) +@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) public class RegionServerFlushTableProcedureManager extends RegionServerProcedureManager { private static final Log LOG = LogFactory.getLog(RegionServerFlushTableProcedureManager.class); @@ -75,20 +74,20 @@ public class RegionServerFlushTableProcedureManager extends RegionServerProcedur "hbase.flush.procedure.region.wakefrequency"; private static final long FLUSH_REQUEST_WAKE_MILLIS_DEFAULT = 500; - private RegionServerServices rss; + private HRegionServer hrs; private ProcedureMemberRpcs memberRpcs; private ProcedureMember member; /** * Exposed for testing. * @param conf HBase configuration. - * @param server region server. + * @param hrs region server. * @param memberRpc use specified memberRpc instance * @param procMember use specified ProcedureMember */ - RegionServerFlushTableProcedureManager(Configuration conf, HRegionServer server, + RegionServerFlushTableProcedureManager(Configuration conf, HRegionServer hrs, ProcedureMemberRpcs memberRpc, ProcedureMember procMember) { - this.rss = server; + this.hrs = hrs; this.memberRpcs = memberRpc; this.member = procMember; } @@ -100,8 +99,8 @@ public class RegionServerFlushTableProcedureManager extends RegionServerProcedur */ @Override public void start() { - LOG.debug("Start region server flush procedure manager " + rss.getServerName().toString()); - this.memberRpcs.start(rss.getServerName().toString(), member); + LOG.debug("Start region server flush procedure manager " + hrs.getServerName().toString()); + this.memberRpcs.start(hrs.getServerName().toString(), member); } /** @@ -133,9 +132,9 @@ public class RegionServerFlushTableProcedureManager extends RegionServerProcedur public Subprocedure buildSubprocedure(String table) { // don't run the subprocedure if the parent is stop(ping) - if (rss.isStopping() || rss.isStopped()) { + if (hrs.isStopping() || hrs.isStopped()) { throw new 
IllegalStateException("Can't start flush region subprocedure on RS: " - + rss.getServerName() + ", because stopping/stopped!"); + + hrs.getServerName() + ", because stopping/stopped!"); } // check to see if this server is hosting any regions for the table @@ -152,14 +151,14 @@ public class RegionServerFlushTableProcedureManager extends RegionServerProcedur LOG.debug("Launching subprocedure to flush regions for " + table); ForeignExceptionDispatcher exnDispatcher = new ForeignExceptionDispatcher(table); - Configuration conf = rss.getConfiguration(); + Configuration conf = hrs.getConfiguration(); long timeoutMillis = conf.getLong(FLUSH_TIMEOUT_MILLIS_KEY, FLUSH_TIMEOUT_MILLIS_DEFAULT); long wakeMillis = conf.getLong(FLUSH_REQUEST_WAKE_MILLIS_KEY, FLUSH_REQUEST_WAKE_MILLIS_DEFAULT); FlushTableSubprocedurePool taskManager = - new FlushTableSubprocedurePool(rss.getServerName().toString(), conf, rss); + new FlushTableSubprocedurePool(hrs.getServerName().toString(), conf, hrs); return new FlushTableSubprocedure(member, exnDispatcher, wakeMillis, timeoutMillis, involvedRegions, table, taskManager); } @@ -175,7 +174,7 @@ public class RegionServerFlushTableProcedureManager extends RegionServerProcedur * @throws IOException */ private List getRegionsToFlush(String table) throws IOException { - return rss.getRegions(TableName.valueOf(table)); + return hrs.getRegions(TableName.valueOf(table)); } public class FlushTableSubprocedureBuilder implements SubprocedureFactory { @@ -313,22 +312,22 @@ public class RegionServerFlushTableProcedureManager extends RegionServerProcedur /** * Initialize this region server flush procedure manager * Uses a zookeeper based member controller. 
- * @param rss region server + * @param hrs region server * @throws KeeperException if the zookeeper cannot be reached */ @Override - public void initialize(RegionServerServices rss) throws KeeperException { - this.rss = rss; - ZooKeeperWatcher zkw = rss.getZooKeeper(); + public void initialize(HRegionServer hrs) throws KeeperException { + this.hrs = hrs; + ZooKeeperWatcher zkw = hrs.getZooKeeper(); this.memberRpcs = new ZKProcedureMemberRpcs(zkw, MasterFlushTableProcedureManager.FLUSH_TABLE_PROCEDURE_SIGNATURE); - Configuration conf = rss.getConfiguration(); + Configuration conf = hrs.getConfiguration(); long keepAlive = conf.getLong(FLUSH_TIMEOUT_MILLIS_KEY, FLUSH_TIMEOUT_MILLIS_DEFAULT); int opThreads = conf.getInt(FLUSH_REQUEST_THREADS_KEY, FLUSH_REQUEST_THREADS_DEFAULT); // create the actual flush table procedure member - ThreadPoolExecutor pool = ProcedureMember.defaultPool(rss.getServerName().toString(), + ThreadPoolExecutor pool = ProcedureMember.defaultPool(hrs.getServerName().toString(), opThreads, keepAlive); this.member = new ProcedureMember(memberRpcs, pool, new FlushTableSubprocedureBuilder()); } @@ -337,5 +336,4 @@ public class RegionServerFlushTableProcedureManager extends RegionServerProcedur public String getProcedureSignature() { return MasterFlushTableProcedureManager.FLUSH_TABLE_PROCEDURE_SIGNATURE; } - } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java index 0587cc7062..008b37f74d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java @@ -36,7 +36,7 @@ import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.RegionStateListener; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.RegionInfo; -import 
org.apache.hadoop.hbase.master.MasterServices; +import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.namespace.NamespaceAuditor; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.yetus.audience.InterfaceAudience; @@ -63,7 +63,7 @@ public class MasterQuotaManager implements RegionStateListener { private static final Map EMPTY_MAP = Collections.unmodifiableMap( new HashMap<>()); - private final MasterServices masterServices; + private final HMaster master; private NamedLock namespaceLocks; private NamedLock tableLocks; private NamedLock userLocks; @@ -71,19 +71,19 @@ public class MasterQuotaManager implements RegionStateListener { private NamespaceAuditor namespaceQuotaManager; private ConcurrentHashMap regionSizes; - public MasterQuotaManager(final MasterServices masterServices) { - this.masterServices = masterServices; + public MasterQuotaManager(final HMaster master) { + this.master = master; } public void start() throws IOException { // If the user doesn't want the quota support skip all the initializations. - if (!QuotaUtil.isQuotaEnabled(masterServices.getConfiguration())) { + if (!QuotaUtil.isQuotaEnabled(this.master.getConfiguration())) { LOG.info("Quota support disabled"); return; } // Create the quota table if missing - if (!MetaTableAccessor.tableExists(masterServices.getConnection(), + if (!MetaTableAccessor.tableExists(this.master.getConnection(), QuotaUtil.QUOTA_TABLE_NAME)) { LOG.info("Quota table not found. 
Creating..."); createQuotaTable(); @@ -95,7 +95,7 @@ public class MasterQuotaManager implements RegionStateListener { userLocks = new NamedLock<>(); regionSizes = new ConcurrentHashMap<>(); - namespaceQuotaManager = new NamespaceAuditor(masterServices); + namespaceQuotaManager = new NamespaceAuditor(this.master); namespaceQuotaManager.start(); initialized = true; } @@ -155,23 +155,23 @@ public class MasterQuotaManager implements RegionStateListener { @Override public GlobalQuotaSettings fetch() throws IOException { return new GlobalQuotaSettings(req.getUserName(), null, null, QuotaUtil.getUserQuota( - masterServices.getConnection(), userName)); + master.getConnection(), userName)); } @Override public void update(GlobalQuotaSettings quotaPojo) throws IOException { - QuotaUtil.addUserQuota(masterServices.getConnection(), userName, quotaPojo.toQuotas()); + QuotaUtil.addUserQuota(master.getConnection(), userName, quotaPojo.toQuotas()); } @Override public void delete() throws IOException { - QuotaUtil.deleteUserQuota(masterServices.getConnection(), userName); + QuotaUtil.deleteUserQuota(master.getConnection(), userName); } @Override public void preApply(GlobalQuotaSettings quotaPojo) throws IOException { - masterServices.getMasterCoprocessorHost().preSetUserQuota(userName, quotaPojo); + master.getMasterCoprocessorHost().preSetUserQuota(userName, quotaPojo); } @Override public void postApply(GlobalQuotaSettings quotaPojo) throws IOException { - masterServices.getMasterCoprocessorHost().postSetUserQuota(userName, quotaPojo); + master.getMasterCoprocessorHost().postSetUserQuota(userName, quotaPojo); } }); } @@ -182,24 +182,24 @@ public class MasterQuotaManager implements RegionStateListener { @Override public GlobalQuotaSettings fetch() throws IOException { return new GlobalQuotaSettings(userName, table, null, QuotaUtil.getUserQuota( - masterServices.getConnection(), userName, table)); + master.getConnection(), userName, table)); } @Override public void 
update(GlobalQuotaSettings quotaPojo) throws IOException { - QuotaUtil.addUserQuota(masterServices.getConnection(), userName, table, + QuotaUtil.addUserQuota(master.getConnection(), userName, table, quotaPojo.toQuotas()); } @Override public void delete() throws IOException { - QuotaUtil.deleteUserQuota(masterServices.getConnection(), userName, table); + QuotaUtil.deleteUserQuota(master.getConnection(), userName, table); } @Override public void preApply(GlobalQuotaSettings quotaPojo) throws IOException { - masterServices.getMasterCoprocessorHost().preSetUserQuota(userName, table, quotaPojo); + master.getMasterCoprocessorHost().preSetUserQuota(userName, table, quotaPojo); } @Override public void postApply(GlobalQuotaSettings quotaPojo) throws IOException { - masterServices.getMasterCoprocessorHost().postSetUserQuota(userName, table, quotaPojo); + master.getMasterCoprocessorHost().postSetUserQuota(userName, table, quotaPojo); } }); } @@ -210,26 +210,25 @@ public class MasterQuotaManager implements RegionStateListener { @Override public GlobalQuotaSettings fetch() throws IOException { return new GlobalQuotaSettings(userName, null, namespace, QuotaUtil.getUserQuota( - masterServices.getConnection(), userName, namespace)); + master.getConnection(), userName, namespace)); } @Override public void update(GlobalQuotaSettings quotaPojo) throws IOException { - QuotaUtil.addUserQuota(masterServices.getConnection(), userName, namespace, + QuotaUtil.addUserQuota(master.getConnection(), userName, namespace, quotaPojo.toQuotas()); } @Override public void delete() throws IOException { - QuotaUtil.deleteUserQuota(masterServices.getConnection(), userName, namespace); + QuotaUtil.deleteUserQuota(master.getConnection(), userName, namespace); } @Override public void preApply(GlobalQuotaSettings quotaPojo) throws IOException { - masterServices.getMasterCoprocessorHost().preSetUserQuota( + master.getMasterCoprocessorHost().preSetUserQuota( userName, namespace, quotaPojo); } @Override 
public void postApply(GlobalQuotaSettings quotaPojo) throws IOException { - masterServices.getMasterCoprocessorHost().postSetUserQuota( - userName, namespace, quotaPojo); + master.getMasterCoprocessorHost().postSetUserQuota(userName, namespace, quotaPojo); } }); } @@ -240,23 +239,23 @@ public class MasterQuotaManager implements RegionStateListener { @Override public GlobalQuotaSettings fetch() throws IOException { return new GlobalQuotaSettings(null, table, null, QuotaUtil.getTableQuota( - masterServices.getConnection(), table)); + master.getConnection(), table)); } @Override public void update(GlobalQuotaSettings quotaPojo) throws IOException { - QuotaUtil.addTableQuota(masterServices.getConnection(), table, quotaPojo.toQuotas()); + QuotaUtil.addTableQuota(master.getConnection(), table, quotaPojo.toQuotas()); } @Override public void delete() throws IOException { - QuotaUtil.deleteTableQuota(masterServices.getConnection(), table); + QuotaUtil.deleteTableQuota(master.getConnection(), table); } @Override public void preApply(GlobalQuotaSettings quotaPojo) throws IOException { - masterServices.getMasterCoprocessorHost().preSetTableQuota(table, quotaPojo); + master.getMasterCoprocessorHost().preSetTableQuota(table, quotaPojo); } @Override public void postApply(GlobalQuotaSettings quotaPojo) throws IOException { - masterServices.getMasterCoprocessorHost().postSetTableQuota(table, quotaPojo); + master.getMasterCoprocessorHost().postSetTableQuota(table, quotaPojo); } }); } @@ -266,25 +265,25 @@ public class MasterQuotaManager implements RegionStateListener { setQuota(req, new SetQuotaOperations() { @Override public GlobalQuotaSettings fetch() throws IOException { - return new GlobalQuotaSettings(null, null, namespace, QuotaUtil.getNamespaceQuota( - masterServices.getConnection(), namespace)); + return new GlobalQuotaSettings(null, null, namespace, + QuotaUtil.getNamespaceQuota(master.getConnection(), namespace)); } @Override public void update(GlobalQuotaSettings 
quotaPojo) throws IOException { - QuotaUtil.addNamespaceQuota(masterServices.getConnection(), namespace, + QuotaUtil.addNamespaceQuota(master.getConnection(), namespace, ((GlobalQuotaSettings) quotaPojo).toQuotas()); } @Override public void delete() throws IOException { - QuotaUtil.deleteNamespaceQuota(masterServices.getConnection(), namespace); + QuotaUtil.deleteNamespaceQuota(master.getConnection(), namespace); } @Override public void preApply(GlobalQuotaSettings quotaPojo) throws IOException { - masterServices.getMasterCoprocessorHost().preSetNamespaceQuota(namespace, quotaPojo); + master.getMasterCoprocessorHost().preSetNamespaceQuota(namespace, quotaPojo); } @Override public void postApply(GlobalQuotaSettings quotaPojo) throws IOException { - masterServices.getMasterCoprocessorHost().postSetNamespaceQuota(namespace, quotaPojo); + master.getMasterCoprocessorHost().postSetNamespaceQuota(namespace, quotaPojo); } }); } @@ -429,12 +428,12 @@ public class MasterQuotaManager implements RegionStateListener { */ private void checkQuotaSupport() throws IOException { - if (!QuotaUtil.isQuotaEnabled(masterServices.getConfiguration())) { + if (!QuotaUtil.isQuotaEnabled(master.getConfiguration())) { throw new DoNotRetryIOException( new UnsupportedOperationException("quota support disabled")); } if (!initialized) { - long maxWaitTime = masterServices.getConfiguration().getLong( + long maxWaitTime = master.getConfiguration().getLong( "hbase.master.wait.for.quota.manager.init", 30000); // default is 30 seconds. 
long startTime = EnvironmentEdgeManager.currentTime(); do { @@ -452,7 +451,7 @@ public class MasterQuotaManager implements RegionStateListener { } private void createQuotaTable() throws IOException { - masterServices.createSystemTable(QuotaUtil.QUOTA_TABLE_DESC); + master.createSystemTable(QuotaUtil.QUOTA_TABLE_DESC); } private static class NamedLock { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerSpaceQuotaManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerSpaceQuotaManager.java index 80bbdc3626..bfe3619eea 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerSpaceQuotaManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerSpaceQuotaManager.java @@ -79,7 +79,7 @@ public class RegionServerSpaceQuotaManager { LOG.warn("RegionServerSpaceQuotaManager has already been started!"); return; } - this.spaceQuotaRefresher = new SpaceQuotaRefresherChore(this, rsServices.getClusterConnection()); + this.spaceQuotaRefresher = new SpaceQuotaRefresherChore(this, rsServices.getConnection()); rsServices.getChoreService().scheduleChore(spaceQuotaRefresher); started = true; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/policies/DisableTableViolationPolicyEnforcement.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/policies/DisableTableViolationPolicyEnforcement.java index 806cc763f6..6c6564a091 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/policies/DisableTableViolationPolicyEnforcement.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/policies/DisableTableViolationPolicyEnforcement.java @@ -42,7 +42,7 @@ public class DisableTableViolationPolicyEnforcement extends DefaultViolationPoli if (LOG.isTraceEnabled()) { LOG.trace("Starting disable of " + getTableName()); } - getRegionServerServices().getClusterConnection().getAdmin().disableTable(getTableName()); + 
getRegionServerServices().getConnection().getAdmin().disableTable(getTableName()); if (LOG.isTraceEnabled()) { LOG.trace("Disable is complete for " + getTableName()); } @@ -57,7 +57,7 @@ public class DisableTableViolationPolicyEnforcement extends DefaultViolationPoli if (LOG.isTraceEnabled()) { LOG.trace("Starting enable of " + getTableName()); } - getRegionServerServices().getClusterConnection().getAdmin().enableTable(getTableName()); + getRegionServerServices().getConnection().getAdmin().enableTable(getTableName()); if (LOG.isTraceEnabled()) { LOG.trace("Enable is complete for " + getTableName()); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index 16895bf96c..9779031a81 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -1,4 +1,4 @@ -/** +/* * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file @@ -1988,7 +1988,7 @@ public class HRegionServer extends HasThread implements sinkConf.setInt(HConstants.HBASE_RPC_TIMEOUT_KEY, conf.getInt("hbase.log.replay.rpc.timeout", 30000)); // default 30 seconds sinkConf.setInt("hbase.client.serverside.retries.multiplier", 1); - this.splitLogWorker = new SplitLogWorker(this, sinkConf, this, this, walFactory); + this.splitLogWorker = new SplitLogWorker(this, sinkConf, this, walFactory); splitLogWorker.start(); } @@ -2100,12 +2100,10 @@ public class HRegionServer extends HasThread implements return getClusterConnection(); } - @Override public ClusterConnection getClusterConnection() { return this.clusterConnection; } - @Override public MetaTableLocator getMetaTableLocator() { return this.metaTableLocator; } @@ -2853,7 +2851,7 @@ public class HRegionServer extends HasThread implements @Override public String toString() { - return getServerName().toString(); + return getServerName() == null? "null": getServerName().toString(); } /** @@ -2865,12 +2863,10 @@ public class HRegionServer extends HasThread implements return threadWakeFrequency; } - @Override public ZooKeeperWatcher getZooKeeper() { return zooKeeper; } - @Override public BaseCoordinatedStateManager getCoordinatedStateManager() { return csm; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java index e3ba2fa8da..06c7a61e5c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java @@ -69,6 +69,10 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Coprocesso * constructed, it holds a read lock until it is closed. A close takes out a * write lock and consequently will block for ongoing operations and will block * new operations from starting while the close is in progress. + * + *

Below is a carefully-curated sub-set of HRegion methods exposed to Coprocessors. + * Be judicious adding API. This Interface is for Coprocessors only! Internally, we make + * use of the Region implementation and NOT this Interface. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) @InterfaceStability.Evolving diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java index b21d55af13..a0509ee453 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java @@ -42,6 +42,9 @@ import com.google.protobuf.Service; /** * Services provided by {@link HRegionServer} + * Below is a carefully-curated list of methods we expose to Coprocessors. + * Be judicious adding API. This Interface is for Coprocessors only! Internally, we make use of the + * RegionServer implementation and NOT this Interface. */ @InterfaceAudience.Private public interface RegionServerServices diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationService.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationService.java index f3bc18869a..3bc2cf1063 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationService.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationService.java @@ -39,7 +39,7 @@ public interface ReplicationService { * Initializes the replication service object.
* @throws IOException */ - void initialize(Server rs, FileSystem fs, Path logdir, Path oldLogDir, + void initialize(Server server, FileSystem fs, Path logdir, Path oldLogDir, WALFileLengthProvider walFileLengthProvider) throws IOException; /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java index 96e9cf5193..bad28a4d0e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java @@ -1,4 +1,4 @@ -/** +/* * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file @@ -30,7 +30,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.NotServingRegionException; -import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.client.RetriesExhaustedException; import org.apache.hadoop.hbase.coordination.BaseCoordinatedStateManager; import org.apache.hadoop.hbase.coordination.SplitLogWorkerCoordination; @@ -71,21 +70,17 @@ public class SplitLogWorker implements Runnable { private Configuration conf; private RegionServerServices server; - public SplitLogWorker(Server hserver, Configuration conf, RegionServerServices server, - TaskExecutor splitTaskExecutor) { - this.server = server; + public SplitLogWorker(HRegionServer hrs, Configuration conf, TaskExecutor splitTaskExecutor) { + this.server = hrs; this.conf = conf; - this.coordination = - ((BaseCoordinatedStateManager) hserver.getCoordinatedStateManager()) + this.coordination = ((BaseCoordinatedStateManager) hrs.getCoordinatedStateManager()) .getSplitLogWorkerCoordination(); - this.server = server; coordination.init(server, conf, splitTaskExecutor, this); } - public SplitLogWorker(final Server 
hserver, final Configuration conf, - final RegionServerServices server, final LastSequenceId sequenceIdChecker, - final WALFactory factory) { - this(server, conf, server, new TaskExecutor() { + public SplitLogWorker(final HRegionServer hrs, final Configuration conf, + final LastSequenceId sequenceIdChecker, final WALFactory factory) { + this(hrs, conf, new TaskExecutor() { @Override public Status exec(String filename, RecoveryMode mode, CancelableProgressable p) { Path walDir; @@ -102,7 +97,7 @@ public class SplitLogWorker implements Runnable { // encountered a bad non-retry-able persistent error. try { if (!WALSplitter.splitLogFile(walDir, fs.getFileStatus(new Path(walDir, filename)), - fs, conf, p, sequenceIdChecker, server.getCoordinatedStateManager(), mode, factory)) { + fs, conf, p, sequenceIdChecker, hrs.getCoordinatedStateManager(), mode, factory)) { return Status.PREEMPTED; } } catch (InterruptedIOException iioe) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java index 6f6f31c332..dd9722e500 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java @@ -36,6 +36,9 @@ import org.apache.yetus.audience.InterfaceStability; /** * Interface for objects that hold a column family in a Region. Its a memstore and a set of zero or * more StoreFiles, which stretch backwards over time. + *

Below is a carefully-curated list of methods we expose to Coprocessors. + * Be judicious adding API. This Interface is for Coprocessors only! Internally, we make use of the + * Store implementation and NOT this Interface. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) @InterfaceStability.Evolving diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java index 775d63f903..54fb39f337 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java @@ -96,7 +96,7 @@ public class RegionServerSnapshotManager extends RegionServerProcedureManager { /** Default amount of time to check for errors while regions finish snapshotting */ private static final long SNAPSHOT_REQUEST_WAKE_MILLIS_DEFAULT = 500; - private RegionServerServices rss; + private RegionServerServices hrs; private ProcedureMemberRpcs memberRpcs; private ProcedureMember member; @@ -109,7 +109,7 @@ public class RegionServerSnapshotManager extends RegionServerProcedureManager { */ RegionServerSnapshotManager(Configuration conf, HRegionServer parent, ProcedureMemberRpcs memberRpc, ProcedureMember procMember) { - this.rss = parent; + this.hrs = parent; this.memberRpcs = memberRpc; this.member = procMember; } @@ -121,8 +121,9 @@ public class RegionServerSnapshotManager extends RegionServerProcedureManager { */ @Override public void start() { - LOG.debug("Start Snapshot Manager " + rss.getServerName().toString()); - this.memberRpcs.start(rss.getServerName().toString(), member); + String name = hrs.getServerName().toString(); + LOG.debug("Start Snapshot Manager " + name); + this.memberRpcs.start(name, member); } /** @@ -155,8 +156,8 @@ public class 
RegionServerSnapshotManager extends RegionServerProcedureManager { public Subprocedure buildSubprocedure(SnapshotDescription snapshot) { // don't run a snapshot if the parent is stop(ping) - if (rss.isStopping() || rss.isStopped()) { - throw new IllegalStateException("Can't start snapshot on RS: " + rss.getServerName() + if (hrs.isStopping() || hrs.isStopped()) { + throw new IllegalStateException("Can't start snapshot on RS: " + hrs.getServerName() + ", because stopping/stopped!"); } @@ -177,7 +178,7 @@ public class RegionServerSnapshotManager extends RegionServerProcedureManager { LOG.debug("Launching subprocedure for snapshot " + snapshot.getName() + " from table " + snapshot.getTable() + " type " + snapshot.getType()); ForeignExceptionDispatcher exnDispatcher = new ForeignExceptionDispatcher(snapshot.getName()); - Configuration conf = rss.getConfiguration(); + Configuration conf = hrs.getConfiguration(); long timeoutMillis = conf.getLong(SNAPSHOT_TIMEOUT_MILLIS_KEY, SNAPSHOT_TIMEOUT_MILLIS_DEFAULT); long wakeMillis = conf.getLong(SNAPSHOT_REQUEST_WAKE_MILLIS_KEY, @@ -186,7 +187,7 @@ public class RegionServerSnapshotManager extends RegionServerProcedureManager { switch (snapshot.getType()) { case FLUSH: SnapshotSubprocedurePool taskManager = - new SnapshotSubprocedurePool(rss.getServerName().toString(), conf, rss); + new SnapshotSubprocedurePool(hrs.getServerName().toString(), conf, hrs); return new FlushSnapshotSubprocedure(member, exnDispatcher, wakeMillis, timeoutMillis, involvedRegions, snapshot, taskManager); case SKIPFLUSH: @@ -198,7 +199,7 @@ public class RegionServerSnapshotManager extends RegionServerProcedureManager { * To minimized the code change, class name is not changed. 
*/ SnapshotSubprocedurePool taskManager2 = - new SnapshotSubprocedurePool(rss.getServerName().toString(), conf, rss); + new SnapshotSubprocedurePool(hrs.getServerName().toString(), conf, hrs); return new FlushSnapshotSubprocedure(member, exnDispatcher, wakeMillis, timeoutMillis, involvedRegions, snapshot, taskManager2); @@ -223,7 +224,7 @@ public class RegionServerSnapshotManager extends RegionServerProcedureManager { * @throws IOException */ private List getRegionsToSnapshot(SnapshotDescription snapshot) throws IOException { - List onlineRegions = rss.getRegions(TableName.valueOf(snapshot.getTable())); + List onlineRegions = hrs.getRegions(TableName.valueOf(snapshot.getTable())); Iterator iterator = onlineRegions.iterator(); // remove the non-default regions while (iterator.hasNext()) { @@ -386,23 +387,23 @@ public class RegionServerSnapshotManager extends RegionServerProcedureManager { /** * Create a default snapshot handler - uses a zookeeper based member controller. - * @param rss region server running the handler + * @param hrs region server running the handler * @throws KeeperException if the zookeeper cluster cannot be reached */ @Override - public void initialize(RegionServerServices rss) throws KeeperException { - this.rss = rss; - ZooKeeperWatcher zkw = rss.getZooKeeper(); + public void initialize(HRegionServer hrs) throws KeeperException { + this.hrs = hrs; + ZooKeeperWatcher zkw = hrs.getZooKeeper(); this.memberRpcs = new ZKProcedureMemberRpcs(zkw, SnapshotManager.ONLINE_SNAPSHOT_CONTROLLER_DESCRIPTION); // read in the snapshot request configuration properties - Configuration conf = rss.getConfiguration(); + Configuration conf = hrs.getConfiguration(); long keepAlive = conf.getLong(SNAPSHOT_TIMEOUT_MILLIS_KEY, SNAPSHOT_TIMEOUT_MILLIS_DEFAULT); int opThreads = conf.getInt(SNAPSHOT_REQUEST_THREADS_KEY, SNAPSHOT_REQUEST_THREADS_DEFAULT); // create the actual snapshot procedure member - ThreadPoolExecutor pool = 
ProcedureMember.defaultPool(rss.getServerName().toString(), + ThreadPoolExecutor pool = ProcedureMember.defaultPool(hrs.getServerName().toString(), opThreads, keepAlive); this.member = new ProcedureMember(memberRpcs, pool, new SnapshotSubprocedureBuilder()); } @@ -411,5 +412,4 @@ public class RegionServerSnapshotManager extends RegionServerProcedureManager { public String getProcedureSignature() { return SnapshotManager.ONLINE_SNAPSHOT_CONTROLLER_DESCRIPTION; } - } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java index d26f2536b2..ffd92a7b5f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java @@ -31,6 +31,8 @@ import java.util.concurrent.TimeUnit; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.regionserver.HRegionServer; +import org.apache.hadoop.hbase.regionserver.RegionServerServices; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; import org.apache.hadoop.hbase.master.cleaner.HFileCleaner; @@ -95,7 +97,8 @@ public class Replication extends WALActionsListener.Base implements * @param oldLogDir directory where logs are archived * @throws IOException */ - public Replication(Server server, FileSystem fs, Path logDir, Path oldLogDir) throws IOException { + public Replication(Server server, FileSystem fs, Path logDir, Path oldLogDir) + throws IOException { initialize(server, fs, logDir, oldLogDir, p -> OptionalLong.empty()); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java 
index 7fdb252b37..a9a66a0f4c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java @@ -31,6 +31,8 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.ClusterConnection; +import org.apache.hadoop.hbase.regionserver.Region; +import org.apache.hadoop.hbase.regionserver.RegionServerServices; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; @@ -146,16 +148,6 @@ public class ReplicationSyncUp extends Configured implements Tool { } @Override - public CoordinatedStateManager getCoordinatedStateManager() { - return null; - } - - @Override - public MetaTableLocator getMetaTableLocator() { - return null; - } - - @Override public ServerName getServerName() { return ServerName.valueOf(hostname, 1234, 1L); } @@ -187,11 +179,5 @@ public class ReplicationSyncUp extends Configured implements Tool { public ChoreService getChoreService() { return null; } - - @Override - public ClusterConnection getClusterConnection() { - // TODO Auto-generated method stub - return null; - } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java index 9cdec3cd4f..61a4399a19 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java @@ -19,6 +19,11 @@ package org.apache.hadoop.hbase.security.access; import org.apache.hadoop.hbase.CompareOperator; +import org.apache.hadoop.hbase.client.Admin; +import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; +import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +import org.apache.hadoop.hbase.master.HMaster; +import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.shaded.com.google.common.collect.ArrayListMultimap; import org.apache.hadoop.hbase.shaded.com.google.common.collect.ListMultimap; import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists; @@ -43,9 +48,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.AuthUtil; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; -import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.Tag; @@ -63,10 +66,8 @@ import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.exceptions.DeserializationException; -import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp; import org.apache.hadoop.hbase.filter.QualifierFilter; import org.apache.hadoop.hbase.filter.RegexStringComparator; -import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos; import org.apache.hadoop.hbase.regionserver.BloomType; @@ -126,27 +127,6 @@ public class AccessControlLists { private static final Log LOG = LogFactory.getLog(AccessControlLists.class); /** - * Create the ACL table - * @param master - * @throws IOException - */ - static void createACLTable(MasterServices master) throws IOException { - /** Table descriptor for ACL table */ - final HTableDescriptor ACL_TABLEDESC = new HTableDescriptor(ACL_TABLE_NAME) - .addFamily(new 
HColumnDescriptor(ACL_LIST_FAMILY) - .setMaxVersions(1) - .setInMemory(true) - .setBlockCacheEnabled(true) - .setBlocksize(8 * 1024) - .setBloomFilterType(BloomType.NONE) - .setScope(HConstants.REPLICATION_SCOPE_LOCAL) - // Set cache data blocks in L1 if more than one cache tier deployed; e.g. this will - // be the case if we are using CombinedBlockCache (Bucket Cache). - .setCacheDataInL1(true)); - master.createSystemTable(ACL_TABLEDESC); - } - - /** * Stores a new user permission grant in the access control lists table. * @param conf the configuration * @param userPerm the details of the permission to be granted @@ -800,4 +780,23 @@ public class AccessControlLists { } return results; } + + static void createACLTable(MasterServices masterServices) throws IOException { + /** Table descriptor for ACL table */ + final TableDescriptor ACL_TABLEDESC = + TableDescriptorBuilder.newBuilder(AccessControlLists.ACL_TABLE_NAME).addColumnFamily( + ColumnFamilyDescriptorBuilder.newBuilder(AccessControlLists.ACL_LIST_FAMILY). + setMaxVersions(1). + setInMemory(true). + setBlockCacheEnabled(true). + setBlocksize(8 * 1024). + setBloomFilterType(BloomType.NONE). + setScope(HConstants.REPLICATION_SCOPE_LOCAL) + // Set cache data blocks in L1 if more than one cache tier deployed; e.g. this will + // be the case if we are using CombinedBlockCache (Bucket Cache). 
+ .setCacheDataInL1(true).build()).build(); + try (Admin admin = masterServices.getConnection().getAdmin()) { + admin.createTable(ACL_TABLEDESC); + } + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java index 6da09cd715..6bfc3e0b24 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java @@ -51,7 +51,9 @@ import org.apache.hadoop.hbase.CompoundConfiguration; import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HBaseInterfaceAudience; +import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValue.Type; import org.apache.hadoop.hbase.MetaTableAccessor; @@ -61,6 +63,7 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.client.Append; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Get; @@ -75,6 +78,7 @@ import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.SnapshotDescription; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.client.TableDescriptorBuilder; import org.apache.hadoop.hbase.coprocessor.BulkLoadObserver; import org.apache.hadoop.hbase.coprocessor.CoprocessorException; import org.apache.hadoop.hbase.coprocessor.EndpointObserver; @@ -89,11 +93,13 @@ 
import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessor; import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.RegionServerObserver; import org.apache.hadoop.hbase.filter.ByteArrayComparable; +import org.apache.hadoop.hbase.filter.ColumnCountGetFilter; import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.filter.FilterList; import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils; import org.apache.hadoop.hbase.ipc.RpcServer; +import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.master.locking.LockProcedure; import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; @@ -105,6 +111,7 @@ import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos; import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.AccessControlService; import org.apache.hadoop.hbase.quotas.GlobalQuotaSettings; +import org.apache.hadoop.hbase.regionserver.BloomType; import org.apache.hadoop.hbase.regionserver.InternalScanner; import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress; import org.apache.hadoop.hbase.regionserver.Region; @@ -2548,10 +2555,12 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor, if (regex == null && tableNamesList != null && !tableNamesList.isEmpty()) { // Otherwise, if the requestor has ADMIN or CREATE privs for all listed tables, the // request can be granted. - MasterServices masterServices = ctx.getEnvironment().getMasterServices(); + // TODO: PRESUMPTION! Doing this cast for now. AccessController needs to be integrated, not + // done as a coprocessor. 
+ HMaster master = (HMaster)ctx.getEnvironment().getMasterServices(); for (TableName tableName: tableNamesList) { // Skip checks for a table that does not exist - if (!masterServices.getTableStateManager().isTablePresent(tableName)) + if (!master.getTableStateManager().isTablePresent(tableName)) continue; requirePermission(getActiveUser(ctx), "getTableDescriptors", tableName, null, null, Action.ADMIN, Action.CREATE); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java index be04b18ef5..9b7dec40fc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java @@ -56,6 +56,8 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.TagType; import org.apache.hadoop.hbase.TagUtil; +import org.apache.hadoop.hbase.master.HMaster; +import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Append; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; import org.apache.hadoop.hbase.client.Delete; @@ -226,7 +228,8 @@ public class VisibilityController implements MasterCoprocessor, RegionCoprocesso DisabledRegionSplitPolicy.class.getName()); labelsTable.setValue(Bytes.toBytes(HConstants.DISALLOW_WRITES_IN_RECOVERING), Bytes.toBytes(true)); - master.createSystemTable(labelsTable); + // We should not be doing this out here. This cast is presumptuous. TODO.
+ ((HMaster)master).createSystemTable(labelsTable); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/RegionServerTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/RegionServerTracker.java index 47d504c03d..9251133462 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/RegionServerTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/RegionServerTracker.java @@ -28,6 +28,7 @@ import java.util.TreeMap; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.master.HMaster; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.master.ServerManager; @@ -49,14 +50,11 @@ import org.apache.zookeeper.KeeperException; public class RegionServerTracker extends ZooKeeperListener { private static final Log LOG = LogFactory.getLog(RegionServerTracker.class); private NavigableMap regionServers = new TreeMap<>(); - private ServerManager serverManager; - private MasterServices server; + private HMaster master; - public RegionServerTracker(ZooKeeperWatcher watcher, - MasterServices server, ServerManager serverManager) { - super(watcher); - this.server = server; - this.serverManager = serverManager; + public RegionServerTracker(HMaster master) { + super(master.getZooKeeper()); + this.master = master; } /** @@ -102,8 +100,8 @@ public class RegionServerTracker extends ZooKeeperListener { } } } - if (server.isInitialized()) { - server.checkIfShouldMoveSystemRegionAsync(); + if (this.master.isInitialized()) { + this.master.checkIfShouldMoveSystemRegionAsync(); } } @@ -120,28 +118,28 @@ public class RegionServerTracker extends ZooKeeperListener { LOG.info("RegionServer ephemeral node deleted, processing expiration [" + serverName + "]"); ServerName sn = ServerName.parseServerName(serverName); - if 
(!serverManager.isServerOnline(sn)) { + if (!this.master.getServerManager().isServerOnline(sn)) { LOG.warn(serverName.toString() + " is not online or isn't known to the master."+ "The latter could be caused by a DNS misconfiguration."); return; } remove(sn); - this.serverManager.expireServer(sn); + this.master.getServerManager().expireServer(sn); } } @Override public void nodeChildrenChanged(String path) { if (path.equals(watcher.znodePaths.rsZNode) - && !server.isAborted() && !server.isStopped()) { + && !this.master.isAborted() && !this.master.isStopped()) { try { List servers = ZKUtil.listChildrenAndWatchThem(watcher, watcher.znodePaths.rsZNode); refresh(servers); } catch (IOException e) { - server.abort("Unexpected zk exception getting RS nodes", e); + this.master.abort("Unexpected zk exception getting RS nodes", e); } catch (KeeperException e) { - server.abort("Unexpected zk exception getting RS nodes", e); + this.master.abort("Unexpected zk exception getting RS nodes", e); } } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java index 59ad6de4e9..6d6dea6649 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java index e453be2854..902521db57 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -51,7 +51,6 @@ import org.apache.hadoop.hbase.regionserver.ServerNonceManager; import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.wal.WAL; -import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.apache.zookeeper.KeeperException; @@ -166,21 +165,11 @@ public class MockRegionServerServices implements RegionServerServices { } @Override - public MetaTableLocator getMetaTableLocator() { - return null; - } - - @Override public ZooKeeperWatcher getZooKeeper() { return zkw; } @Override - public CoordinatedStateManager getCoordinatedStateManager() { - return null; - } - - @Override public RegionServerAccounting getRegionServerAccounting() { return null; } @@ -314,11 +303,6 @@ public class MockRegionServerServices implements RegionServerServices { } @Override - public ClusterConnection getClusterConnection() { - return null; - } - - @Override public ThroughputController getFlushThroughputController() { return null; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java index ca0a5ea06e..233e010dbc 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java @@ -37,6 +37,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.Coprocessor; +import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.coprocessor.ObserverContext; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorConfiguration.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorConfiguration.java index 6213e86bf5..80e936f8e7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorConfiguration.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorConfiguration.java @@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.MasterCoprocessorHost; import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.regionserver.HRegion; @@ -128,9 +129,9 @@ public class TestCoprocessorConfiguration { @Test public void testMasterCoprocessorHostDefaults() throws Exception { Configuration conf = new Configuration(CONF); - MasterServices masterServices = mock(MasterServices.class); + HMaster master = mock(HMaster.class); systemCoprocessorLoaded.set(false); - new MasterCoprocessorHost(masterServices, conf); + new MasterCoprocessorHost(master, conf); assertEquals("System coprocessors loading default was not honored", systemCoprocessorLoaded.get(), 
CoprocessorHost.DEFAULT_COPROCESSORS_ENABLED); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopHMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopHMaster.java new file mode 100644 index 0000000000..9f44a7f015 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopHMaster.java @@ -0,0 +1,126 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.master; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.ChoreService; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.client.ClusterConnection; +import org.apache.hadoop.hbase.client.MasterSwitchType; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.zookeeper.KeeperException; + +import java.io.IOException; + +public class MockNoopHMaster extends HMaster { + private final Configuration conf; + private final MetricsMaster metricsMaster; + + public MockNoopHMaster(final Configuration conf) throws IOException, KeeperException { + super(conf, null); + this.conf = conf; + this.metricsMaster = new MetricsMaster(new MetricsMasterWrapperImpl(null)); + } + + @Override + public ChoreService getChoreService() { + return null; + } + + @Override + public ZooKeeperWatcher getZooKeeper() { + return null; + } + + @Override + public ClusterConnection getConnection() { + return null; + } + + @Override + public Configuration getConfiguration() { + return conf; + } + + @Override + public ServerName getServerName() { + return ServerName.valueOf("mock.master", 12345, 1); + } + + @Override + public void abort(String why, Throwable e) { + //no-op + } + + @Override + public boolean isAborted() { + return false; + } + + private boolean stopped = false; + + @Override + public void stop(String why) { + stopped = true; + } + + @Override + public boolean isStopping() { + return stopped; + } + + @Override + public boolean isStopped() { + return stopped; + } + + @Override + public boolean isServerCrashProcessingEnabled() { + return false; + } + + @Override + public boolean isActiveMaster() { + return true; + } + + @Override + public boolean isInitialized() { + return false; + } + + @Override + public boolean isInMaintenanceMode() { + return false; + } + + @Override + public boolean isSplitOrMergeEnabled(MasterSwitchType switchType) { + return false; + } + + @Override + 
public String getRegionServerVersion(ServerName sn) { + return "0.0.0"; + } + + @Override + public String getClientIdAuditPrefix() { + return null; + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java deleted file mode 100644 index fda35634a0..0000000000 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java +++ /dev/null @@ -1,472 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hbase.master; - -import static org.mockito.Mockito.mock; - -import java.io.IOException; -import java.util.List; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.ChoreService; -import org.apache.hadoop.hbase.CoordinatedStateManager; -import org.apache.hadoop.hbase.Server; -import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.TableDescriptors; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.ClusterConnection; -import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; -import org.apache.hadoop.hbase.client.MasterSwitchType; -import org.apache.hadoop.hbase.client.RegionInfo; -import org.apache.hadoop.hbase.client.TableDescriptor; -import org.apache.hadoop.hbase.executor.ExecutorService; -import org.apache.hadoop.hbase.favored.FavoredNodesManager; -import org.apache.hadoop.hbase.master.assignment.AssignmentManager; -import org.apache.hadoop.hbase.master.locking.LockManager; -import org.apache.hadoop.hbase.master.normalizer.RegionNormalizer; -import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; -import org.apache.hadoop.hbase.master.snapshot.SnapshotManager; -import org.apache.hadoop.hbase.procedure.MasterProcedureManagerHost; -import org.apache.hadoop.hbase.procedure2.LockedResource; -import org.apache.hadoop.hbase.procedure2.Procedure; -import org.apache.hadoop.hbase.procedure2.ProcedureEvent; -import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; -import org.apache.hadoop.hbase.quotas.MasterQuotaManager; -import org.apache.hadoop.hbase.replication.ReplicationException; -import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; -import org.apache.hadoop.hbase.replication.ReplicationPeerDescription; -import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; - -import com.google.protobuf.Service; - -public class MockNoopMasterServices implements MasterServices, 
Server { - private final Configuration conf; - private final MetricsMaster metricsMaster; - - public MockNoopMasterServices() { - this(null); - } - - public MockNoopMasterServices(final Configuration conf) { - this.conf = conf; - this.metricsMaster = new MetricsMaster(new MetricsMasterWrapperImpl(null)); - } - - @Override - public void checkTableModifiable(TableName tableName) throws IOException { - //no-op - } - - @Override - public long createTable( - final TableDescriptor desc, - final byte[][] splitKeys, - final long nonceGroup, - final long nonce) throws IOException { - // no-op - return -1; - } - - @Override - public long createSystemTable(final TableDescriptor tableDescriptor) throws IOException { - return -1; - } - - @Override - public AssignmentManager getAssignmentManager() { - return null; - } - - @Override - public ExecutorService getExecutorService() { - return null; - } - - @Override - public ChoreService getChoreService() { - return null; - } - - @Override - public RegionNormalizer getRegionNormalizer() { - return null; - } - - @Override - public CatalogJanitor getCatalogJanitor() { - return null; - } - - @Override - public MasterFileSystem getMasterFileSystem() { - return null; - } - - @Override - public MasterWalManager getMasterWalManager() { - return null; - } - - @Override - public MasterCoprocessorHost getMasterCoprocessorHost() { - return null; - } - - @Override - public MasterQuotaManager getMasterQuotaManager() { - return null; - } - - @Override - public ProcedureExecutor getMasterProcedureExecutor() { - return null; - } - - @Override - public MetricsMaster getMasterMetrics() { - return metricsMaster; - } - - @Override - public ServerManager getServerManager() { - return null; - } - - @Override - public ZooKeeperWatcher getZooKeeper() { - return null; - } - - @Override - public CoordinatedStateManager getCoordinatedStateManager() { - return null; - } - - @Override - public MetaTableLocator getMetaTableLocator() { - return null; - } - - 
@Override - public ClusterConnection getConnection() { - return null; - } - - @Override - public Configuration getConfiguration() { - return conf; - } - - @Override - public ServerName getServerName() { - return ServerName.valueOf("mock.master", 12345, 1); - } - - @Override - public void abort(String why, Throwable e) { - //no-op - } - - @Override - public boolean isAborted() { - return false; - } - - private boolean stopped = false; - - @Override - public void stop(String why) { - stopped = true; - } - - @Override - public boolean isStopping() { - return stopped; - } - - @Override - public boolean isStopped() { - return stopped; - } - - @Override - public TableDescriptors getTableDescriptors() { - return null; - } - - @Override - public boolean isServerCrashProcessingEnabled() { - return true; - } - - @Override - public boolean registerService(Service instance) { - return false; - } - - @Override - public boolean abortProcedure(final long procId, final boolean mayInterruptIfRunning) - throws IOException { - return false; //To change body of implemented methods use File | Settings | File Templates. - } - - @Override - public List> getProcedures() throws IOException { - return null; //To change body of implemented methods use File | Settings | File Templates. - } - - @Override - public List getLocks() throws IOException { - return null; - } - - @Override - public List listTableDescriptorsByNamespace(String name) throws IOException { - return null; //To change body of implemented methods use File | Settings | File Templates. 
- } - - @Override - public List listTableNamesByNamespace(String name) throws IOException { - return null; - } - - @Override - public long deleteTable( - final TableName tableName, - final long nonceGroup, - final long nonce) throws IOException { - return -1; - } - - @Override - public long truncateTable( - final TableName tableName, - final boolean preserveSplits, - final long nonceGroup, - final long nonce) throws IOException { - return -1; - } - - - @Override - public long modifyTable( - final TableName tableName, - final TableDescriptor descriptor, - final long nonceGroup, - final long nonce) throws IOException { - return -1; - } - - @Override - public long enableTable( - final TableName tableName, - final long nonceGroup, - final long nonce) throws IOException { - return -1; - } - - @Override - public long disableTable( - TableName tableName, - final long nonceGroup, - final long nonce) throws IOException { - return -1; - } - - @Override - public long addColumn(final TableName tableName, final ColumnFamilyDescriptor columnDescriptor, - final long nonceGroup, final long nonce) throws IOException { - return -1; - } - - @Override - public long modifyColumn(final TableName tableName, final ColumnFamilyDescriptor descriptor, - final long nonceGroup, final long nonce) throws IOException { - return -1; - } - - @Override - public long deleteColumn(final TableName tableName, final byte[] columnName, - final long nonceGroup, final long nonce) throws IOException { - return -1; - } - - @Override - public long mergeRegions( - final RegionInfo[] regionsToMerge, - final boolean forcible, - final long nonceGroup, - final long nonce) throws IOException { - return -1; - } - - @Override - public long splitRegion( - final RegionInfo regionInfo, - final byte[] splitRow, - final long nonceGroup, - final long nonce) throws IOException { - return -1; - } - - @Override - public TableStateManager getTableStateManager() { - return mock(TableStateManager.class); - } - - @Override - 
public boolean isActiveMaster() { - return true; - } - - @Override - public boolean isInitialized() { - return false; - } - - @Override - public boolean isInMaintenanceMode() { - return false; - } - - @Override - public long getLastMajorCompactionTimestamp(TableName table) throws IOException { - return 0; - } - - @Override - public long getLastMajorCompactionTimestampForRegion(byte[] regionName) throws IOException { - return 0; - } - - @Override - public ClusterSchema getClusterSchema() { - return null; - } - - @Override - public ClusterConnection getClusterConnection() { - // TODO Auto-generated method stub - return null; - } - - @Override - public LoadBalancer getLoadBalancer() { - return null; - } - - @Override - public FavoredNodesManager getFavoredNodesManager() { - return null; - } - - @Override - public SnapshotManager getSnapshotManager() { - return null; - } - - @Override - public MasterProcedureManagerHost getMasterProcedureManagerHost() { - return null; - } - - @Override - public boolean isSplitOrMergeEnabled(MasterSwitchType switchType) { - return false; - } - - @Override - public void addReplicationPeer(String peerId, ReplicationPeerConfig peerConfig) - throws ReplicationException { - } - - @Override - public void removeReplicationPeer(String peerId) throws ReplicationException { - } - - @Override - public void enableReplicationPeer(String peerId) throws ReplicationException, IOException { - } - - @Override - public void disableReplicationPeer(String peerId) throws ReplicationException, IOException { - } - - @Override - public void drainRegionServer(ServerName server) { - return; - } - - @Override - public List listDrainingRegionServers() { - return null; - } - - @Override - public void removeDrainFromRegionServer(ServerName servers) { - return; - } - - @Override - public ReplicationPeerConfig getReplicationPeerConfig(String peerId) throws ReplicationException, - IOException { - return null; - } - - @Override - public void 
updateReplicationPeerConfig(String peerId, ReplicationPeerConfig peerConfig) - throws ReplicationException, IOException { - } - - @Override - public List listReplicationPeers(String regex) - throws ReplicationException, IOException { - return null; - } - - @Override - public LockManager getLockManager() { - return null; - } - - @Override - public String getRegionServerVersion(ServerName sn) { - return "0.0.0"; - } - - @Override - public void checkIfShouldMoveSystemRegionAsync() { - } - - @Override - public boolean recoverMeta() throws IOException { - return false; - } - - @Override - public String getClientIdAuditPrefix() { - return null; - } - - @Override - public ProcedureEvent getInitializedEvent() { - // TODO Auto-generated method stub - return null; - } -} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java index f77706727c..2498566d59 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -35,7 +35,6 @@ import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.CellScannable; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.ChoreService; -import org.apache.hadoop.hbase.CoordinatedStateManager; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.ZooKeeperConnectionException; @@ -63,7 +62,6 @@ import org.apache.hadoop.hbase.regionserver.ServerNonceManager; import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.wal.WAL; -import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.apache.zookeeper.KeeperException; @@ -291,19 +289,10 @@ ClientProtos.ClientService.BlockingInterface, RegionServerServices { } @Override - public CoordinatedStateManager getCoordinatedStateManager() { - return null; - } - - @Override public ClusterConnection getConnection() { return null; } - @Override - public MetaTableLocator getMetaTableLocator() { - return null; - } @Override public ServerName getServerName() { @@ -655,11 +644,6 @@ ClientProtos.ClientService.BlockingInterface, RegionServerServices { } @Override - public ClusterConnection getClusterConnection() { - return null; - } - - @Override public ThroughputController getFlushThroughputController() { return null; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java index 418216c7b6..54307b74c4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java @@ -1,4 +1,4 @@ -/** +/* * * Licensed to the Apache Software 
Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file @@ -29,7 +29,6 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ChoreService; -import org.apache.hadoop.hbase.CoordinatedStateManager; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; @@ -39,7 +38,6 @@ import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.zookeeper.ClusterStatusTracker; import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker; -import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZooKeeperListener; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; @@ -190,12 +188,10 @@ public class TestActiveMasterManager { /** * Assert there is an active master and that it has the specified address. 
* @param zk - * @param thisMasterAddress * @throws KeeperException * @throws IOException */ - private void assertMaster(ZooKeeperWatcher zk, - ServerName expectedAddress) + private void assertMaster(ZooKeeperWatcher zk, ServerName expectedAddress) throws KeeperException, IOException { ServerName readAddress = MasterAddressTracker.getMasterAddress(zk); assertNotNull(readAddress); @@ -285,11 +281,6 @@ public class TestActiveMasterManager { } @Override - public CoordinatedStateManager getCoordinatedStateManager() { - return null; - } - - @Override public ServerName getServerName() { return null; } @@ -309,11 +300,6 @@ public class TestActiveMasterManager { return null; } - @Override - public MetaTableLocator getMetaTableLocator() { - return null; - } - public ClusterStatusTracker getClusterStatusTracker() { return clusterStatusTracker; } @@ -326,11 +312,5 @@ public class TestActiveMasterManager { public ChoreService getChoreService() { return null; } - - @Override - public ClusterConnection getClusterConnection() { - // TODO Auto-generated method stub - return null; - } } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentListener.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentListener.java index 4c4a8edf84..c0f572e886 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentListener.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentListener.java @@ -1,4 +1,4 @@ -/** +/* * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file @@ -354,35 +354,32 @@ public class TestAssignmentListener { // Now, we follow the same order of steps that the HMaster does to setup // the ServerManager, RegionServerTracker, and DrainingServerTracker. 
- ServerManager serverManager = new ServerManager(master); - - RegionServerTracker regionServerTracker = new RegionServerTracker( - zooKeeper, master, serverManager); + RegionServerTracker regionServerTracker = new RegionServerTracker(master); regionServerTracker.start(); - DrainingServerTracker drainingServerTracker = new DrainingServerTracker( - zooKeeper, master, serverManager); + DrainingServerTracker drainingServerTracker = new DrainingServerTracker( zooKeeper, master, + master.getServerManager()); drainingServerTracker.start(); // Confirm our ServerManager lists are empty. - Assert.assertEquals(serverManager.getOnlineServers(), + Assert.assertEquals(master.getServerManager().getOnlineServers(), new HashMap()); - Assert.assertEquals(serverManager.getDrainingServersList(), + Assert.assertEquals(master.getServerManager().getDrainingServersList(), new ArrayList()); // checkAndRecordNewServer() is how servers are added to the ServerManager. ArrayList onlineDrainingServers = new ArrayList<>(); for (ServerName sn : onlineServers.keySet()){ // Here's the actual test. - serverManager.checkAndRecordNewServer(sn, onlineServers.get(sn)); + master.getServerManager().checkAndRecordNewServer(sn, onlineServers.get(sn)); if (drainingServers.contains(sn)){ onlineDrainingServers.add(sn); // keeping track for later verification } } // Verify the ServerManager lists are correctly updated. 
- Assert.assertEquals(serverManager.getOnlineServers(), onlineServers); - Assert.assertEquals(serverManager.getDrainingServersList(), + Assert.assertEquals(master.getServerManager().getOnlineServers(), onlineServers); + Assert.assertEquals(master.getServerManager().getDrainingServersList(), onlineDrainingServers); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java index fcdf4d6ba5..0a53c5fdf4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java @@ -1,4 +1,4 @@ -/** +/* * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file @@ -52,7 +52,7 @@ import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.TableDescriptorBuilder; import org.apache.hadoop.hbase.io.Reference; import org.apache.hadoop.hbase.master.CatalogJanitor.SplitParentFirstComparator; -import org.apache.hadoop.hbase.master.assignment.MockMasterServices; +import org.apache.hadoop.hbase.master.assignment.MockHMaster; import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; import org.apache.hadoop.hbase.regionserver.ChunkCreator; import org.apache.hadoop.hbase.regionserver.HStore; @@ -62,8 +62,8 @@ import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.HFileArchiveUtil; -import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.util.Triple; +import org.apache.zookeeper.KeeperException; import org.junit.After; import org.junit.Before; import org.junit.BeforeClass; @@ -79,8 +79,9 @@ public class TestCatalogJanitor { @Rule public final TestRule timeout = CategoryBasedTimeout.builder(). 
withTimeout(this.getClass()).withLookingForStuckThread(true).build(); @Rule public final TestName name = new TestName(); + public static final String DEFAULT_COLUMN_FAMILY_NAME = "cf"; private static final HBaseTestingUtility HTU = new HBaseTestingUtility(); - private MockMasterServices masterServices; + private MockHMaster master; private CatalogJanitor janitor; @BeforeClass @@ -89,20 +90,19 @@ public class TestCatalogJanitor { } @Before - public void setup() throws IOException { + public void setup() throws IOException, KeeperException { setRootDirAndCleanIt(HTU, this.name.getMethodName()); NavigableMap> regionsToRegionServers = new ConcurrentSkipListMap>(); - this.masterServices = - new MockMasterServices(HTU.getConfiguration(), regionsToRegionServers); - this.masterServices.start(10, null); - this.janitor = new CatalogJanitor(masterServices); + this.master = new MockHMaster(HTU.getConfiguration(), regionsToRegionServers); + this.master.start(10, null); + this.janitor = new CatalogJanitor(this.master); } @After public void teardown() { this.janitor.cancel(true); - this.masterServices.stop("DONE"); + this.master.stop("DONE"); } /** @@ -121,7 +121,7 @@ public class TestCatalogJanitor { // Test that when both daughter regions are in place, that we do not remove the parent. Result r = createResult(parent, splita, splitb); // Add a reference under splitA directory so we don't clear out the parent. - Path rootdir = this.masterServices.getMasterFileSystem().getRootDir(); + Path rootdir = this.master.getMasterFileSystem().getRootDir(); Path tabledir = FSUtils.getTableDir(rootdir, td.getTableName()); Path parentdir = new Path(tabledir, parent.getEncodedName()); Path storedir = HStore.getStoreHomedir(tabledir, splita, td.getColumnFamilies()[0].getName()); @@ -129,20 +129,20 @@ public class TestCatalogJanitor { long now = System.currentTimeMillis(); // Reference name has this format: StoreFile#REF_NAME_PARSER Path p = new Path(storedir, Long.toString(now) + "." 
+ parent.getEncodedName()); - FileSystem fs = this.masterServices.getMasterFileSystem().getFileSystem(); + FileSystem fs = this.master.getMasterFileSystem().getFileSystem(); Path path = ref.write(fs, p); assertTrue(fs.exists(path)); LOG.info("Created reference " + path); // Add a parentdir for kicks so can check it gets removed by the catalogjanitor. fs.mkdirs(parentdir); assertFalse(this.janitor.cleanParent(parent, r)); - ProcedureTestingUtility.waitAllProcedures(masterServices.getMasterProcedureExecutor()); + ProcedureTestingUtility.waitAllProcedures(master.getMasterProcedureExecutor()); assertTrue(fs.exists(parentdir)); // Remove the reference file and try again. assertTrue(fs.delete(p, true)); assertTrue(this.janitor.cleanParent(parent, r)); // Parent cleanup is run async as a procedure. Make sure parentdir is removed. - ProcedureTestingUtility.waitAllProcedures(masterServices.getMasterProcedureExecutor()); + ProcedureTestingUtility.waitAllProcedures(master.getMasterProcedureExecutor()); assertTrue(!fs.exists(parentdir)); } @@ -168,11 +168,11 @@ public class TestCatalogJanitor { /** * @return A TableDescriptor with a tableName of current method name and a column - * family that is MockMasterServices.DEFAULT_COLUMN_FAMILY_NAME) + * family that is DEFAULT_COLUMN_FAMILY_NAME) */ private TableDescriptor createTableDescriptorForCurrentMethod() { return TableDescriptorBuilder.newBuilder(TableName.valueOf(this.name.getMethodName())). - addColumnFamily(new HColumnDescriptor(MockMasterServices.DEFAULT_COLUMN_FAMILY_NAME)). + addColumnFamily(new HColumnDescriptor(DEFAULT_COLUMN_FAMILY_NAME)). build(); } @@ -234,7 +234,7 @@ public class TestCatalogJanitor { // Now play around with the cleanParent function. Create a ref from splita up to the parent. 
Path splitaRef = - createReferences(this.masterServices, td, parent, splita, Bytes.toBytes("ccc"), false); + createReferences(this.master, td, parent, splita, Bytes.toBytes("ccc"), false); // Make sure actual super parent sticks around because splita has a ref. assertFalse(janitor.cleanParent(parent, regions.get(parent))); @@ -250,9 +250,9 @@ public class TestCatalogJanitor { assertTrue(fs.delete(splitaRef, true)); // Create the refs from daughters of splita. Path splitaaRef = - createReferences(this.masterServices, td, splita, splitaa, Bytes.toBytes("bbb"), false); + createReferences(this.master, td, splita, splitaa, Bytes.toBytes("bbb"), false); Path splitabRef = - createReferences(this.masterServices, td, splita, splitab, Bytes.toBytes("bbb"), true); + createReferences(this.master, td, splita, splitab, Bytes.toBytes("bbb"), true); // Test splita. It should stick around because references from splitab, etc. assertFalse(janitor.cleanParent(splita, regions.get(splita))); @@ -313,7 +313,7 @@ public class TestCatalogJanitor { // Create ref from splita to parent LOG.info("parent=" + parent.getShortNameToLog() + ", splita=" + splita.getShortNameToLog()); Path splitaRef = - createReferences(this.masterServices, td, parent, splita, Bytes.toBytes("ccc"), false); + createReferences(this.master, td, parent, splita, Bytes.toBytes("ccc"), false); LOG.info("Created reference " + splitaRef); // Parent and splita should not be removed because a reference from splita to parent. @@ -426,7 +426,7 @@ public class TestCatalogJanitor { // remove the parent. Result parentMetaRow = createResult(parent, splita, splitb); FileSystem fs = FileSystem.get(HTU.getConfiguration()); - Path rootdir = this.masterServices.getMasterFileSystem().getRootDir(); + Path rootdir = this.master.getMasterFileSystem().getRootDir(); // have to set the root directory since we use it in HFileDisposer to figure out to get to the // archive directory. 
Otherwise, it just seems to pick the first root directory it can find (so // the single test passes, but when the full suite is run, things get borked). @@ -434,14 +434,14 @@ public class TestCatalogJanitor { Path tabledir = FSUtils.getTableDir(rootdir, td.getTableName()); Path storedir = HStore.getStoreHomedir(tabledir, parent, td.getColumnFamilies()[0].getName()); Path storeArchive = - HFileArchiveUtil.getStoreArchivePath(this.masterServices.getConfiguration(), parent, + HFileArchiveUtil.getStoreArchivePath(this.master.getConfiguration(), parent, tabledir, td.getColumnFamilies()[0].getName()); LOG.debug("Table dir:" + tabledir); LOG.debug("Store dir:" + storedir); LOG.debug("Store archive dir:" + storeArchive); // add a couple of store files that we can check for - FileStatus[] mockFiles = addMockStoreFiles(2, this.masterServices, storedir); + FileStatus[] mockFiles = addMockStoreFiles(2, this.master, storedir); // get the current store files for comparison FileStatus[] storeFiles = fs.listStatus(storedir); int index = 0; @@ -456,7 +456,7 @@ public class TestCatalogJanitor { assertTrue(janitor.cleanParent(parent, parentMetaRow)); Path parentDir = new Path(tabledir, parent.getEncodedName()); // Cleanup procedure runs async. Wait till it done. - ProcedureTestingUtility.waitAllProcedures(masterServices.getMasterProcedureExecutor()); + ProcedureTestingUtility.waitAllProcedures(master.getMasterProcedureExecutor()); assertTrue(!fs.exists(parentDir)); LOG.debug("Finished cleanup of parent region"); @@ -501,7 +501,7 @@ public class TestCatalogJanitor { // remove the parent. Result r = createResult(parent, splita, splitb); FileSystem fs = FileSystem.get(HTU.getConfiguration()); - Path rootdir = this.masterServices.getMasterFileSystem().getRootDir(); + Path rootdir = this.master.getMasterFileSystem().getRootDir(); // Have to set the root directory since we use it in HFileDisposer to figure out to get to the // archive directory. 
Otherwise, it just seems to pick the first root directory it can find (so // the single test passes, but when the full suite is run, things get borked). @@ -513,18 +513,18 @@ public class TestCatalogJanitor { System.out.println("Old store:" + storedir); Path storeArchive = - HFileArchiveUtil.getStoreArchivePath(this.masterServices.getConfiguration(), parent, + HFileArchiveUtil.getStoreArchivePath(this.master.getConfiguration(), parent, tabledir, td.getColumnFamilies()[0].getName()); System.out.println("Old archive:" + storeArchive); // enable archiving, make sure that files get archived - addMockStoreFiles(2, this.masterServices, storedir); + addMockStoreFiles(2, this.master, storedir); // get the current store files for comparison FileStatus[] storeFiles = fs.listStatus(storedir); // Do the cleaning of the parent assertTrue(janitor.cleanParent(parent, r)); Path parentDir = new Path(tabledir, parent.getEncodedName()); - ProcedureTestingUtility.waitAllProcedures(masterServices.getMasterProcedureExecutor()); + ProcedureTestingUtility.waitAllProcedures(master.getMasterProcedureExecutor()); assertTrue(!fs.exists(parentDir)); // And now check to make sure that the files have actually been archived @@ -533,12 +533,12 @@ public class TestCatalogJanitor { // now add store files with the same names as before to check backup // enable archiving, make sure that files get archived - addMockStoreFiles(2, this.masterServices, storedir); + addMockStoreFiles(2, this.master, storedir); // Do the cleaning of the parent assertTrue(janitor.cleanParent(parent, r)); // Cleanup procedure runs async. Wait till it done. 
- ProcedureTestingUtility.waitAllProcedures(masterServices.getMasterProcedureExecutor()); + ProcedureTestingUtility.waitAllProcedures(master.getMasterProcedureExecutor()); assertTrue(!fs.exists(parentDir)); // and now check to make sure that the files have actually been archived @@ -546,10 +546,10 @@ public class TestCatalogJanitor { assertArchiveEqualToOriginal(storeFiles, archivedStoreFiles, fs, true); } - private FileStatus[] addMockStoreFiles(int count, MasterServices services, Path storedir) + private FileStatus[] addMockStoreFiles(int count, HMaster master, Path storedir) throws IOException { // get the existing store files - FileSystem fs = services.getMasterFileSystem().getFileSystem(); + FileSystem fs = master.getMasterFileSystem().getFileSystem(); fs.mkdirs(storedir); // create the store files in the parent for (int i = 0; i < count; i++) { @@ -574,11 +574,11 @@ public class TestCatalogJanitor { return FSUtils.getRootDir(htu.getConfiguration()).toString(); } - private Path createReferences(final MasterServices services, + private Path createReferences(final HMaster master, final TableDescriptor td, final HRegionInfo parent, final HRegionInfo daughter, final byte [] midkey, final boolean top) throws IOException { - Path rootdir = services.getMasterFileSystem().getRootDir(); + Path rootdir = master.getMasterFileSystem().getRootDir(); Path tabledir = FSUtils.getTableDir(rootdir, parent.getTable()); Path storedir = HStore.getStoreHomedir(tabledir, daughter, td.getColumnFamilies()[0].getName()); @@ -587,7 +587,7 @@ public class TestCatalogJanitor { long now = System.currentTimeMillis(); // Reference name has this format: StoreFile#REF_NAME_PARSER Path p = new Path(storedir, Long.toString(now) + "." 
+ parent.getEncodedName()); - FileSystem fs = services.getMasterFileSystem().getFileSystem(); + FileSystem fs = master.getMasterFileSystem().getFileSystem(); ref.write(fs, p); return p; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClockSkewDetection.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClockSkewDetection.java index 852b139398..0bb915929a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClockSkewDetection.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClockSkewDetection.java @@ -1,4 +1,4 @@ -/** +/* * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file @@ -27,19 +27,13 @@ import java.net.InetAddress; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.ChoreService; import org.apache.hadoop.hbase.ClockOutOfSyncException; -import org.apache.hadoop.hbase.CoordinatedStateManager; import org.apache.hadoop.hbase.HBaseConfiguration; -import org.apache.hadoop.hbase.Server; -import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.ipc.RpcControllerFactory; import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.SmallTests; -import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -51,7 +45,7 @@ public class TestClockSkewDetection { @Test public void testClockSkewDetection() throws Exception { final Configuration conf = HBaseConfiguration.create(); - ServerManager sm = new ServerManager(new 
MockNoopMasterServices(conf) { + ServerManager sm = new ServerManager(new MockNoopHMaster(conf) { @Override public ClusterConnection getClusterConnection() { ClusterConnection conn = mock(ClusterConnection.class); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java index 480ba9a8e6..a8058feae9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -47,7 +47,6 @@ import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.HConnectionTestingUtility; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.monitoring.MonitoredTask; -import org.apache.hadoop.hbase.replication.ReplicationException; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest; import org.apache.hadoop.hbase.testclassification.MasterTests; @@ -210,11 +209,10 @@ public class TestMasterNoCluster { void initClusterSchemaService() throws IOException, InterruptedException {} @Override - ServerManager createServerManager(MasterServices master) throws IOException { - ServerManager sm = super.createServerManager(master); + protected ServerManager createServerManager() throws IOException { + ServerManager sm = super.createServerManager(); // Spy on the created servermanager - ServerManager spy = Mockito.spy(sm); - return spy; + return Mockito.spy(sm); } @Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement2.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement2.java index 87f5ba3ab5..6b68f783ab 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement2.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement2.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -78,7 +78,7 @@ public class TestRegionPlacement2 { @Test public void testFavoredNodesPresentForRoundRobinAssignment() throws HBaseIOException { LoadBalancer balancer = LoadBalancerFactory.getLoadBalancer(TEST_UTIL.getConfiguration()); - balancer.setMasterServices(TEST_UTIL.getMiniHBaseCluster().getMaster()); + balancer.setMaster(TEST_UTIL.getMiniHBaseCluster().getMaster()); balancer.initialize(); List servers = new ArrayList<>(); for (int i = 0; i < SLAVES; i++) { @@ -139,7 +139,7 @@ public class TestRegionPlacement2 { @Test public void testFavoredNodesPresentForRandomAssignment() throws HBaseIOException { LoadBalancer balancer = LoadBalancerFactory.getLoadBalancer(TEST_UTIL.getConfiguration()); - balancer.setMasterServices(TEST_UTIL.getMiniHBaseCluster().getMaster()); + balancer.setMaster(TEST_UTIL.getMiniHBaseCluster().getMaster()); balancer.initialize(); List servers = new ArrayList<>(); for (int i = 0; i < SLAVES; i++) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitLogManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitLogManager.java index 04fc797542..86e47a6761 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitLogManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitLogManager.java @@ -1,4 +1,4 @@ -/** +/* * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file @@ -39,7 +39,6 @@ import java.io.IOException; import java.util.List; import java.util.Map; import java.util.UUID; -import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.LongAdder; import org.apache.commons.logging.Log; @@ -90,7 +89,7 @@ public class TestSplitLogManager { } private ZooKeeperWatcher zkw; - private DummyMasterServices master; + private DummyHMaster master; private SplitLogManager slm; private Configuration conf; private int to; @@ -98,11 +97,12 @@ public class TestSplitLogManager { private static HBaseTestingUtility TEST_UTIL; - class DummyMasterServices extends MockNoopMasterServices { + class DummyHMaster extends MockNoopHMaster { private ZooKeeperWatcher zkw; private CoordinatedStateManager cm; - public DummyMasterServices(ZooKeeperWatcher zkw, Configuration conf) { + public DummyHMaster(ZooKeeperWatcher zkw, Configuration conf) + throws IOException, KeeperException { super(conf); this.zkw = zkw; cm = CoordinatedStateManagerFactory.getCoordinatedStateManager(conf); @@ -113,16 +113,6 @@ public class TestSplitLogManager { public ZooKeeperWatcher getZooKeeper() { return zkw; } - - @Override - public CoordinatedStateManager getCoordinatedStateManager() { - return cm; - } - - @Override - public ServerManager getServerManager() { - return sm; - } } @Before @@ -133,7 +123,7 @@ public class TestSplitLogManager { // Use a different ZK wrapper instance for each tests. 
zkw = new ZooKeeperWatcher(conf, "split-log-manager-tests" + UUID.randomUUID().toString(), null); - master = new DummyMasterServices(zkw, conf); + master = new DummyHMaster(zkw, conf); ZKUtil.deleteChildrenRecursively(zkw, zkw.znodePaths.baseZNode); ZKUtil.createAndFailSilent(zkw, zkw.znodePaths.baseZNode); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockHMaster.java similarity index 96% rename from hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java rename to hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockHMaster.java index 073216c88f..388a21f423 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockHMaster.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -17,12 +17,6 @@ */ package org.apache.hadoop.hbase.master.assignment; -import java.io.IOException; -import java.util.HashSet; -import java.util.Map; -import java.util.NavigableMap; -import java.util.SortedSet; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.CoordinatedStateManager; @@ -38,11 +32,11 @@ import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.LoadBalancer; import org.apache.hadoop.hbase.master.MasterFileSystem; -import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.master.MasterWalManager; -import org.apache.hadoop.hbase.master.MockNoopMasterServices; +import org.apache.hadoop.hbase.master.MockNoopHMaster; import org.apache.hadoop.hbase.master.RegionState.State; import org.apache.hadoop.hbase.master.ServerManager; import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory; @@ -55,11 +49,6 @@ import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; import org.apache.hadoop.hbase.procedure2.store.NoopProcedureStore; import org.apache.hadoop.hbase.procedure2.store.ProcedureStore; import org.apache.hadoop.hbase.security.Superusers; -import org.apache.hadoop.hbase.util.FSUtils; -import org.mockito.Mockito; -import org.mockito.invocation.InvocationOnMock; -import org.mockito.stubbing.Answer; - import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController; import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; @@ -72,12 +61,23 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutateResp import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.RegionAction; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.RegionActionResult; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ResultOrException; +import org.apache.hadoop.hbase.util.FSUtils; +import org.apache.zookeeper.KeeperException; +import org.mockito.Mockito; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; + +import java.io.IOException; +import java.util.HashSet; +import java.util.Map; +import java.util.NavigableMap; +import java.util.SortedSet; /** * A mocked master services. * Tries to fake it. May not always work. */ -public class MockMasterServices extends MockNoopMasterServices { +public class MockHMaster extends MockNoopHMaster { private final MasterFileSystem fileSystemManager; private final MasterWalManager walManager; private final AssignmentManager assignmentManager; @@ -96,9 +96,9 @@ public class MockMasterServices extends MockNoopMasterServices { public static final ServerName MOCK_MASTER_SERVERNAME = ServerName.valueOf("mockmaster.example.org", 1234, -1L); - public MockMasterServices(Configuration conf, + public MockHMaster(Configuration conf, NavigableMap> regionsToRegionServers) - throws IOException { + throws IOException, KeeperException { super(conf); this.regionsToRegionServers = regionsToRegionServers; Superusers.initialize(conf); @@ -274,13 +274,8 @@ public class MockMasterServices extends MockNoopMasterServices { return MOCK_MASTER_SERVERNAME; } - @Override - public CoordinatedStateManager getCoordinatedStateManager() { - return super.getCoordinatedStateManager(); - } - private static class MockRegionStateStore extends RegionStateStore { - public MockRegionStateStore(final MasterServices master) { + public MockRegionStateStore(HMaster master) { super(master); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.java index dba5d97f7f..962d82d32e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -51,7 +51,7 @@ import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.client.RetriesExhaustedException; import org.apache.hadoop.hbase.exceptions.UnexpectedStateException; import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException; -import org.apache.hadoop.hbase.master.MasterServices; +import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.RegionState.State; import org.apache.hadoop.hbase.master.procedure.MasterProcedureConstants; import org.apache.hadoop.hbase.master.procedure.MasterProcedureScheduler; @@ -112,7 +112,7 @@ public class TestAssignmentManager { private HBaseTestingUtility UTIL; private MockRSProcedureDispatcher rsDispatcher; - private MockMasterServices master; + private HMaster master; private AssignmentManager am; private NavigableMap> regionsToRegionServers = new ConcurrentSkipListMap>(); @@ -141,9 +141,9 @@ public class TestAssignmentManager { UTIL = new HBaseTestingUtility(); this.executor = Executors.newSingleThreadScheduledExecutor(); setupConfiguration(UTIL.getConfiguration()); - master = new MockMasterServices(UTIL.getConfiguration(), this.regionsToRegionServers); + master = new MockHMaster(UTIL.getConfiguration(), this.regionsToRegionServers); rsDispatcher = new MockRSProcedureDispatcher(master); - master.start(NSERVERS, rsDispatcher); + ((MockHMaster)master).start(NSERVERS, rsDispatcher); am = master.getAssignmentManager(); 
assignProcMetrics = am.getAssignmentManagerMetrics().getAssignProcMetrics(); unassignProcMetrics = am.getAssignmentManagerMetrics().getUnassignProcMetrics(); @@ -771,7 +771,7 @@ public class TestAssignmentManager { private class MockRSProcedureDispatcher extends RSProcedureDispatcher { private MockRSExecutor mockRsExec; - public MockRSProcedureDispatcher(final MasterServices master) { + public MockRSProcedureDispatcher(HMaster master) { super(master); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestBaseLoadBalancer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestBaseLoadBalancer.java index c33cd56e4a..e16267a66a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestBaseLoadBalancer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestBaseLoadBalancer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -45,8 +45,8 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.client.RegionReplicaUtil; +import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.LoadBalancer; -import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.master.RackManager; import org.apache.hadoop.hbase.master.RegionPlan; import org.apache.hadoop.hbase.master.ServerManager; @@ -90,9 +90,9 @@ public class TestBaseLoadBalancer extends BalancerTestBase { conf.setClass("hbase.util.ip.to.rack.determiner", MockMapping.class, DNSToSwitchMapping.class); loadBalancer = new MockBalancer(); loadBalancer.setConf(conf); - MasterServices st = Mockito.mock(MasterServices.class); + HMaster st = Mockito.mock(HMaster.class); Mockito.when(st.getServerName()).thenReturn(master); - loadBalancer.setMasterServices(st); + loadBalancer.setMaster(st); // Set up the rack topologies (5 machines per rack) rackManager = Mockito.mock(RackManager.class); @@ -250,9 +250,9 @@ public class TestBaseLoadBalancer extends BalancerTestBase { ServerManager sm = Mockito.mock(ServerManager.class); Mockito.when(sm.getOnlineServersListWithPredicator(allServers, BaseLoadBalancer.IDLE_SERVER_PREDICATOR)) .thenReturn(idleServers); - MasterServices services = Mockito.mock(MasterServices.class); - Mockito.when(services.getServerManager()).thenReturn(sm); - balancer.setMasterServices(services); + HMaster m = Mockito.mock(HMaster.class); + Mockito.when(m.getServerManager()).thenReturn(sm); + balancer.setMaster(m); RegionInfo hri1 = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())) .setStartKey("key1".getBytes()) .setEndKey("key2".getBytes()) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestFavoredStochasticBalancerPickers.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestFavoredStochasticBalancerPickers.java index e636cb02a4..42390c0177 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestFavoredStochasticBalancerPickers.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestFavoredStochasticBalancerPickers.java @@ -142,7 +142,7 @@ public class TestFavoredStochasticBalancerPickers extends BalancerTestBase { RegionLocationFinder regionFinder = new RegionLocationFinder(); regionFinder.setClusterStatus(admin.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS))); regionFinder.setConf(conf); - regionFinder.setServices(TEST_UTIL.getMiniHBaseCluster().getMaster()); + regionFinder.setMaster(TEST_UTIL.getMiniHBaseCluster().getMaster()); Cluster cluster = new Cluster(serverAssignments, null, regionFinder, new RackManager(conf)); LoadOnlyFavoredStochasticBalancer balancer = (LoadOnlyFavoredStochasticBalancer) TEST_UTIL .getMiniHBaseCluster().getMaster().getLoadBalancer(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRegionLocationFinder.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRegionLocationFinder.java index 010f57a940..f6bef0efd7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRegionLocationFinder.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRegionLocationFinder.java @@ -68,7 +68,7 @@ public class TestRegionLocationFinder { } finder.setConf(TEST_UTIL.getConfiguration()); - finder.setServices(cluster.getMaster()); + finder.setMaster(cluster.getMaster()); finder.setClusterStatus(cluster.getMaster().getClusterStatus()); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.java index 68d009dfa0..634a9044a8 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -44,7 +44,7 @@ import org.apache.hadoop.hbase.ServerLoad; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionReplicaUtil; -import org.apache.hadoop.hbase.master.MockNoopMasterServices; +import org.apache.hadoop.hbase.master.MockNoopHMaster; import org.apache.hadoop.hbase.master.RackManager; import org.apache.hadoop.hbase.master.RegionPlan; import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster; @@ -202,7 +202,7 @@ public class TestStochasticLoadBalancer extends BalancerTestBase { @Test public void testLocalityCost() throws Exception { Configuration conf = HBaseConfiguration.create(); - MockNoopMasterServices master = new MockNoopMasterServices(); + MockNoopHMaster master = new MockNoopHMaster(conf); StochasticLoadBalancer.CostFunction costFunction = new ServerLocalityCostFunction(conf, master); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java index 1c135b9c23..471831c366 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -33,7 +33,6 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.ChoreService; -import org.apache.hadoop.hbase.CoordinatedStateManager; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.Server; @@ -43,7 +42,6 @@ import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.EnvironmentEdge; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.junit.AfterClass; import org.junit.Assert; @@ -216,21 +214,11 @@ public class TestHFileCleaner { } @Override - public CoordinatedStateManager getCoordinatedStateManager() { - return null; - } - - @Override public ClusterConnection getConnection() { return null; } @Override - public MetaTableLocator getMetaTableLocator() { - return null; - } - - @Override public ServerName getServerName() { return ServerName.valueOf("regionserver,60020,000000"); } @@ -257,12 +245,6 @@ public class TestHFileCleaner { public ChoreService getChoreService() { return null; } - - @Override - public ClusterConnection getClusterConnection() { - // TODO Auto-generated method stub - return null; - } } @Test diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java index 4ef31967c3..913822da55 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the 
Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -29,7 +29,6 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.CategoryBasedTimeout; import org.apache.hadoop.hbase.ChoreService; -import org.apache.hadoop.hbase.CoordinatedStateManager; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.Server; @@ -41,7 +40,6 @@ import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.HFileArchiveUtil; -import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.junit.Rule; import org.junit.Test; @@ -156,21 +154,11 @@ public class TestHFileLinkCleaner { } @Override - public CoordinatedStateManager getCoordinatedStateManager() { - return null; - } - - @Override public ClusterConnection getConnection() { return null; } @Override - public MetaTableLocator getMetaTableLocator() { - return null; - } - - @Override public ServerName getServerName() { return ServerName.valueOf("regionserver,60020,000000"); } @@ -195,11 +183,5 @@ public class TestHFileLinkCleaner { public ChoreService getChoreService() { return null; } - - @Override - public ClusterConnection getClusterConnection() { - // TODO Auto-generated method stub - return null; - } } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java index 1a30df3d1c..96f5de68bc 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -37,7 +37,6 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.ChoreService; -import org.apache.hadoop.hbase.CoordinatedStateManager; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.Server; @@ -48,13 +47,11 @@ import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.replication.ReplicationFactory; import org.apache.hadoop.hbase.replication.ReplicationQueues; import org.apache.hadoop.hbase.replication.ReplicationQueuesArguments; -import org.apache.hadoop.hbase.replication.ReplicationQueuesClient; import org.apache.hadoop.hbase.replication.ReplicationQueuesClientZKImpl; import org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner; import org.apache.hadoop.hbase.replication.regionserver.Replication; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.MediumTests; -import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; import org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.apache.zookeeper.KeeperException; @@ -241,21 +238,11 @@ public class TestLogsCleaner { } @Override - public CoordinatedStateManager getCoordinatedStateManager() { - return null; - } - - @Override public ClusterConnection getConnection() { return null; } @Override - public MetaTableLocator getMetaTableLocator() { - return null; - } - - @Override public ServerName getServerName() { return ServerName.valueOf("regionserver,60020,000000"); } @@ -280,12 +267,6 
@@ public class TestLogsCleaner { public ChoreService getChoreService() { return null; } - - @Override - public ClusterConnection getClusterConnection() { - // TODO Auto-generated method stub - return null; - } } static class FaultyZooKeeperWatcher extends ZooKeeperWatcher { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java index e7a2588dea..b9b69fc107 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java @@ -33,7 +33,6 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.ChoreService; -import org.apache.hadoop.hbase.CoordinatedStateManager; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.Server; @@ -53,7 +52,6 @@ import org.apache.hadoop.hbase.replication.regionserver.Replication; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Pair; -import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; import org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.apache.zookeeper.KeeperException; @@ -273,21 +271,11 @@ public class TestReplicationHFileCleaner { } @Override - public CoordinatedStateManager getCoordinatedStateManager() { - return null; - } - - @Override public ClusterConnection getConnection() { return null; } @Override - public MetaTableLocator getMetaTableLocator() { - return null; - } - - @Override public ServerName getServerName() { return 
ServerName.valueOf("regionserver,60020,000000"); } @@ -314,12 +302,6 @@ public class TestReplicationHFileCleaner { public ChoreService getChoreService() { return null; } - - @Override - public ClusterConnection getClusterConnection() { - // TODO Auto-generated method stub - return null; - } } static class FaultyZooKeeperWatcher extends ZooKeeperWatcher { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/locking/TestLockManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/locking/TestLockManager.java index e2e97dc033..626afadee3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/locking/TestLockManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/locking/TestLockManager.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -31,7 +31,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.master.MasterServices; +import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.procedure.MasterProcedureConstants; import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; import org.apache.hadoop.hbase.procedure2.LockType; @@ -57,7 +57,7 @@ public class TestLockManager { private static final Log LOG = LogFactory.getLog(TestLockProcedure.class); protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility(); - private static MasterServices masterServices; + private static HMaster master; private static String namespace = "namespace"; private static TableName tableName = TableName.valueOf(namespace, "table"); @@ -73,7 +73,7 @@ public class TestLockManager { public static void setupCluster() throws Exception { 
setupConf(UTIL.getConfiguration()); UTIL.startMiniCluster(1); - masterServices = UTIL.getMiniHBaseCluster().getMaster(); + master = UTIL.getMiniHBaseCluster().getMaster(); UTIL.getAdmin().createNamespace(NamespaceDescriptor.create(namespace).build()); UTIL.createTable(tableName, new byte[][]{"fam".getBytes()}, new byte[][] {"1".getBytes()}); List regions = UTIL.getAdmin().getTableRegions(tableName); @@ -111,7 +111,7 @@ public class TestLockManager { */ @Test public void testMasterLockAcquire() throws Exception { - LockManager.MasterLock lock = masterServices.getLockManager().createMasterLock(namespace, + LockManager.MasterLock lock = master.getLockManager().createMasterLock(namespace, LockType.EXCLUSIVE, "desc"); assertTrue(lock.tryAcquire(2000)); assertTrue(lock.getProc().isLocked()); @@ -124,9 +124,9 @@ public class TestLockManager { */ @Test public void testMasterLockAcquireTimeout() throws Exception { - LockManager.MasterLock lock = masterServices.getLockManager().createMasterLock( + LockManager.MasterLock lock = master.getLockManager().createMasterLock( tableName, LockType.EXCLUSIVE, "desc"); - LockManager.MasterLock lock2 = masterServices.getLockManager().createMasterLock( + LockManager.MasterLock lock2 = master.getLockManager().createMasterLock( tableName, LockType.EXCLUSIVE, "desc"); assertTrue(lock.tryAcquire(2000)); assertFalse(lock2.tryAcquire(LOCAL_LOCKS_TIMEOUT/2)); // wait less than other lock's timeout @@ -142,9 +142,9 @@ public class TestLockManager { */ @Test public void testMasterLockAcquireTimeoutRegionVsTableExclusive() throws Exception { - LockManager.MasterLock lock = masterServices.getLockManager().createMasterLock( + LockManager.MasterLock lock = master.getLockManager().createMasterLock( tableRegions, "desc"); - LockManager.MasterLock lock2 = masterServices.getLockManager().createMasterLock( + LockManager.MasterLock lock2 = master.getLockManager().createMasterLock( tableName, LockType.EXCLUSIVE, "desc"); assertTrue(lock.tryAcquire(2000)); 
assertFalse(lock2.tryAcquire(LOCAL_LOCKS_TIMEOUT/2)); // wait less than other lock's timeout diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java index ab6d7d0ffb..2269bbcc1a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java @@ -1,4 +1,4 @@ -/** +/* * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file @@ -35,8 +35,10 @@ import org.apache.hadoop.hbase.HBaseIOException; import org.apache.hadoop.hbase.RegionLoad; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.MasterSwitchType; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionInfoBuilder; +import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.MasterRpcServices; import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.testclassification.MasterTests; @@ -49,10 +51,6 @@ import org.junit.experimental.categories.Category; import org.junit.rules.TestName; import org.mockito.Mockito; -import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController; -import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException; -import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest; -import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledResponse; /** * Tests logic of {@link SimpleRegionNormalizer}. 
@@ -347,14 +345,13 @@ public class TestSimpleRegionNormalizer { protected void setupMocksForNormalizer(Map regionSizes, List RegionInfo) { - masterServices = Mockito.mock(MasterServices.class, RETURNS_DEEP_STUBS); - masterRpcServices = Mockito.mock(MasterRpcServices.class, RETURNS_DEEP_STUBS); + HMaster master = Mockito.mock(HMaster.class, RETURNS_DEEP_STUBS); // for simplicity all regions are assumed to be on one server; doesn't matter to us ServerName sn = ServerName.valueOf("localhost", 0, 1L); - when(masterServices.getAssignmentManager().getRegionStates(). + when(master.getAssignmentManager().getRegionStates(). getRegionsOfTable(any(TableName.class))).thenReturn(RegionInfo); - when(masterServices.getAssignmentManager().getRegionStates(). + when(master.getAssignmentManager().getRegionStates(). getRegionServerOfRegion(any(RegionInfo.class))).thenReturn(sn); for (Map.Entry region : regionSizes.entrySet()) { @@ -362,18 +359,11 @@ public class TestSimpleRegionNormalizer { when(regionLoad.getName()).thenReturn(region.getKey()); when(regionLoad.getStorefileSizeMB()).thenReturn(region.getValue()); - when(masterServices.getServerManager().getLoad(sn). + when(master.getServerManager().getLoad(sn). 
getRegionsLoad().get(region.getKey())).thenReturn(regionLoad); } - try { - when(masterRpcServices.isSplitOrMergeEnabled(any(RpcController.class), - any(IsSplitOrMergeEnabledRequest.class))).thenReturn( - IsSplitOrMergeEnabledResponse.newBuilder().setEnabled(true).build()); - } catch (ServiceException se) { - LOG.debug("error setting isSplitOrMergeEnabled switch", se); - } - - normalizer.setMasterServices(masterServices); + when(master.isSplitOrMergeEnabled(any(MasterSwitchType.class))).thenReturn(true); + normalizer.setMaster(master); normalizer.setMasterRpcServices(masterRpcServices); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java index 1485966900..2ffaffde11 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java @@ -73,7 +73,7 @@ public class MasterProcedureTestingUtility { public static void restartMasterProcedureExecutor(ProcedureExecutor procExec) throws Exception { final MasterProcedureEnv env = procExec.getEnvironment(); - final HMaster master = (HMaster)env.getMasterServices(); + final HMaster master = (HMaster)env.getMaster(); ProcedureTestingUtility.restart(procExec, true, true, // stop services new Callable() { @@ -82,7 +82,7 @@ public class MasterProcedureTestingUtility { final AssignmentManager am = env.getAssignmentManager(); // try to simulate a master restart by removing the ServerManager states about seqIDs for (RegionState regionState: am.getRegionStates().getRegionStates()) { - env.getMasterServices().getServerManager().removeRegion(regionState.getRegion()); + env.getMaster().getServerManager().removeRegion(regionState.getRegion()); } am.stop(); master.setServerCrashProcessingEnabled(false); @@ -399,8 
+399,7 @@ public class MasterProcedureTestingUtility { *

This is a good test for finding state that needs persisting and steps that are not * idempotent. Use this version of the test when the order in which flow steps are executed is * not start to finish; where the procedure may vary the flow steps dependent on circumstance - * found. - * @see #testRecoveryAndDoubleExecution(ProcedureExecutor, long, int) + * found. See #testRecoveryAndDoubleExecution(ProcedureExecutor, long, int) */ public static void testRecoveryAndDoubleExecution( final ProcedureExecutor procExec, final long procId) throws Exception { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotManager.java index f716de61fc..ec1fc3650e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotManager.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -28,11 +28,11 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.executor.ExecutorService; import org.apache.hadoop.hbase.master.MasterFileSystem; -import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.master.MetricsMaster; import org.apache.hadoop.hbase.master.cleaner.HFileCleaner; import org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner; @@ -55,7 +55,7 @@ public class TestSnapshotManager { @Rule public TestName name = new TestName(); - MasterServices services = Mockito.mock(MasterServices.class); + HMaster master = Mockito.mock(HMaster.class); MetricsMaster metrics = Mockito.mock(MetricsMaster.class); ProcedureCoordinator coordinator = Mockito.mock(ProcedureCoordinator.class); ExecutorService pool = Mockito.mock(ExecutorService.class); @@ -75,12 +75,12 @@ public class TestSnapshotManager { private SnapshotManager getNewManager(final Configuration conf) throws IOException, KeeperException { - Mockito.reset(services); - Mockito.when(services.getConfiguration()).thenReturn(conf); - Mockito.when(services.getMasterFileSystem()).thenReturn(mfs); + Mockito.reset(master); + Mockito.when(master.getConfiguration()).thenReturn(conf); + Mockito.when(master.getMasterFileSystem()).thenReturn(mfs); Mockito.when(mfs.getFileSystem()).thenReturn(fs); Mockito.when(mfs.getRootDir()).thenReturn(UTIL.getDataTestDir()); - return new SnapshotManager(services, metrics, coordinator, pool); + return new SnapshotManager(master, coordinator, pool); } @Test diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleMasterProcedureManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleMasterProcedureManager.java index 8eb2e58827..0467233368 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleMasterProcedureManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleMasterProcedureManager.java @@ -28,6 +28,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.errorhandling.ForeignException; import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher; +import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.master.MetricsMaster; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription; @@ -40,7 +41,7 @@ public class SimpleMasterProcedureManager extends MasterProcedureManager { private static final Log LOG = LogFactory.getLog(SimpleMasterProcedureManager.class); - private MasterServices master; + private HMaster master; private ProcedureCoordinator coordinator; private boolean done; @@ -56,7 +57,7 @@ public class SimpleMasterProcedureManager extends MasterProcedureManager { } @Override - public void initialize(MasterServices master, MetricsMaster metricsMaster) + public void initialize(HMaster master, MetricsMaster metricsMaster) throws KeeperException, IOException, UnsupportedOperationException { this.master = master; this.done = false; @@ -117,5 +118,4 @@ public class SimpleMasterProcedureManager extends MasterProcedureManager { public boolean isProcedureDone(ProcedureDescription desc) throws IOException { return done; } - } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleRSProcedureManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleRSProcedureManager.java index 
58efa87be1..b335033cd0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleRSProcedureManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleRSProcedureManager.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -34,6 +34,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.DaemonThreadFactory; +import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.regionserver.RegionServerServices; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.apache.hadoop.hbase.errorhandling.ForeignException; @@ -44,13 +45,13 @@ public class SimpleRSProcedureManager extends RegionServerProcedureManager { private static final Log LOG = LogFactory.getLog(SimpleRSProcedureManager.class); - private RegionServerServices rss; + private HRegionServer hrs; private ProcedureMemberRpcs memberRpcs; private ProcedureMember member; @Override - public void initialize(RegionServerServices rss) throws KeeperException { - this.rss = rss; + public void initialize(HRegionServer rss) throws KeeperException { + this.hrs = rss; ZooKeeperWatcher zkw = rss.getZooKeeper(); this.memberRpcs = new ZKProcedureMemberRpcs(zkw, getProcedureSignature()); @@ -62,7 +63,7 @@ public class SimpleRSProcedureManager extends RegionServerProcedureManager { @Override public void start() { - this.memberRpcs.start(rss.getServerName().toString(), member); + this.memberRpcs.start(hrs.getServerName().toString(), member); LOG.info("Started."); } @@ -88,18 +89,18 @@ public class SimpleRSProcedureManager extends RegionServerProcedureManager { public Subprocedure buildSubprocedure(String name) { // don't run a procedure if the parent is stop(ping) - if 
(rss.isStopping() || rss.isStopped()) { - throw new IllegalStateException("Can't start procedure on RS: " + rss.getServerName() + if (hrs.isStopping() || hrs.isStopped()) { + throw new IllegalStateException("Can't start procedure on RS: " + hrs.getServerName() + ", because stopping/stopped!"); } LOG.info("Attempting to run a procedure."); ForeignExceptionDispatcher errorDispatcher = new ForeignExceptionDispatcher(); - Configuration conf = rss.getConfiguration(); + Configuration conf = hrs.getConfiguration(); SimpleSubprocedurePool taskManager = - new SimpleSubprocedurePool(rss.getServerName().toString(), conf); - return new SimpleSubprocedure(rss, member, errorDispatcher, taskManager, name); + new SimpleSubprocedurePool(hrs.getServerName().toString(), conf); + return new SimpleSubprocedure(hrs, member, errorDispatcher, taskManager, name); } /** diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestMasterQuotaManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestMasterQuotaManager.java index c024294eb2..b17875b059 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestMasterQuotaManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestMasterQuotaManager.java @@ -23,6 +23,7 @@ import static org.mockito.Mockito.mock; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.junit.Test; @@ -33,16 +34,16 @@ public class TestMasterQuotaManager { @Test public void testUninitializedQuotaManangerDoesNotFail() { - MasterServices masterServices = mock(MasterServices.class); - MasterQuotaManager manager = new MasterQuotaManager(masterServices); + HMaster master = mock(HMaster.class); + MasterQuotaManager manager = new MasterQuotaManager(master); manager.addRegionSize(null, 0, 0); 
assertNotNull(manager.snapshotRegionSizes()); } @Test public void testOldEntriesRemoved() { - MasterServices masterServices = mock(MasterServices.class); - MasterQuotaManager manager = new MasterQuotaManager(masterServices); + HMaster master = mock(HMaster.class); + MasterQuotaManager manager = new MasterQuotaManager(master); manager.initializeRegionSizes(); // Mock out some regions TableName tableName = TableName.valueOf("foo"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java index ba0d309bf7..edcd8f8abb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java @@ -1,4 +1,4 @@ -/** +/* * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file @@ -28,7 +28,6 @@ import java.util.Iterator; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ChoreService; -import org.apache.hadoop.hbase.CoordinatedStateManager; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; @@ -47,7 +46,6 @@ import org.apache.hadoop.hbase.regionserver.HeapMemoryManager.TunerContext; import org.apache.hadoop.hbase.regionserver.HeapMemoryManager.TunerResult; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.testclassification.SmallTests; -import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -868,21 +866,11 @@ public class TestHeapMemoryManager { } @Override - public CoordinatedStateManager getCoordinatedStateManager() { - return null; - } - - 
@Override public ClusterConnection getConnection() { return null; } @Override - public MetaTableLocator getMetaTableLocator() { - return null; - } - - @Override public ServerName getServerName() { return ServerName.valueOf("server1",4000,12345); } @@ -891,12 +879,6 @@ public class TestHeapMemoryManager { public ChoreService getChoreService() { return null; } - - @Override - public ClusterConnection getClusterConnection() { - // TODO Auto-generated method stub - return null; - } } static class CustomHeapMemoryTuner implements HeapMemoryTuner { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java index 1d2b038171..67f2162606 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java @@ -1,4 +1,4 @@ -/** +/* * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file @@ -26,6 +26,7 @@ import static org.junit.Assert.assertTrue; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; +import java.io.IOException; import java.util.List; import java.util.concurrent.atomic.LongAdder; @@ -38,7 +39,6 @@ import org.apache.hadoop.hbase.CoordinatedStateManagerFactory; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.SplitLogCounters; import org.apache.hadoop.hbase.SplitLogTask; @@ -50,7 +50,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.SplitLo import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.util.CancelableProgressable; -import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; import org.apache.hadoop.hbase.zookeeper.ZKSplitLog; import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; @@ -79,16 +78,16 @@ public class TestSplitLogWorker { private ExecutorService executorService; private RecoveryMode mode; - class DummyServer implements Server { + class DummyServer extends HRegionServer { private ZooKeeperWatcher zkw; private Configuration conf; private CoordinatedStateManager cm; - public DummyServer(ZooKeeperWatcher zkw, Configuration conf) { + public DummyServer(ZooKeeperWatcher zkw, Configuration conf) throws IOException { + super(conf, CoordinatedStateManagerFactory.getCoordinatedStateManager(conf)); this.zkw = zkw; this.conf = conf; - cm = CoordinatedStateManagerFactory.getCoordinatedStateManager(conf); - cm.initialize(this); + getCoordinatedStateManager().initialize(this); } @Override @@ -125,30 +124,14 @@ public class TestSplitLogWorker { } @Override - public CoordinatedStateManager 
getCoordinatedStateManager() { - return cm; - } - - @Override public ClusterConnection getConnection() { return null; } @Override - public MetaTableLocator getMetaTableLocator() { - return null; - } - - @Override public ChoreService getChoreService() { return null; } - - @Override - public ClusterConnection getClusterConnection() { - // TODO Auto-generated method stub - return null; - } } private void waitForCounter(LongAdder ctr, long oldval, long newval, long timems) @@ -239,14 +222,12 @@ public class TestSplitLogWorker { SplitLogCounters.resetCounters(); final String TATAS = "tatas"; final ServerName RS = ServerName.valueOf("rs,1,1"); - RegionServerServices mockedRS = getRegionServer(RS); zkw.getRecoverableZooKeeper().create(ZKSplitLog.getEncodedNodeName(zkw, TATAS), new SplitLogTask.Unassigned(ServerName.valueOf("mgr,1,1"), this.mode).toByteArray(), Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT); - SplitLogWorker slw = - new SplitLogWorker(ds, TEST_UTIL.getConfiguration(), mockedRS, neverEndingTask); + SplitLogWorker slw = new SplitLogWorker(ds, TEST_UTIL.getConfiguration(), neverEndingTask); slw.start(); try { waitForCounter(SplitLogCounters.tot_wkr_task_acquired, 0, 1, WAIT_TIME); @@ -279,12 +260,10 @@ public class TestSplitLogWorker { zkw.getRecoverableZooKeeper().create(ZKSplitLog.getEncodedNodeName(zkw, TRFT), new SplitLogTask.Unassigned(MANAGER, this.mode).toByteArray(), Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT); - RegionServerServices mockedRS1 = getRegionServer(SVR1); - RegionServerServices mockedRS2 = getRegionServer(SVR2); SplitLogWorker slw1 = - new SplitLogWorker(ds, TEST_UTIL.getConfiguration(), mockedRS1, neverEndingTask); + new SplitLogWorker(ds, TEST_UTIL.getConfiguration(), neverEndingTask); SplitLogWorker slw2 = - new SplitLogWorker(ds, TEST_UTIL.getConfiguration(), mockedRS2, neverEndingTask); + new SplitLogWorker(ds, TEST_UTIL.getConfiguration(), neverEndingTask); slw1.start(); slw2.start(); try { @@ -309,9 +288,7 @@ public class 
TestSplitLogWorker { SplitLogCounters.resetCounters(); final ServerName SRV = ServerName.valueOf("tpt_svr,1,1"); final String PATH = ZKSplitLog.getEncodedNodeName(zkw, "tpt_task"); - RegionServerServices mockedRS = getRegionServer(SRV); - SplitLogWorker slw = - new SplitLogWorker(ds, TEST_UTIL.getConfiguration(), mockedRS, neverEndingTask); + SplitLogWorker slw = new SplitLogWorker(ds, TEST_UTIL.getConfiguration(), neverEndingTask); slw.start(); try { Thread.yield(); // let the worker start @@ -342,9 +319,7 @@ public class TestSplitLogWorker { SplitLogCounters.resetCounters(); final ServerName SRV = ServerName.valueOf("tmt_svr,1,1"); final String PATH1 = ZKSplitLog.getEncodedNodeName(zkw, "tmt_task"); - RegionServerServices mockedRS = getRegionServer(SRV); - SplitLogWorker slw = - new SplitLogWorker(ds, TEST_UTIL.getConfiguration(), mockedRS, neverEndingTask); + SplitLogWorker slw = new SplitLogWorker(ds, TEST_UTIL.getConfiguration(), neverEndingTask); slw.start(); try { Thread.yield(); // let the worker start @@ -385,8 +360,7 @@ public class TestSplitLogWorker { LOG.info("testRescan"); SplitLogCounters.resetCounters(); final ServerName SRV = ServerName.valueOf("svr,1,1"); - RegionServerServices mockedRS = getRegionServer(SRV); - slw = new SplitLogWorker(ds, TEST_UTIL.getConfiguration(), mockedRS, neverEndingTask); + slw = new SplitLogWorker(ds, TEST_UTIL.getConfiguration(), neverEndingTask); slw.start(); Thread.yield(); // let the worker start Thread.sleep(100); @@ -441,14 +415,13 @@ public class TestSplitLogWorker { final int maxTasks = 3; Configuration testConf = HBaseConfiguration.create(TEST_UTIL.getConfiguration()); testConf.setInt("hbase.regionserver.wal.max.splitters", maxTasks); - RegionServerServices mockedRS = getRegionServer(RS); for (int i = 0; i < maxTasks; i++) { zkw.getRecoverableZooKeeper().create(ZKSplitLog.getEncodedNodeName(zkw, TATAS + i), new SplitLogTask.Unassigned(ServerName.valueOf("mgr,1,1"), this.mode).toByteArray(), Ids.OPEN_ACL_UNSAFE, 
CreateMode.PERSISTENT); } - SplitLogWorker slw = new SplitLogWorker(ds, testConf, mockedRS, neverEndingTask); + SplitLogWorker slw = new SplitLogWorker(ds, testConf, neverEndingTask); slw.start(); try { waitForCounter(SplitLogCounters.tot_wkr_task_acquired, 0, maxTasks, WAIT_TIME); @@ -477,7 +450,6 @@ public class TestSplitLogWorker { final int maxTasks = 3; Configuration testConf = HBaseConfiguration.create(TEST_UTIL.getConfiguration()); testConf.setInt("hbase.regionserver.wal.max.splitters", maxTasks); - RegionServerServices mockedRS = getRegionServer(RS); // create two RS nodes String rsPath = ZKUtil.joinZNode(zkw.znodePaths.rsZNode, RS.getServerName()); @@ -491,7 +463,7 @@ public class TestSplitLogWorker { Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT); } - SplitLogWorker slw = new SplitLogWorker(ds, testConf, mockedRS, neverEndingTask); + SplitLogWorker slw = new SplitLogWorker(ds, testConf, neverEndingTask); slw.start(); try { int acquiredTasks = 0; @@ -511,19 +483,14 @@ public class TestSplitLogWorker { /** * Create a mocked region server service instance - * @param server - * @return */ - private RegionServerServices getRegionServer(ServerName name) { - - RegionServerServices mockedServer = mock(RegionServerServices.class); + private HRegionServer getRegionServer(ServerName name) { + HRegionServer mockedServer = mock(HRegionServer.class); when(mockedServer.getConfiguration()).thenReturn(TEST_UTIL.getConfiguration()); when(mockedServer.getServerName()).thenReturn(name); when(mockedServer.getZooKeeper()).thenReturn(zkw); when(mockedServer.isStopped()).thenReturn(false); when(mockedServer.getExecutorService()).thenReturn(executorService); - return mockedServer; } - } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java index a8a60abb89..f6df9da068 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java @@ -1,4 +1,4 @@ -/** +/* * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file @@ -33,7 +33,6 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.ChoreService; -import org.apache.hadoop.hbase.CoordinatedStateManager; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HTableDescriptor; @@ -55,7 +54,6 @@ import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WALKey; import org.apache.hadoop.hbase.wal.WALProvider.Writer; -import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.junit.After; import org.junit.Before; @@ -510,21 +508,11 @@ public class TestWALLockup { } @Override - public CoordinatedStateManager getCoordinatedStateManager() { - return null; - } - - @Override public ClusterConnection getConnection() { return null; } @Override - public MetaTableLocator getMetaTableLocator() { - return null; - } - - @Override public ServerName getServerName() { return ServerName.valueOf(this.serverName); } @@ -554,12 +542,6 @@ public class TestWALLockup { public ChoreService getChoreService() { return null; } - - @Override - public ClusterConnection getClusterConnection() { - return null; - } - } static class DummyWALActionsListener extends WALActionsListener.Base { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateHBaseImpl.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateHBaseImpl.java index 3685d6daaf..8b2d3a5b4c 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateHBaseImpl.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateHBaseImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,14 +20,12 @@ package org.apache.hadoop.hbase.replication; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ChoreService; -import org.apache.hadoop.hbase.CoordinatedStateManager; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.ReplicationTests; -import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.apache.zookeeper.KeeperException; @@ -416,21 +414,11 @@ public class TestReplicationStateHBaseImpl { } @Override - public CoordinatedStateManager getCoordinatedStateManager() { - return null; - } - - @Override public ClusterConnection getConnection() { return null; } @Override - public MetaTableLocator getMetaTableLocator() { - return null; - } - - @Override public ServerName getServerName() { return ServerName.valueOf(this.serverName); } @@ -461,11 +449,6 @@ public class TestReplicationStateHBaseImpl { return null; } - @Override - public ClusterConnection getClusterConnection() { - return null; - } - public int getAbortCount() { return abortCount; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateZKImpl.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateZKImpl.java index 
36f762eb2d..58791cabd5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateZKImpl.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateZKImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -28,7 +28,6 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ChoreService; import org.apache.hadoop.hbase.ClusterId; -import org.apache.hadoop.hbase.CoordinatedStateManager; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.Server; @@ -36,7 +35,6 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.ReplicationTests; -import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; import org.apache.hadoop.hbase.zookeeper.ZKClusterId; import org.apache.hadoop.hbase.zookeeper.ZKConfig; import org.apache.hadoop.hbase.zookeeper.ZKUtil; @@ -155,21 +153,11 @@ public class TestReplicationStateZKImpl extends TestReplicationStateBasic { } @Override - public CoordinatedStateManager getCoordinatedStateManager() { - return null; - } - - @Override public ClusterConnection getConnection() { return null; } @Override - public MetaTableLocator getMetaTableLocator() { - return null; - } - - @Override public ServerName getServerName() { return ServerName.valueOf(this.serverName); } @@ -199,11 +187,5 @@ public class TestReplicationStateZKImpl extends TestReplicationStateBasic { public ChoreService getChoreService() { return null; } - - @Override - public ClusterConnection getClusterConnection() { - // TODO Auto-generated method stub - return null; - } 
} } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationTrackerZKImpl.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationTrackerZKImpl.java index f4ae59e839..506c5c99a3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationTrackerZKImpl.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationTrackerZKImpl.java @@ -31,7 +31,6 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ChoreService; import org.apache.hadoop.hbase.ClusterId; -import org.apache.hadoop.hbase.CoordinatedStateManager; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.Server; @@ -39,7 +38,6 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.ReplicationTests; -import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; import org.apache.hadoop.hbase.zookeeper.ZKClusterId; import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; @@ -252,21 +250,11 @@ public class TestReplicationTrackerZKImpl { } @Override - public CoordinatedStateManager getCoordinatedStateManager() { - return null; - } - - @Override public ClusterConnection getConnection() { return null; } @Override - public MetaTableLocator getMetaTableLocator() { - return null; - } - - @Override public ServerName getServerName() { return ServerName.valueOf(this.serverName); } @@ -296,11 +284,5 @@ public class TestReplicationTrackerZKImpl { public ChoreService getChoreService() { return null; } - - @Override - public ClusterConnection getClusterConnection() { - // TODO Auto-generated method stub - return null; - } } } diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java index 3934e05e97..bdf4283bf5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java @@ -674,20 +674,11 @@ public abstract class TestReplicationSourceManager { } @Override - public CoordinatedStateManager getCoordinatedStateManager() { - return null; - } - @Override public ClusterConnection getConnection() { return null; } @Override - public MetaTableLocator getMetaTableLocator() { - return null; - } - - @Override public ServerName getServerName() { return ServerName.valueOf(hostname, 1234, 1L); } @@ -716,11 +707,5 @@ public abstract class TestReplicationSourceManager { public ChoreService getChoreService() { return null; } - - @Override - public ClusterConnection getClusterConnection() { - // TODO Auto-generated method stub - return null; - } } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java index 56a66e93a9..9b1c1c38eb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java @@ -38,7 +38,6 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ChoreService; import org.apache.hadoop.hbase.ClusterId; -import org.apache.hadoop.hbase.CoordinatedStateManager; import org.apache.hadoop.hbase.Coprocessor; import org.apache.hadoop.hbase.HBaseTestingUtility; import 
org.apache.hadoop.hbase.HRegionInfo; @@ -51,7 +50,6 @@ import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; -import org.apache.hadoop.hbase.coprocessor.RegionObserver; import org.apache.hadoop.hbase.ipc.FifoRpcScheduler; import org.apache.hadoop.hbase.ipc.NettyRpcServer; import org.apache.hadoop.hbase.ipc.RpcServer; @@ -78,7 +76,6 @@ import org.apache.hadoop.hbase.util.Sleeper; import org.apache.hadoop.hbase.util.Strings; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.util.Writables; -import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; import org.apache.hadoop.hbase.zookeeper.ZKClusterId; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.apache.hadoop.net.DNS; @@ -219,21 +216,11 @@ public class TestTokenAuthentication { } @Override - public MetaTableLocator getMetaTableLocator() { - return null; - } - - @Override public ZooKeeperWatcher getZooKeeper() { return zookeeper; } @Override - public CoordinatedStateManager getCoordinatedStateManager() { - return null; - } - - @Override public boolean isAborted() { return aborted; } @@ -403,12 +390,6 @@ public class TestTokenAuthentication { public ChoreService getChoreService() { return null; } - - @Override - public ClusterConnection getClusterConnection() { - // TODO Auto-generated method stub - return null; - } } @Parameters(name = "{index}: rpcServerImpl={0}") diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java deleted file mode 100644 index 90fb5217a7..0000000000 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java +++ /dev/null @@ -1,136 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license 
agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.util; - -import java.io.IOException; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.ChoreService; -import org.apache.hadoop.hbase.CoordinatedStateManager; -import org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.Server; -import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.ZooKeeperConnectionException; -import org.apache.hadoop.hbase.client.ClusterConnection; -import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; - -/** - * Basic mock Server for handler tests. - */ -public class MockServer implements Server { - private static final Log LOG = LogFactory.getLog(MockServer.class); - final static ServerName NAME = ServerName.valueOf("MockServer", 123, -1); - - boolean stopped; - boolean aborted; - final ZooKeeperWatcher zk; - final HBaseTestingUtility htu; - - @SuppressWarnings("unused") - public MockServer() throws ZooKeeperConnectionException, IOException { - // Shutdown default constructor by making it private. 
- this(null); - } - - public MockServer(final HBaseTestingUtility htu) - throws ZooKeeperConnectionException, IOException { - this(htu, true); - } - - /** - * @param htu Testing utility to use - * @param zkw If true, create a zkw. - * @throws ZooKeeperConnectionException - * @throws IOException - */ - public MockServer(final HBaseTestingUtility htu, final boolean zkw) - throws ZooKeeperConnectionException, IOException { - this.htu = htu; - this.zk = zkw? - new ZooKeeperWatcher(htu.getConfiguration(), NAME.toString(), this, true): - null; - } - - @Override - public void abort(String why, Throwable e) { - LOG.fatal("Abort why=" + why, e); - stop(why); - this.aborted = true; - } - - @Override - public void stop(String why) { - LOG.debug("Stop why=" + why); - this.stopped = true; - } - - @Override - public boolean isStopped() { - return this.stopped; - } - - @Override - public Configuration getConfiguration() { - return this.htu.getConfiguration(); - } - - @Override - public ZooKeeperWatcher getZooKeeper() { - return this.zk; - } - - @Override - public CoordinatedStateManager getCoordinatedStateManager() { - return null; - } - - @Override - public ClusterConnection getConnection() { - return null; - } - - @Override - public MetaTableLocator getMetaTableLocator() { - return null; - } - - @Override - public ServerName getServerName() { - return NAME; - } - - @Override - public boolean isAborted() { - // TODO Auto-generated method stub - return this.aborted; - } - - @Override - public ChoreService getChoreService() { - return null; - } - - @Override - public ClusterConnection getClusterConnection() { - // TODO Auto-generated method stub - return null; - } -} -- 2.11.0 (Apple Git-81)