From 683f2cce5b523048449d2a5bb1e3c8f959d03d5e Mon Sep 17 00:00:00 2001
From: Michael Stack
Date: Fri, 24 Feb 2017 17:10:03 +0000
Subject: [PATCH] HBASE-14614 Procedure v2 - Core Assignment Manager (Matteo Bertozzi)

Includes two JIRAs from Matteo's repository and then fixup to get it all to pass, fix findbugs, etc. I apply the two patches in one go because applying each independently puts hbase in a non-working state.

1. HBASE-14616 Procedure v2 - Replace the old AM with the new AM
This comes from Matteo's repo here: https://github.com/matteobertozzi/hbase/commit/689227fcbfe8e6588433dbcdabf4526e3d478b2e
Patch replaces the old AM with the new one under the subpackage master.assignment. Mostly just updating classes to use the new AM -- import changes -- rather than the old. It also removes the old AM and supporting classes. See below for more detail.

2. HBASE-14614 Procedure v2 - Core Assignment Manager (Matteo Bertozzi)
Adds running of remote procedures. Adds batching of remote calls. Adds support for assign/unassign in procedures. Adds version info reporting in rpc. Adds the start of an AMv2.
This work mostly comes from: https://github.com/matteobertozzi/hbase/commit/3622cba4e331d2fc7bfc1932abb4c9cbf5802efa
Reporting of the remote RS version is from here: https://github.com/matteobertozzi/hbase/commit/ddb4df3964e8298c88c0210e83493aa91ac0942d.patch
And remote dispatch of procedures is from: https://github.com/matteobertozzi/hbase/commit/186b9e7c4dae61a79509a6c3aad7f80ec61345e5

Adds a testing util for the new AM and new sets of tests.

Details:

M hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
 Takes a List of RegionStates on construction rather than a Set. NOTE!!!!! This is a change in a public class.

M hbase-client/src/main/java/org/apache/hadoop/hbase/master/RegionState.java
 Purge old overlapping states: PENDING_OPEN, PENDING_CLOSE, etc.

A hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
 Dispatch remote procedures every 150ms or 32 items -- whichever happens first (configurable). Runs a timeout thread. Carries the notion of a remote procedure and of a buffer full of these.
 "hbase.procedure.remote.dispatcher.threadpool.size" with default = 128
 "hbase.procedure.remote.dispatcher.delay.msec" with default = 150ms
 "hbase.procedure.remote.dispatcher.max.queue.size" with default = 32

M hbase-protocol-shaded/src/main/protobuf/Admin.proto
 Add the ExecuteProcedures call.

M hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
 Add assign and unassign state support for procedures.

M hbase-server/src/main/java/org/apache/hadoop/hbase/client/VersionInfoUtil.java
 Adds getting the RS version out of the RPC. Examples: (1.3.4 is 0x0103004, 2.1.0 is 0x0201000)

M hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
 Remove the periodic metrics chore. This is done over in the new AM now. Replace the AM with the new one.

M hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMetaBootstrap.java
 Have AMv2 handle assigning meta.

M hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
 Extract the version number of the server making the rpc.

A hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignProcedure.java
 Add new assign procedure. Runs assign via Procedure Dispatch.
 There can only be one RegionTransitionProcedure per region running at a time, since each procedure takes a lock on the region.

D hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignCallable.java
D hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
D hbase-server/src/main/java/org/apache/hadoop/hbase/master/BulkAssigner.java
D hbase-server/src/main/java/org/apache/hadoop/hbase/master/BulkReOpen.java
D hbase-server/src/main/java/org/apache/hadoop/hbase/master/GeneralBulkAssigner.java
D hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStateStore.java
D hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
D hbase-server/src/main/java/org/apache/hadoop/hbase/master/UnAssignCallable.java

A hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
 A procedure-based AM (AMv2).
 TODO
 - handle region migration
 - handle meta assignment first
 - handle sys table assignment first (e.g. acl, namespace)
 - handle table priorities
 "hbase.assignment.bootstrap.thread.pool.size"; default size is 16.
 "hbase.assignment.dispatch.wait.msec"; default wait is 150
 "hbase.assignment.dispatch.wait.queue.max.size"; wait max default is 100
 "hbase.assignment.rit.chore.interval.msec"; default is 5 * 1000;
 "hbase.assignment.maximum.attempts"; default is 10;

A hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MoveRegionProcedure.java
 Procedure that runs subprocedures to unassign and then assign to the new location.

A hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java
 Manage the store of region state (in hbase:meta by default).

A hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStates.java
 In-memory state of all regions. Used by AMv2.

A hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.java
 Base RIT procedure for Assign and Unassign.

A hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/UnassignProcedure.java
 Unassign procedure.

A hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java
 Run region assignment in a manner that pays attention to the target server version. Adds "hbase.regionserver.rpc.startup.waittime"; defaults to 60 seconds.
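For illustration only (not part of the patch): the RemoteProcedureDispatcher batching described above -- buffer operations per target server and flush either when the configured delay elapses or when the queue-size threshold is hit, whichever comes first -- boils down to a sketch like the one below. Class and member names are made up for the sketch; only the two defaults (150ms delay, 32 items) come from the description above.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;

/** Sketch: flush buffered operations after a delay or once a max size is reached. */
public class DispatchBufferSketch<TOp> {
  private final int maxQueueSize;   // cf. hbase.procedure.remote.dispatcher.max.queue.size (32)
  private final long delayMillis;   // cf. hbase.procedure.remote.dispatcher.delay.msec (150)
  private final ScheduledExecutorService timer = Executors.newSingleThreadScheduledExecutor();
  private final List<TOp> buffer = new ArrayList<>();
  private ScheduledFuture<?> pendingFlush;

  public DispatchBufferSketch(int maxQueueSize, long delayMillis) {
    this.maxQueueSize = maxQueueSize;
    this.delayMillis = delayMillis;
  }

  /** Add an operation; dispatch immediately if the size threshold is crossed. */
  public synchronized void add(TOp op) {
    if (buffer.isEmpty()) {
      // First item: arm the delay timer so a sparse stream still gets sent.
      pendingFlush = timer.schedule(this::dispatch, delayMillis, TimeUnit.MILLISECONDS);
    }
    buffer.add(op);
    if (buffer.size() >= maxQueueSize) {
      pendingFlush.cancel(false);
      dispatch();
    }
  }

  /** Send whatever is buffered as one batch (here we just print it). */
  public synchronized void dispatch() {
    if (buffer.isEmpty()) return;
    System.out.println("dispatching " + buffer.size() + " operations: " + buffer);
    buffer.clear();
  }

  public static void main(String[] args) throws Exception {
    DispatchBufferSketch<String> node = new DispatchBufferSketch<>(32, 150);
    node.add("assign region A");   // timer armed here
    node.add("unassign region B"); // buffered
    Thread.sleep(300);             // delay elapses -> both go out in one batch
    node.timer.shutdown();
  }
}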
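Similarly for VersionInfoUtil, also for illustration only: the two sample values above imply the version number packs major/minor/patch as (major << 20) | (minor << 12) | patch. The tiny sketch below reproduces those examples; the helper name is made up and is not the method the patch adds.

/** Sketch of the version packing implied by "1.3.4 is 0x0103004, 2.1.0 is 0x0201000". */
public class VersionNumberSketch {

  /** Pack major/minor/patch into one int: major in bits 20+, minor in bits 12-19, patch below. */
  static int packVersion(int major, int minor, int patch) {
    return (major << 20) | (minor << 12) | patch;
  }

  public static void main(String[] args) {
    // Reproduces the two examples from the description above.
    System.out.printf("1.3.4 -> 0x%07X%n", packVersion(1, 3, 4)); // 0x0103004
    System.out.printf("2.1.0 -> 0x%07X%n", packVersion(2, 1, 0)); // 0x0201000
  }
}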
Signed-off-by: Michael Stack --- .../org/apache/hadoop/hbase/ClusterStatus.java | 8 +- .../apache/hadoop/hbase/master/RegionState.java | 24 +- .../apache/hadoop/hbase/protobuf/ProtobufUtil.java | 2 +- .../hadoop/hbase/shaded/protobuf/ProtobufUtil.java | 4 +- .../hbase/shaded/protobuf/RequestConverter.java | 2 +- .../java/org/apache/hadoop/hbase/ChoreService.java | 2 +- .../java/org/apache/hadoop/hbase/TableName.java | 7 +- .../procedure2/AbstractProcedureScheduler.java | 5 +- .../hadoop/hbase/procedure2/LockAndQueue.java | 4 +- .../apache/hadoop/hbase/procedure2/Procedure.java | 6 +- .../hadoop/hbase/procedure2/ProcedureEvent.java | 3 +- .../hadoop/hbase/procedure2/ProcedureExecutor.java | 12 +- .../hbase/procedure2/ProcedureScheduler.java | 4 +- .../procedure2/RemoteProcedureDispatcher.java | 362 ++ .../hbase/procedure2/StateMachineProcedure.java | 3 + .../hbase/procedure2/ProcedureTestingUtility.java | 1 + .../shaded/protobuf/generated/AdminProtos.java | 2439 ++++++++++++- .../protobuf/generated/MasterProcedureProtos.java | 3665 ++++++++++++++++++-- .../src/main/protobuf/Admin.proto | 13 + .../src/main/protobuf/MasterProcedure.proto | 31 + .../hadoop/hbase/rsgroup/RSGroupAdminServer.java | 14 +- .../balancer/TestRSGroupBasedLoadBalancer.java | 2 +- .../tmpl/master/AssignmentManagerStatusTmpl.jamon | 7 +- .../hbase/tmpl/master/MasterStatusTmpl.jamon | 2 +- .../hadoop/hbase/client/VersionInfoUtil.java | 81 +- .../hadoop/hbase/ipc/BalancedQueueRpcExecutor.java | 3 - .../ipc/FastPathBalancedQueueRpcExecutor.java | 2 +- .../hadoop/hbase/ipc/SimpleRpcScheduler.java | 10 +- .../apache/hadoop/hbase/ipc/SimpleRpcServer.java | 4 +- .../apache/hadoop/hbase/master/AssignCallable.java | 49 - .../hadoop/hbase/master/AssignmentManager.java | 3057 ---------------- .../apache/hadoop/hbase/master/BulkAssigner.java | 122 - .../org/apache/hadoop/hbase/master/BulkReOpen.java | 136 - .../apache/hadoop/hbase/master/CatalogJanitor.java | 5 +- .../hadoop/hbase/master/GeneralBulkAssigner.java | 214 -- .../org/apache/hadoop/hbase/master/HMaster.java | 104 +- .../apache/hadoop/hbase/master/LoadBalancer.java | 4 +- .../hadoop/hbase/master/MasterDumpServlet.java | 8 +- .../hadoop/hbase/master/MasterMetaBootstrap.java | 43 +- .../hadoop/hbase/master/MasterRpcServices.java | 101 +- .../apache/hadoop/hbase/master/MasterServices.java | 1 + .../hadoop/hbase/master/MasterWalManager.java | 9 +- .../hadoop/hbase/master/RegionStateStore.java | 268 -- .../apache/hadoop/hbase/master/RegionStates.java | 1184 ------- .../apache/hadoop/hbase/master/ServerManager.java | 23 +- .../hadoop/hbase/master/TableNamespaceManager.java | 5 +- .../hadoop/hbase/master/UnAssignCallable.java | 47 - .../hbase/master/assignment/AssignProcedure.java | 269 ++ .../hbase/master/assignment/AssignmentManager.java | 1520 ++++++++ .../master/assignment/MoveRegionProcedure.java | 147 + .../assignment/RegionAlreadyAssignedException.java | 42 + .../assignment/RegionNotAssignedException.java | 42 + .../hbase/master/assignment/RegionStateStore.java | 212 ++ .../hbase/master/assignment/RegionStates.java | 709 ++++ .../assignment/RegionTransitionProcedure.java | 313 ++ .../hbase/master/assignment/UnassignProcedure.java | 216 ++ .../hbase/master/balancer/BaseLoadBalancer.java | 2 +- .../master/balancer/RegionLocationFinder.java | 14 +- .../hbase/master/balancer/SimpleLoadBalancer.java | 9 +- .../master/balancer/StochasticLoadBalancer.java | 17 +- .../AbstractStateMachineTableProcedure.java | 3 +- .../master/procedure/AddColumnFamilyProcedure.java | 31 +- 
.../master/procedure/CloneSnapshotProcedure.java | 4 +- .../master/procedure/CreateTableProcedure.java | 41 +- .../procedure/DeleteColumnFamilyProcedure.java | 31 +- .../master/procedure/DeleteTableProcedure.java | 10 +- .../master/procedure/DisableTableProcedure.java | 154 +- .../master/procedure/EnableTableProcedure.java | 173 +- .../master/procedure/MasterDDLOperationHelper.java | 95 +- .../master/procedure/MasterProcedureConstants.java | 2 +- .../hbase/master/procedure/MasterProcedureEnv.java | 28 +- .../master/procedure/MasterProcedureScheduler.java | 48 +- .../procedure/MergeTableRegionsProcedure.java | 16 +- .../procedure/ModifyColumnFamilyProcedure.java | 30 +- .../master/procedure/ModifyTableProcedure.java | 30 +- .../hbase/master/procedure/ProcedureSyncWait.java | 144 +- .../master/procedure/RSProcedureDispatcher.java | 542 +++ .../master/procedure/RestoreSnapshotProcedure.java | 27 +- .../master/procedure/ServerCrashProcedure.java | 499 +-- .../procedure/SplitTableRegionProcedure.java | 111 +- .../master/procedure/TruncateTableProcedure.java | 6 +- .../hadoop/hbase/quotas/MasterQuotaManager.java | 18 +- .../hadoop/hbase/regionserver/CompactSplit.java | 722 ++++ .../hbase/regionserver/CompactSplitThread.java | 722 ---- .../hadoop/hbase/regionserver/HRegionServer.java | 12 +- .../hadoop/hbase/regionserver/RSRpcServices.java | 27 +- .../org/apache/hadoop/hbase/util/HBaseFsck.java | 2 +- .../hadoop/hbase/util/ModifyRegionUtils.java | 24 +- .../org/apache/hadoop/hbase/wal/WALSplitter.java | 5 +- .../apache/hadoop/hbase/HBaseTestingUtility.java | 7 +- .../apache/hadoop/hbase/TestRegionRebalancing.java | 13 +- .../org/apache/hadoop/hbase/client/TestAdmin1.java | 17 +- .../org/apache/hadoop/hbase/client/TestAdmin2.java | 2 +- .../org/apache/hadoop/hbase/client/TestHCM.java | 10 +- .../hadoop/hbase/client/TestMetaWithReplicas.java | 3 - .../hbase/client/TestScannersFromClientSide.java | 10 +- .../hadoop/hbase/client/TestTableFavoredNodes.java | 2 +- .../hbase/io/encoding/TestChangingEncoding.java | 3 +- .../hadoop/hbase/ipc/TestSimpleRpcScheduler.java | 11 +- .../hbase/master/MockNoopMasterServices.java | 1 + .../hadoop/hbase/master/MockRegionServer.java | 8 + .../hbase/master/TestAssignmentListener.java | 1 + .../master/TestAssignmentManagerOnCluster.java | 1404 -------- .../hadoop/hbase/master/TestCatalogJanitor.java | 1 + .../hbase/master/TestDistributedLogSplitting.java | 1 + .../org/apache/hadoop/hbase/master/TestMaster.java | 1 + .../hbase/master/TestMasterBalanceThrottling.java | 9 +- .../hadoop/hbase/master/TestMasterFailover.java | 19 +- .../hbase/master/TestMasterStatusServlet.java | 4 +- .../hbase/master/TestMetaShutdownHandler.java | 1 + .../hadoop/hbase/master/TestRegionState.java | 17 +- .../hadoop/hbase/master/TestRegionStates.java | 144 - .../hadoop/hbase/master/TestRestartCluster.java | 1 + .../master/assignment/AssignmentTestingUtil.java | 125 + .../master/assignment/MockMasterServices.java | 201 ++ .../master/assignment/TestAssignmentManager.java | 568 +++ .../master/assignment/TestAssignmentOnRSCrash.java | 185 + .../hbase/master/assignment/TestRegionStates.java | 226 ++ .../procedure/MasterProcedureTestingUtility.java | 67 +- .../procedure/TestAddColumnFamilyProcedure.java | 34 +- .../procedure/TestCloneSnapshotProcedure.java | 10 +- .../procedure/TestCreateNamespaceProcedure.java | 4 +- .../master/procedure/TestCreateTableProcedure.java | 46 +- .../procedure/TestDeleteColumnFamilyProcedure.java | 31 +- .../procedure/TestDeleteNamespaceProcedure.java | 4 +- 
.../master/procedure/TestDeleteTableProcedure.java | 21 +- .../procedure/TestDisableTableProcedure.java | 24 +- .../master/procedure/TestEnableTableProcedure.java | 24 +- .../TestMasterFailoverWithProcedures.java | 23 +- .../procedure/TestMasterProcedureEvents.java | 2 +- .../procedure/TestModifyColumnFamilyProcedure.java | 9 +- .../procedure/TestModifyNamespaceProcedure.java | 4 +- .../master/procedure/TestModifyTableProcedure.java | 18 +- .../hbase/master/procedure/TestProcedureAdmin.java | 13 +- .../procedure/TestRestoreSnapshotProcedure.java | 13 +- .../master/procedure/TestServerCrashProcedure.java | 116 +- .../procedure/TestSplitTableRegionProcedure.java | 8 +- .../procedure/TestTableDDLProcedureBase.java | 7 +- .../procedure/TestTruncateTableProcedure.java | 11 +- .../hbase/namespace/TestNamespaceAuditor.java | 4 +- .../procedure/SimpleMasterProcedureManager.java | 2 +- .../hbase/regionserver/TestCompactSplitThread.java | 24 +- .../hadoop/hbase/regionserver/TestCompaction.java | 10 +- .../hbase/regionserver/TestHRegionFileSystem.java | 6 +- .../TestRegionMergeTransactionOnCluster.java | 14 +- .../TestSplitTransactionOnCluster.java | 32 +- .../regionserver/wal/TestAsyncLogRolling.java | 5 + .../hbase/regionserver/wal/TestLogRolling.java | 5 + .../regionserver/wal/TestSecureAsyncWALReplay.java | 5 + .../hbase/regionserver/wal/TestWALReplay.java | 5 + .../hadoop/hbase/util/BaseTestHBaseFsck.java | 4 +- .../apache/hadoop/hbase/util/TestHBaseFsckMOB.java | 2 +- .../hadoop/hbase/util/TestHBaseFsckOneRS.java | 17 +- .../hadoop/hbase/util/TestHBaseFsckReplicas.java | 2 +- .../hadoop/hbase/util/TestHBaseFsckTwoRS.java | 21 +- 155 files changed, 13388 insertions(+), 9425 deletions(-) create mode 100644 hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignCallable.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/master/BulkAssigner.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/master/BulkReOpen.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/master/GeneralBulkAssigner.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStateStore.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/master/UnAssignCallable.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignProcedure.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MoveRegionProcedure.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionAlreadyAssignedException.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionNotAssignedException.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStates.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.java create mode 100644 
hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/UnassignProcedure.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplit.java delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java delete mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java delete mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionStates.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/AssignmentTestingUtil.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentOnRSCrash.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStates.java diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java index f00016d..a7a26a6 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java @@ -24,8 +24,8 @@ import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; -import java.util.Set; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; @@ -81,7 +81,7 @@ public class ClusterStatus extends VersionedWritable { private Collection deadServers; private ServerName master; private Collection backupMasters; - private Set intransition; + private List intransition; private String clusterId; private String[] masterCoprocessors; private Boolean balancerOn; @@ -91,7 +91,7 @@ public class ClusterStatus extends VersionedWritable { final Collection deadServers, final ServerName master, final Collection backupMasters, - final Set rit, + final List rit, final String[] masterCoprocessors, final Boolean balancerOn) { this.hbaseVersion = hbaseVersion; @@ -262,7 +262,7 @@ public class ClusterStatus extends VersionedWritable { } @InterfaceAudience.Private - public Set getRegionsInTransition() { + public List getRegionsInTransition() { return this.intransition; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/master/RegionState.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/master/RegionState.java index a930732..7116763 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/master/RegionState.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/master/RegionState.java @@ -36,10 +36,8 @@ public class RegionState { @InterfaceStability.Evolving public enum State { OFFLINE, // region is in an offline state - PENDING_OPEN, // same as OPENING, to be removed OPENING, // server has begun to open but not yet done OPEN, // server opened region and updated meta - PENDING_CLOSE, // same as CLOSING, to be removed CLOSING, // server has begun to close but not yet done CLOSED, // server closed region and updated meta SPLITTING, // server started split of a region @@ -64,18 +62,12 @@ public 
class RegionState { case OFFLINE: rs = ClusterStatusProtos.RegionState.State.OFFLINE; break; - case PENDING_OPEN: - rs = ClusterStatusProtos.RegionState.State.PENDING_OPEN; - break; case OPENING: rs = ClusterStatusProtos.RegionState.State.OPENING; break; case OPEN: rs = ClusterStatusProtos.RegionState.State.OPEN; break; - case PENDING_CLOSE: - rs = ClusterStatusProtos.RegionState.State.PENDING_CLOSE; - break; case CLOSING: rs = ClusterStatusProtos.RegionState.State.CLOSING; break; @@ -124,8 +116,6 @@ public class RegionState { state = OFFLINE; break; case PENDING_OPEN: - state = PENDING_OPEN; - break; case OPENING: state = OPENING; break; @@ -133,8 +123,6 @@ public class RegionState { state = OPEN; break; case PENDING_CLOSE: - state = PENDING_CLOSE; - break; case CLOSING: state = CLOSING; break; @@ -166,7 +154,7 @@ public class RegionState { state = MERGING_NEW; break; default: - throw new IllegalStateException(""); + throw new IllegalStateException("Unhandled state " + protoState); } return state; } @@ -231,22 +219,16 @@ public class RegionState { this.ritDuration += (this.stamp - previousStamp); } - /** - * PENDING_CLOSE (to be removed) is the same as CLOSING - */ public boolean isClosing() { - return state == State.PENDING_CLOSE || state == State.CLOSING; + return state == State.CLOSING; } public boolean isClosed() { return state == State.CLOSED; } - /** - * PENDING_OPEN (to be removed) is the same as OPENING - */ public boolean isOpening() { - return state == State.PENDING_OPEN || state == State.OPENING; + return state == State.OPENING; } public boolean isOpened() { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java index 52ee8a5..0f83bb9 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java @@ -1804,7 +1804,7 @@ public final class ProtobufUtil { * has a serialized {@link ServerName} in it. * @return Returns null if data is null else converts passed data * to a ServerName instance. 
- * @throws DeserializationException + * @throws DeserializationException */ public static ServerName toServerName(final byte [] data) throws DeserializationException { if (data == null || data.length <= 0) return null; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java index 271a0de..d75b504 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java @@ -3001,8 +3001,8 @@ public final class ProtobufUtil { backupMasters.add(ProtobufUtil.toServerName(sn)); } - Set rit = null; - rit = new HashSet(proto.getRegionsInTransitionList().size()); + List rit = null; + rit = new ArrayList(proto.getRegionsInTransitionList().size()); for (RegionInTransition region : proto.getRegionsInTransitionList()) { RegionState value = RegionState.convert(region.getRegionState()); rit.add(value); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java index d3ef7b8..fda452f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java @@ -1504,7 +1504,7 @@ public final class RequestConverter { /** * Create a RegionOpenInfo based on given region info and version of offline node */ - private static RegionOpenInfo buildRegionOpenInfo( + public static RegionOpenInfo buildRegionOpenInfo( final HRegionInfo region, final List favoredNodes, Boolean openForReplay) { RegionOpenInfo.Builder builder = RegionOpenInfo.newBuilder(); diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/ChoreService.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/ChoreService.java index 99dc163..6a22020 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/ChoreService.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/ChoreService.java @@ -248,7 +248,7 @@ public class ChoreService implements ChoreServicer { */ static class ChoreServiceThreadFactory implements ThreadFactory { private final String threadPrefix; - private final static String THREAD_NAME_SUFFIX = "_ChoreService_"; + private final static String THREAD_NAME_SUFFIX = "_Chore_"; private AtomicInteger threadNumber = new AtomicInteger(1); /** diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java index 63066b3..7705d05 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java @@ -89,7 +89,12 @@ public final class TableName implements Comparable { public static final String OLD_META_STR = ".META."; public static final String OLD_ROOT_STR = "-ROOT-"; - + /** + * @return True if tn is the hbase:meta table name. + */ + public static boolean isMetaTableName(final TableName tn) { + return tn.equals(TableName.META_TABLE_NAME); + } /** * TableName for old -ROOT- table. 
It is used to read/process old WALs which have diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java index 2a678c0..f4043fc 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java @@ -25,13 +25,11 @@ import java.util.concurrent.TimeUnit; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.classification.InterfaceStability; +import com.google.common.annotations.VisibleForTesting; @InterfaceAudience.Private -@InterfaceStability.Evolving public abstract class AbstractProcedureScheduler implements ProcedureScheduler { private static final Log LOG = LogFactory.getLog(AbstractProcedureScheduler.class); - private final ReentrantLock schedLock = new ReentrantLock(); private final Condition schedWaitCond = schedLock.newCondition(); private boolean running = false; @@ -182,6 +180,7 @@ public abstract class AbstractProcedureScheduler implements ProcedureScheduler { protected abstract boolean queueHasRunnables(); @Override + @VisibleForTesting public void clear() { // NOTE: USED ONLY FOR TESTING schedLock(); diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockAndQueue.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockAndQueue.java index 19ba28c..b7b023c 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockAndQueue.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockAndQueue.java @@ -29,14 +29,14 @@ package org.apache.hadoop.hbase.procedure2; * assignment case as it will reduce memory footprint and number of objects to be GCed. * * - * NOT thread-safe. Needs external concurrency control. For eg. Uses in MasterProcedureScheduler are + *

NOT thread-safe. Needs external concurrency control: e.g. uses in MasterProcedureScheduler are * guarded by schedLock(). *
* There is no need of 'volatile' keyword for member variables because of memory synchronization * guarantees of locks (see 'Memory Synchronization', * http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/locks/Lock.html) *
- * We do not implement Lock interface because we need exclusive + shared locking, and also + * We do not implement Lock interface because we need exclusive and shared locking, and also * because try-lock functions require procedure id. *
* We do not use ReentrantReadWriteLock directly because of its high memory overhead. diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java index fee5250..2841697 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java @@ -253,9 +253,8 @@ public abstract class Procedure implements Comparable { */ protected StringBuilder toStringSimpleSB() { final StringBuilder sb = new StringBuilder(); - toStringClassDetails(sb); - sb.append(", procId="); + sb.append("procId="); sb.append(getProcId()); if (hasParent()) { @@ -275,6 +274,9 @@ public abstract class Procedure implements Comparable { sb.append(", failed=" + getException()); } + sb.append(", "); + toStringClassDetails(sb); + return sb; } diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureEvent.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureEvent.java index cb90ac0..dbeb794 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureEvent.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureEvent.java @@ -19,7 +19,6 @@ package org.apache.hadoop.hbase.procedure2; import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.classification.InterfaceStability; /** * Basic ProcedureEvent that contains an "object", which can be a description or a reference to the @@ -52,4 +51,4 @@ public class ProcedureEvent { public String toString() { return getClass().getSimpleName() + "(" + object + ")"; } -} +} \ No newline at end of file diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java index 0dc1624..88fba44 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java @@ -365,8 +365,7 @@ public class ProcedureExecutor { assert !(proc.isFinished() && !proc.hasParent()) : "unexpected completed proc=" + proc; if (debugEnabled) { - LOG.debug(String.format("Loading state=%s isFailed=%s: %s", - proc.getState(), proc.hasException(), proc)); + LOG.debug(String.format("Loading %s", proc)); } Long rootProcId = getRootProcedureId(proc); @@ -488,7 +487,7 @@ public class ProcedureExecutor { // We have numThreads executor + one timer thread used for timing out // procedures and triggering periodic procedures. this.corePoolSize = numThreads; - LOG.info("Starting executor threads=" + corePoolSize); + LOG.info("Starting executor worker threads=" + corePoolSize); // Create the Thread Group for the executors threadGroup = new ThreadGroup("ProcedureExecutor"); @@ -527,7 +526,9 @@ public class ProcedureExecutor { store.getClass().getSimpleName(), StringUtils.humanTimeDiff(et - st))); // Start the executors. Here we must have the lastProcId set. 
- LOG.debug("Start workers " + workerThreads.size()); + if (LOG.isTraceEnabled()) { + LOG.trace("Start workers " + workerThreads.size()); + } timeoutExecutor.start(); for (WorkerThread worker: workerThreads) { worker.start(); @@ -1152,8 +1153,7 @@ public class ProcedureExecutor { if (proc.isSuccess()) { if (LOG.isDebugEnabled()) { - LOG.debug("Completed in " + - StringUtils.humanTimeDiff(proc.elapsedTime()) + ": " + proc); + LOG.debug("Completed " + proc + " in " + StringUtils.humanTimeDiff(proc.elapsedTime())); } // Finalize the procedure state if (proc.getProcId() == rootProcId) { diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureScheduler.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureScheduler.java index 1793158..a2bbf03 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureScheduler.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureScheduler.java @@ -23,13 +23,11 @@ import com.google.common.annotations.VisibleForTesting; import java.util.concurrent.TimeUnit; import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.classification.InterfaceStability; /** * Keep track of the runnable procedures */ @InterfaceAudience.Private -@InterfaceStability.Evolving public interface ProcedureScheduler { /** * Start the scheduler @@ -93,7 +91,7 @@ public interface ProcedureScheduler { Procedure poll(long timeout, TimeUnit unit); /** - * Mark the event has not ready. + * Mark the event as not ready. * procedures calling waitEvent() will be suspended. * @param event the event to mark as suspended/not ready */ diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java new file mode 100644 index 0000000..b1c0b6f --- /dev/null +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java @@ -0,0 +1,362 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.procedure2; + +import java.io.IOException; +import java.lang.Thread.UncaughtExceptionHandler; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.concurrent.Callable; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.DelayQueue; +import java.util.concurrent.Future; +import java.util.concurrent.FutureTask; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.procedure2.util.DelayedUtil; +import org.apache.hadoop.hbase.procedure2.util.DelayedUtil.DelayedContainerWithTimestamp; +import org.apache.hadoop.hbase.procedure2.util.DelayedUtil.DelayedWithTimeout; +import org.apache.hadoop.hbase.procedure2.util.StringUtils; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.hadoop.hbase.util.Threads; + +import com.google.common.collect.ArrayListMultimap; + +/** + * A procedure dispatcher that aggregates and sends after elapsed time or after we hit + * count threshold. Creates its own threadpool to run RPCs with timeout. + *

<ul>
+ * <li>Each server queue has a dispatch buffer</li>
+ * <li>Once the dispatch buffer reaches a threshold-size/time we send</li>
+ * </ul>
+ * <p>
Call {@link #start()} and then {@link #submitTask(Callable)}. When done, + * call {@link #stop()}. + */ +@InterfaceAudience.Private +public abstract class RemoteProcedureDispatcher> { + private static final Log LOG = LogFactory.getLog(RemoteProcedureDispatcher.class); + + public static final String THREAD_POOL_SIZE_CONF_KEY = + "hbase.procedure.remote.dispatcher.threadpool.size"; + private static final int DEFAULT_THREAD_POOL_SIZE = 128; + + public static final String DISPATCH_DELAY_CONF_KEY = + "hbase.procedure.remote.dispatcher.delay.msec"; + private static final int DEFAULT_DISPATCH_DELAY = 150; + + public static final String DISPATCH_MAX_QUEUE_SIZE_CONF_KEY = + "hbase.procedure.remote.dispatcher.max.queue.size"; + private static final int DEFAULT_MAX_QUEUE_SIZE = 32; + + private final AtomicBoolean running = new AtomicBoolean(false); + private final ConcurrentHashMap nodeMap = + new ConcurrentHashMap(); + + private final int operationDelay; + private final int queueMaxSize; + private final int corePoolSize; + + private TimeoutExecutorThread timeoutExecutor; + private ThreadPoolExecutor threadPool; + + protected RemoteProcedureDispatcher(Configuration conf) { + this.corePoolSize = conf.getInt(THREAD_POOL_SIZE_CONF_KEY, DEFAULT_THREAD_POOL_SIZE); + this.operationDelay = conf.getInt(DISPATCH_DELAY_CONF_KEY, DEFAULT_DISPATCH_DELAY); + this.queueMaxSize = conf.getInt(DISPATCH_MAX_QUEUE_SIZE_CONF_KEY, DEFAULT_MAX_QUEUE_SIZE); + } + + public boolean start() { + if (running.getAndSet(true)) { + LOG.warn("Already running"); + return false; + } + + LOG.info("Starting procedure remote dispatcher; threads=" + this.corePoolSize + + ", queueMaxSize=" + this.queueMaxSize + ", operationDelay=" + this.operationDelay); + + // Create the timeout executor + timeoutExecutor = new TimeoutExecutorThread(); + timeoutExecutor.start(); + + // Create the thread pool that will execute RPCs + threadPool = Threads.getBoundedCachedThreadPool(corePoolSize, 60L, TimeUnit.SECONDS, + Threads.newDaemonThreadFactory("ProcedureRemoteDispatcher", getUncaughtExceptionHandler())); + return true; + } + + public boolean stop() { + if (!running.getAndSet(false)) { + return false; + } + + LOG.info("Stopping procedure remote dispatcher"); + + // send stop signals + timeoutExecutor.sendStopSignal(); + threadPool.shutdownNow(); + return true; + } + + public void join() { + assert !running.get() : "expected not running"; + + // wait the timeout executor + timeoutExecutor.awaitTermination(); + timeoutExecutor = null; + + // wait for the thread pool to terminate + threadPool.shutdownNow(); + try { + while (!threadPool.awaitTermination(60, TimeUnit.SECONDS)) { + LOG.warn("Waiting for thread-pool to terminate"); + } + } catch (InterruptedException e) { + LOG.warn("Interrupted while waiting for thread-pool termination", e); + } + } + + protected UncaughtExceptionHandler getUncaughtExceptionHandler() { + return new UncaughtExceptionHandler() { + @Override + public void uncaughtException(Thread t, Throwable e) { + LOG.warn("Failed to execute remote procedures " + t.getName(), e); + } + }; + } + + // ============================================================================================ + // Node Helpers + // ============================================================================================ + /** + * Add a node that will be able to execute remove procedures + * @param key the node identifier + */ + public void addNode(final TRemote key) { + final BufferNode newNode = new BufferNode(key); + nodeMap.putIfAbsent(key, newNode); + 
} + + /** + * Get the remote node that will execute remote procedures + * @param key the node identifier + */ + public RemoteNode getNode(final TRemote key) { + assert key != null : "found null key for node"; + return nodeMap.get(key); + } + + /** + * Remove a remote node + * @param key the node identifier + */ + public boolean removeNode(final TRemote key) { + final BufferNode node = nodeMap.remove(key); + if (node == null) return false; + node.abortOperationsInQueue(); + return true; + } + + // ============================================================================================ + // Task Helpers + // ============================================================================================ + protected Future submitTask(Callable task) { + return threadPool.submit(task); + } + + protected Future submitTask(Callable task, long delay, TimeUnit unit) { + final FutureTask futureTask = new FutureTask(task); + timeoutExecutor.add(new DelayedTask(futureTask, delay, unit)); + return futureTask; + } + + protected abstract void remoteDispatch(TRemote key, Set operations); + protected abstract void abortPendingOperations(TRemote key, Set operations); + + /** + * Data structure with reference to remote operation. + */ + public static abstract class RemoteOperation { + private final RemoteProcedure remoteProcedure; + + protected RemoteOperation(final RemoteProcedure remoteProcedure) { + this.remoteProcedure = remoteProcedure; + } + + public RemoteProcedure getRemoteProcedure() { + return remoteProcedure; + } + } + + /** + * Remote procedure reference. + * @param + * @param + */ + public interface RemoteProcedure { + RemoteOperation remoteCallBuild(TEnv env, TRemote remote); + void remoteCallCompleted(TEnv env, TRemote remote, RemoteOperation response); + void remoteCallFailed(TEnv env, TRemote remote, IOException exception); + } + + /** + * Account of what procedures are running on remote node. 
+ * @param + * @param + */ + public interface RemoteNode { + TRemote getKey(); + void add(RemoteProcedure operation); + void dispatch(); + } + + protected ArrayListMultimap, RemoteOperation> buildAndGroupRequestByType(final TEnv env, + final TRemote remote, final Set operations) { + final ArrayListMultimap, RemoteOperation> requestByType = ArrayListMultimap.create(); + for (RemoteProcedure proc: operations) { + RemoteOperation operation = proc.remoteCallBuild(env, remote); + requestByType.put(operation.getClass(), operation); + } + return requestByType; + } + + protected List fetchType( + final ArrayListMultimap, RemoteOperation> requestByType, final Class type) { + return (List)requestByType.removeAll(type); + } + + // ============================================================================================ + // Timeout Helpers + // ============================================================================================ + private final class TimeoutExecutorThread extends Thread { + private final DelayQueue queue = new DelayQueue(); + + public TimeoutExecutorThread() { + super("ProcedureDispatcherTimeoutThread"); + } + + @Override + public void run() { + while (running.get()) { + final DelayedWithTimeout task = DelayedUtil.takeWithoutInterrupt(queue); + if (task == null || task == DelayedUtil.DELAYED_POISON) { + // the executor may be shutting down, and the task is just the shutdown request + continue; + } + + if (task instanceof DelayedTask) { + threadPool.execute(((DelayedTask)task).getObject()); + } else { + ((BufferNode)task).dispatch(); + } + } + } + + public void add(final DelayedWithTimeout delayed) { + queue.add(delayed); + } + + public void remove(final DelayedWithTimeout delayed) { + queue.remove(delayed); + } + + public void sendStopSignal() { + queue.add(DelayedUtil.DELAYED_POISON); + } + + public void awaitTermination() { + try { + final long startTime = EnvironmentEdgeManager.currentTime(); + for (int i = 0; isAlive(); ++i) { + sendStopSignal(); + join(250); + if (i > 0 && (i % 8) == 0) { + LOG.warn("Waiting termination of thread " + getName() + ", " + + StringUtils.humanTimeDiff(EnvironmentEdgeManager.currentTime() - startTime)); + } + } + } catch (InterruptedException e) { + LOG.warn(getName() + " join wait got interrupted", e); + } + } + } + + // ============================================================================================ + // Internals Helpers + // ============================================================================================ + + /** + * Node that contains a set of RemoteProcedures + */ + protected final class BufferNode extends DelayedContainerWithTimestamp + implements RemoteNode { + private Set operations; + + protected BufferNode(final TRemote key) { + super(key, 0); + } + + public TRemote getKey() { + return getObject(); + } + + public synchronized void add(final RemoteProcedure operation) { + if (this.operations == null) { + this.operations = new HashSet(); + setTimeoutTimestamp(EnvironmentEdgeManager.currentTime() + operationDelay); + timeoutExecutor.add(this); + } + this.operations.add(operation); + if (this.operations.size() > queueMaxSize) { + timeoutExecutor.remove(this); + dispatch(); + } + } + + public synchronized void dispatch() { + if (operations != null) { + remoteDispatch(getKey(), operations); + this.operations = null; + } + } + + public synchronized void abortOperationsInQueue() { + if (operations != null) { + abortPendingOperations(getKey(), operations); + this.operations = null; + } + } + } + + /** + * Delayed 
object that holds a FutureTask. + * used to submit something later to the thread-pool. + */ + private static final class DelayedTask extends DelayedContainerWithTimestamp> { + public DelayedTask(final FutureTask task, final long delay, final TimeUnit unit) { + super(task, EnvironmentEdgeManager.currentTime() + unit.toMillis(delay)); + } + }; +} \ No newline at end of file diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/StateMachineProcedure.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/StateMachineProcedure.java index 3f9a7b7..7a77fa1 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/StateMachineProcedure.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/StateMachineProcedure.java @@ -108,6 +108,9 @@ public abstract class StateMachineProcedure if (aborted.get() && isRollbackSupported(getCurrentState())) { setAbortFailure(getClass().getSimpleName(), "abort requested"); } else { + if (aborted.get()) { + LOG.warn("ignoring abort request " + state); + } setNextState(getStateId(state)); } } diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.java index c1b4e9b..b286f0b 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.java @@ -24,6 +24,7 @@ import java.io.OutputStream; import java.util.concurrent.Callable; import java.util.ArrayList; import java.util.Set; +import java.util.concurrent.Callable; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; diff --git a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/AdminProtos.java b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/AdminProtos.java index b4bd84d..a68c266 100644 --- a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/AdminProtos.java +++ b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/AdminProtos.java @@ -24750,6 +24750,2259 @@ public final class AdminProtos { } + public interface ExecuteProceduresRequestOrBuilder extends + // @@protoc_insertion_point(interface_extends:hbase.pb.ExecuteProceduresRequest) + org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder { + + /** + * repeated .hbase.pb.OpenRegionRequest open_region = 1; + */ + java.util.List + getOpenRegionList(); + /** + * repeated .hbase.pb.OpenRegionRequest open_region = 1; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest getOpenRegion(int index); + /** + * repeated .hbase.pb.OpenRegionRequest open_region = 1; + */ + int getOpenRegionCount(); + /** + * repeated .hbase.pb.OpenRegionRequest open_region = 1; + */ + java.util.List + getOpenRegionOrBuilderList(); + /** + * repeated .hbase.pb.OpenRegionRequest open_region = 1; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequestOrBuilder getOpenRegionOrBuilder( + int index); + + /** + * repeated .hbase.pb.CloseRegionRequest close_region = 2; + */ + java.util.List + getCloseRegionList(); + /** + * repeated .hbase.pb.CloseRegionRequest close_region = 2; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest getCloseRegion(int index); + /** + 
* repeated .hbase.pb.CloseRegionRequest close_region = 2; + */ + int getCloseRegionCount(); + /** + * repeated .hbase.pb.CloseRegionRequest close_region = 2; + */ + java.util.List + getCloseRegionOrBuilderList(); + /** + * repeated .hbase.pb.CloseRegionRequest close_region = 2; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequestOrBuilder getCloseRegionOrBuilder( + int index); + } + /** + * Protobuf type {@code hbase.pb.ExecuteProceduresRequest} + */ + public static final class ExecuteProceduresRequest extends + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:hbase.pb.ExecuteProceduresRequest) + ExecuteProceduresRequestOrBuilder { + // Use ExecuteProceduresRequest.newBuilder() to construct. + private ExecuteProceduresRequest(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private ExecuteProceduresRequest() { + openRegion_ = java.util.Collections.emptyList(); + closeRegion_ = java.util.Collections.emptyList(); + } + + @java.lang.Override + public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ExecuteProceduresRequest( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + this(); + int mutable_bitField0_ = 0; + org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields = + org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + openRegion_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; + } + openRegion_.add( + input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest.PARSER, extensionRegistry)); + break; + } + case 18: { + if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + closeRegion_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000002; + } + closeRegion_.add( + input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest.PARSER, extensionRegistry)); + break; + } + } + } + } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + openRegion_ = java.util.Collections.unmodifiableList(openRegion_); + } + if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + closeRegion_ = java.util.Collections.unmodifiableList(closeRegion_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_ExecuteProceduresRequest_descriptor; + } + + protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_ExecuteProceduresRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest.class, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest.Builder.class); + } + + public static final int OPEN_REGION_FIELD_NUMBER = 1; + private java.util.List openRegion_; + /** + * repeated .hbase.pb.OpenRegionRequest open_region = 1; + */ + public java.util.List getOpenRegionList() { + return openRegion_; + } + /** + * repeated .hbase.pb.OpenRegionRequest open_region = 1; + */ + public java.util.List + getOpenRegionOrBuilderList() { + return openRegion_; + } + /** + * repeated .hbase.pb.OpenRegionRequest open_region = 1; + */ + public int getOpenRegionCount() { + return openRegion_.size(); + } + /** + * repeated .hbase.pb.OpenRegionRequest open_region = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest getOpenRegion(int index) { + return openRegion_.get(index); + } + /** + * repeated .hbase.pb.OpenRegionRequest open_region = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequestOrBuilder getOpenRegionOrBuilder( + int index) { + return openRegion_.get(index); + } + + public static final int CLOSE_REGION_FIELD_NUMBER = 2; + private java.util.List closeRegion_; + /** + * repeated .hbase.pb.CloseRegionRequest close_region = 2; + */ + public java.util.List getCloseRegionList() { + return closeRegion_; + } + /** + * repeated .hbase.pb.CloseRegionRequest close_region = 2; + */ + public java.util.List + getCloseRegionOrBuilderList() { + return closeRegion_; + } + /** + * repeated .hbase.pb.CloseRegionRequest close_region = 2; + */ + public int getCloseRegionCount() { + return closeRegion_.size(); + } + /** + * repeated .hbase.pb.CloseRegionRequest close_region = 2; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest getCloseRegion(int index) { + return closeRegion_.get(index); + } + /** + * repeated .hbase.pb.CloseRegionRequest close_region = 2; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequestOrBuilder getCloseRegionOrBuilder( + int index) { + return closeRegion_.get(index); + } + + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + for (int i = 0; i < getOpenRegionCount(); i++) { + if (!getOpenRegion(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + for (int i = 0; i < getCloseRegionCount(); i++) { + if (!getCloseRegion(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + for (int i = 0; i < openRegion_.size(); i++) { + output.writeMessage(1, openRegion_.get(i)); + } + for (int i = 0; i < closeRegion_.size(); i++) { + output.writeMessage(2, closeRegion_.get(i)); + } + 
unknownFields.writeTo(output); + } + + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < openRegion_.size(); i++) { + size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream + .computeMessageSize(1, openRegion_.get(i)); + } + for (int i = 0; i < closeRegion_.size(); i++) { + size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream + .computeMessageSize(2, closeRegion_.get(i)); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest other = (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest) obj; + + boolean result = true; + result = result && getOpenRegionList() + .equals(other.getOpenRegionList()); + result = result && getCloseRegionList() + .equals(other.getCloseRegionList()); + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (getOpenRegionCount() > 0) { + hash = (37 * hash) + OPEN_REGION_FIELD_NUMBER; + hash = (53 * hash) + getOpenRegionList().hashCode(); + } + if (getCloseRegionCount() > 0) { + hash = (37 * hash) + CLOSE_REGION_FIELD_NUMBER; + hash = (53 * hash) + getCloseRegionList().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest parseFrom(byte[] data) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest parseFrom( + byte[] data, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return 
org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest parseFrom( + java.io.InputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest parseDelimitedFrom( + java.io.InputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.ExecuteProceduresRequest} + */ + public static final class Builder extends + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:hbase.pb.ExecuteProceduresRequest) + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequestOrBuilder { + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_ExecuteProceduresRequest_descriptor; + } + + protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_ExecuteProceduresRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest.class, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + getOpenRegionFieldBuilder(); + getCloseRegionFieldBuilder(); + } + } + public Builder clear() { + super.clear(); + if (openRegionBuilder_ == null) { + openRegion_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + } else { + openRegionBuilder_.clear(); + } + if (closeRegionBuilder_ == null) { + closeRegion_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + } else { + closeRegionBuilder_.clear(); + } + return this; + } + + public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_ExecuteProceduresRequest_descriptor; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest build() { + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest buildPartial() { + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest result = new 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest(this); + int from_bitField0_ = bitField0_; + if (openRegionBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + openRegion_ = java.util.Collections.unmodifiableList(openRegion_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.openRegion_ = openRegion_; + } else { + result.openRegion_ = openRegionBuilder_.build(); + } + if (closeRegionBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { + closeRegion_ = java.util.Collections.unmodifiableList(closeRegion_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.closeRegion_ = closeRegion_; + } else { + result.closeRegion_ = closeRegionBuilder_.build(); + } + onBuilt(); + return result; + } + + public Builder clone() { + return (Builder) super.clone(); + } + public Builder setField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + Object value) { + return (Builder) super.setField(field, value); + } + public Builder clearField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + public Builder clearOneof( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + public Builder setRepeatedField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + int index, Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + public Builder addRepeatedField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + Object value) { + return (Builder) super.addRepeatedField(field, value); + } + public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest) { + return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest other) { + if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest.getDefaultInstance()) return this; + if (openRegionBuilder_ == null) { + if (!other.openRegion_.isEmpty()) { + if (openRegion_.isEmpty()) { + openRegion_ = other.openRegion_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureOpenRegionIsMutable(); + openRegion_.addAll(other.openRegion_); + } + onChanged(); + } + } else { + if (!other.openRegion_.isEmpty()) { + if (openRegionBuilder_.isEmpty()) { + openRegionBuilder_.dispose(); + openRegionBuilder_ = null; + openRegion_ = other.openRegion_; + bitField0_ = (bitField0_ & ~0x00000001); + openRegionBuilder_ = + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? 
+ getOpenRegionFieldBuilder() : null; + } else { + openRegionBuilder_.addAllMessages(other.openRegion_); + } + } + } + if (closeRegionBuilder_ == null) { + if (!other.closeRegion_.isEmpty()) { + if (closeRegion_.isEmpty()) { + closeRegion_ = other.closeRegion_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureCloseRegionIsMutable(); + closeRegion_.addAll(other.closeRegion_); + } + onChanged(); + } + } else { + if (!other.closeRegion_.isEmpty()) { + if (closeRegionBuilder_.isEmpty()) { + closeRegionBuilder_.dispose(); + closeRegionBuilder_ = null; + closeRegion_ = other.closeRegion_; + bitField0_ = (bitField0_ & ~0x00000002); + closeRegionBuilder_ = + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? + getCloseRegionFieldBuilder() : null; + } else { + closeRegionBuilder_.addAllMessages(other.closeRegion_); + } + } + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + public final boolean isInitialized() { + for (int i = 0; i < getOpenRegionCount(); i++) { + if (!getOpenRegion(i).isInitialized()) { + return false; + } + } + for (int i = 0; i < getCloseRegionCount(); i++) { + if (!getCloseRegion(i).isInitialized()) { + return false; + } + } + return true; + } + + public Builder mergeFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + private java.util.List openRegion_ = + java.util.Collections.emptyList(); + private void ensureOpenRegionIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + openRegion_ = new java.util.ArrayList(openRegion_); + bitField0_ |= 0x00000001; + } + } + + private org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequestOrBuilder> openRegionBuilder_; + + /** + * repeated .hbase.pb.OpenRegionRequest open_region = 1; + */ + public java.util.List getOpenRegionList() { + if (openRegionBuilder_ == null) { + return java.util.Collections.unmodifiableList(openRegion_); + } else { + return openRegionBuilder_.getMessageList(); + } + } + /** + * repeated .hbase.pb.OpenRegionRequest open_region = 1; + */ + public int getOpenRegionCount() { + if (openRegionBuilder_ == null) { + return openRegion_.size(); + } else { + return openRegionBuilder_.getCount(); + } + } + /** + * repeated .hbase.pb.OpenRegionRequest open_region = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest getOpenRegion(int index) { + if (openRegionBuilder_ == null) { + return openRegion_.get(index); + } else { + return openRegionBuilder_.getMessage(index); + } + } + /** + * 
repeated .hbase.pb.OpenRegionRequest open_region = 1; + */ + public Builder setOpenRegion( + int index, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest value) { + if (openRegionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureOpenRegionIsMutable(); + openRegion_.set(index, value); + onChanged(); + } else { + openRegionBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.OpenRegionRequest open_region = 1; + */ + public Builder setOpenRegion( + int index, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest.Builder builderForValue) { + if (openRegionBuilder_ == null) { + ensureOpenRegionIsMutable(); + openRegion_.set(index, builderForValue.build()); + onChanged(); + } else { + openRegionBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.OpenRegionRequest open_region = 1; + */ + public Builder addOpenRegion(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest value) { + if (openRegionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureOpenRegionIsMutable(); + openRegion_.add(value); + onChanged(); + } else { + openRegionBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .hbase.pb.OpenRegionRequest open_region = 1; + */ + public Builder addOpenRegion( + int index, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest value) { + if (openRegionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureOpenRegionIsMutable(); + openRegion_.add(index, value); + onChanged(); + } else { + openRegionBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.OpenRegionRequest open_region = 1; + */ + public Builder addOpenRegion( + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest.Builder builderForValue) { + if (openRegionBuilder_ == null) { + ensureOpenRegionIsMutable(); + openRegion_.add(builderForValue.build()); + onChanged(); + } else { + openRegionBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.OpenRegionRequest open_region = 1; + */ + public Builder addOpenRegion( + int index, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest.Builder builderForValue) { + if (openRegionBuilder_ == null) { + ensureOpenRegionIsMutable(); + openRegion_.add(index, builderForValue.build()); + onChanged(); + } else { + openRegionBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.OpenRegionRequest open_region = 1; + */ + public Builder addAllOpenRegion( + java.lang.Iterable values) { + if (openRegionBuilder_ == null) { + ensureOpenRegionIsMutable(); + org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, openRegion_); + onChanged(); + } else { + openRegionBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .hbase.pb.OpenRegionRequest open_region = 1; + */ + public Builder clearOpenRegion() { + if (openRegionBuilder_ == null) { + openRegion_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + openRegionBuilder_.clear(); + } + return this; + } + /** + * repeated .hbase.pb.OpenRegionRequest open_region = 1; + */ + public Builder removeOpenRegion(int index) { + if (openRegionBuilder_ == 
null) { + ensureOpenRegionIsMutable(); + openRegion_.remove(index); + onChanged(); + } else { + openRegionBuilder_.remove(index); + } + return this; + } + /** + * repeated .hbase.pb.OpenRegionRequest open_region = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest.Builder getOpenRegionBuilder( + int index) { + return getOpenRegionFieldBuilder().getBuilder(index); + } + /** + * repeated .hbase.pb.OpenRegionRequest open_region = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequestOrBuilder getOpenRegionOrBuilder( + int index) { + if (openRegionBuilder_ == null) { + return openRegion_.get(index); } else { + return openRegionBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .hbase.pb.OpenRegionRequest open_region = 1; + */ + public java.util.List + getOpenRegionOrBuilderList() { + if (openRegionBuilder_ != null) { + return openRegionBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(openRegion_); + } + } + /** + * repeated .hbase.pb.OpenRegionRequest open_region = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest.Builder addOpenRegionBuilder() { + return getOpenRegionFieldBuilder().addBuilder( + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest.getDefaultInstance()); + } + /** + * repeated .hbase.pb.OpenRegionRequest open_region = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest.Builder addOpenRegionBuilder( + int index) { + return getOpenRegionFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest.getDefaultInstance()); + } + /** + * repeated .hbase.pb.OpenRegionRequest open_region = 1; + */ + public java.util.List + getOpenRegionBuilderList() { + return getOpenRegionFieldBuilder().getBuilderList(); + } + private org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequestOrBuilder> + getOpenRegionFieldBuilder() { + if (openRegionBuilder_ == null) { + openRegionBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequestOrBuilder>( + openRegion_, + ((bitField0_ & 0x00000001) == 0x00000001), + getParentForChildren(), + isClean()); + openRegion_ = null; + } + return openRegionBuilder_; + } + + private java.util.List closeRegion_ = + java.util.Collections.emptyList(); + private void ensureCloseRegionIsMutable() { + if (!((bitField0_ & 0x00000002) == 0x00000002)) { + closeRegion_ = new java.util.ArrayList(closeRegion_); + bitField0_ |= 0x00000002; + } + } + + private org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequestOrBuilder> closeRegionBuilder_; + + /** + * 
repeated .hbase.pb.CloseRegionRequest close_region = 2; + */ + public java.util.List getCloseRegionList() { + if (closeRegionBuilder_ == null) { + return java.util.Collections.unmodifiableList(closeRegion_); + } else { + return closeRegionBuilder_.getMessageList(); + } + } + /** + * repeated .hbase.pb.CloseRegionRequest close_region = 2; + */ + public int getCloseRegionCount() { + if (closeRegionBuilder_ == null) { + return closeRegion_.size(); + } else { + return closeRegionBuilder_.getCount(); + } + } + /** + * repeated .hbase.pb.CloseRegionRequest close_region = 2; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest getCloseRegion(int index) { + if (closeRegionBuilder_ == null) { + return closeRegion_.get(index); + } else { + return closeRegionBuilder_.getMessage(index); + } + } + /** + * repeated .hbase.pb.CloseRegionRequest close_region = 2; + */ + public Builder setCloseRegion( + int index, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest value) { + if (closeRegionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureCloseRegionIsMutable(); + closeRegion_.set(index, value); + onChanged(); + } else { + closeRegionBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.CloseRegionRequest close_region = 2; + */ + public Builder setCloseRegion( + int index, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest.Builder builderForValue) { + if (closeRegionBuilder_ == null) { + ensureCloseRegionIsMutable(); + closeRegion_.set(index, builderForValue.build()); + onChanged(); + } else { + closeRegionBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.CloseRegionRequest close_region = 2; + */ + public Builder addCloseRegion(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest value) { + if (closeRegionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureCloseRegionIsMutable(); + closeRegion_.add(value); + onChanged(); + } else { + closeRegionBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .hbase.pb.CloseRegionRequest close_region = 2; + */ + public Builder addCloseRegion( + int index, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest value) { + if (closeRegionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureCloseRegionIsMutable(); + closeRegion_.add(index, value); + onChanged(); + } else { + closeRegionBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.CloseRegionRequest close_region = 2; + */ + public Builder addCloseRegion( + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest.Builder builderForValue) { + if (closeRegionBuilder_ == null) { + ensureCloseRegionIsMutable(); + closeRegion_.add(builderForValue.build()); + onChanged(); + } else { + closeRegionBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.CloseRegionRequest close_region = 2; + */ + public Builder addCloseRegion( + int index, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest.Builder builderForValue) { + if (closeRegionBuilder_ == null) { + ensureCloseRegionIsMutable(); + closeRegion_.add(index, builderForValue.build()); + onChanged(); + } else { + closeRegionBuilder_.addMessage(index, builderForValue.build()); + 
} + return this; + } + /** + * repeated .hbase.pb.CloseRegionRequest close_region = 2; + */ + public Builder addAllCloseRegion( + java.lang.Iterable values) { + if (closeRegionBuilder_ == null) { + ensureCloseRegionIsMutable(); + org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, closeRegion_); + onChanged(); + } else { + closeRegionBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .hbase.pb.CloseRegionRequest close_region = 2; + */ + public Builder clearCloseRegion() { + if (closeRegionBuilder_ == null) { + closeRegion_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + closeRegionBuilder_.clear(); + } + return this; + } + /** + * repeated .hbase.pb.CloseRegionRequest close_region = 2; + */ + public Builder removeCloseRegion(int index) { + if (closeRegionBuilder_ == null) { + ensureCloseRegionIsMutable(); + closeRegion_.remove(index); + onChanged(); + } else { + closeRegionBuilder_.remove(index); + } + return this; + } + /** + * repeated .hbase.pb.CloseRegionRequest close_region = 2; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest.Builder getCloseRegionBuilder( + int index) { + return getCloseRegionFieldBuilder().getBuilder(index); + } + /** + * repeated .hbase.pb.CloseRegionRequest close_region = 2; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequestOrBuilder getCloseRegionOrBuilder( + int index) { + if (closeRegionBuilder_ == null) { + return closeRegion_.get(index); } else { + return closeRegionBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .hbase.pb.CloseRegionRequest close_region = 2; + */ + public java.util.List + getCloseRegionOrBuilderList() { + if (closeRegionBuilder_ != null) { + return closeRegionBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(closeRegion_); + } + } + /** + * repeated .hbase.pb.CloseRegionRequest close_region = 2; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest.Builder addCloseRegionBuilder() { + return getCloseRegionFieldBuilder().addBuilder( + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest.getDefaultInstance()); + } + /** + * repeated .hbase.pb.CloseRegionRequest close_region = 2; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest.Builder addCloseRegionBuilder( + int index) { + return getCloseRegionFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest.getDefaultInstance()); + } + /** + * repeated .hbase.pb.CloseRegionRequest close_region = 2; + */ + public java.util.List + getCloseRegionBuilderList() { + return getCloseRegionFieldBuilder().getBuilderList(); + } + private org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequestOrBuilder> + getCloseRegionFieldBuilder() { + if (closeRegionBuilder_ == null) { + closeRegionBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest, 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequestOrBuilder>( + closeRegion_, + ((bitField0_ & 0x00000002) == 0x00000002), + getParentForChildren(), + isClean()); + closeRegion_ = null; + } + return closeRegionBuilder_; + } + public final Builder setUnknownFields( + final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + public final Builder mergeUnknownFields( + final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:hbase.pb.ExecuteProceduresRequest) + } + + // @@protoc_insertion_point(class_scope:hbase.pb.ExecuteProceduresRequest) + private static final org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest(); + } + + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser + PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser() { + public ExecuteProceduresRequest parsePartialFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return new ExecuteProceduresRequest(input, extensionRegistry); + } + }; + + public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + + } + + public interface ExecuteProceduresResponseOrBuilder extends + // @@protoc_insertion_point(interface_extends:hbase.pb.ExecuteProceduresResponse) + org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder { + + /** + * repeated .hbase.pb.OpenRegionResponse open_region = 1; + */ + java.util.List + getOpenRegionList(); + /** + * repeated .hbase.pb.OpenRegionResponse open_region = 1; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse getOpenRegion(int index); + /** + * repeated .hbase.pb.OpenRegionResponse open_region = 1; + */ + int getOpenRegionCount(); + /** + * repeated .hbase.pb.OpenRegionResponse open_region = 1; + */ + java.util.List + getOpenRegionOrBuilderList(); + /** + * repeated .hbase.pb.OpenRegionResponse open_region = 1; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponseOrBuilder getOpenRegionOrBuilder( + int index); + + /** + * repeated .hbase.pb.CloseRegionResponse close_region = 2; + */ + java.util.List + getCloseRegionList(); + /** + * repeated .hbase.pb.CloseRegionResponse close_region = 2; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse getCloseRegion(int index); + /** + * repeated 
.hbase.pb.CloseRegionResponse close_region = 2; + */ + int getCloseRegionCount(); + /** + * repeated .hbase.pb.CloseRegionResponse close_region = 2; + */ + java.util.List + getCloseRegionOrBuilderList(); + /** + * repeated .hbase.pb.CloseRegionResponse close_region = 2; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponseOrBuilder getCloseRegionOrBuilder( + int index); + } + /** + * Protobuf type {@code hbase.pb.ExecuteProceduresResponse} + */ + public static final class ExecuteProceduresResponse extends + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:hbase.pb.ExecuteProceduresResponse) + ExecuteProceduresResponseOrBuilder { + // Use ExecuteProceduresResponse.newBuilder() to construct. + private ExecuteProceduresResponse(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private ExecuteProceduresResponse() { + openRegion_ = java.util.Collections.emptyList(); + closeRegion_ = java.util.Collections.emptyList(); + } + + @java.lang.Override + public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ExecuteProceduresResponse( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + this(); + int mutable_bitField0_ = 0; + org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields = + org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + openRegion_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; + } + openRegion_.add( + input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse.PARSER, extensionRegistry)); + break; + } + case 18: { + if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + closeRegion_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000002; + } + closeRegion_.add( + input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse.PARSER, extensionRegistry)); + break; + } + } + } + } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + openRegion_ = java.util.Collections.unmodifiableList(openRegion_); + } + if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + closeRegion_ = java.util.Collections.unmodifiableList(closeRegion_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_ExecuteProceduresResponse_descriptor; + } + + protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_ExecuteProceduresResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse.class, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse.Builder.class); + } + + public static final int OPEN_REGION_FIELD_NUMBER = 1; + private java.util.List openRegion_; + /** + * repeated .hbase.pb.OpenRegionResponse open_region = 1; + */ + public java.util.List getOpenRegionList() { + return openRegion_; + } + /** + * repeated .hbase.pb.OpenRegionResponse open_region = 1; + */ + public java.util.List + getOpenRegionOrBuilderList() { + return openRegion_; + } + /** + * repeated .hbase.pb.OpenRegionResponse open_region = 1; + */ + public int getOpenRegionCount() { + return openRegion_.size(); + } + /** + * repeated .hbase.pb.OpenRegionResponse open_region = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse getOpenRegion(int index) { + return openRegion_.get(index); + } + /** + * repeated .hbase.pb.OpenRegionResponse open_region = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponseOrBuilder getOpenRegionOrBuilder( + int index) { + return openRegion_.get(index); + } + + public static final int CLOSE_REGION_FIELD_NUMBER = 2; + private java.util.List closeRegion_; + /** + * repeated .hbase.pb.CloseRegionResponse close_region = 2; + */ + public java.util.List getCloseRegionList() { + return closeRegion_; + } + /** + * repeated .hbase.pb.CloseRegionResponse close_region = 2; + */ + public java.util.List + getCloseRegionOrBuilderList() { + return closeRegion_; + } + /** + * repeated .hbase.pb.CloseRegionResponse close_region = 2; + */ + public int getCloseRegionCount() { + return closeRegion_.size(); + } + /** + * repeated .hbase.pb.CloseRegionResponse close_region = 2; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse getCloseRegion(int index) { + return closeRegion_.get(index); + } + /** + * repeated .hbase.pb.CloseRegionResponse close_region = 2; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponseOrBuilder getCloseRegionOrBuilder( + int index) { + return closeRegion_.get(index); + } + + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + for (int i = 0; i < getCloseRegionCount(); i++) { + if (!getCloseRegion(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + for (int i = 0; i < openRegion_.size(); i++) { + output.writeMessage(1, openRegion_.get(i)); + } + for (int i = 0; i < closeRegion_.size(); i++) { + output.writeMessage(2, closeRegion_.get(i)); + } + unknownFields.writeTo(output); + } + + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size 
= 0; + for (int i = 0; i < openRegion_.size(); i++) { + size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream + .computeMessageSize(1, openRegion_.get(i)); + } + for (int i = 0; i < closeRegion_.size(); i++) { + size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream + .computeMessageSize(2, closeRegion_.get(i)); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse other = (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse) obj; + + boolean result = true; + result = result && getOpenRegionList() + .equals(other.getOpenRegionList()); + result = result && getCloseRegionList() + .equals(other.getCloseRegionList()); + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (getOpenRegionCount() > 0) { + hash = (37 * hash) + OPEN_REGION_FIELD_NUMBER; + hash = (53 * hash) + getOpenRegionList().hashCode(); + } + if (getCloseRegionCount() > 0) { + hash = (37 * hash) + CLOSE_REGION_FIELD_NUMBER; + hash = (53 * hash) + getCloseRegionList().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse parseFrom(byte[] data) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse parseFrom( + byte[] data, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse parseFrom( + java.io.InputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse parseDelimitedFrom( + java.io.InputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.ExecuteProceduresResponse} + */ + public static final class Builder extends + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:hbase.pb.ExecuteProceduresResponse) + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponseOrBuilder { + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_ExecuteProceduresResponse_descriptor; + } + + protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_ExecuteProceduresResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse.class, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + getOpenRegionFieldBuilder(); + getCloseRegionFieldBuilder(); + } + } + public Builder clear() { + super.clear(); + if (openRegionBuilder_ == null) { + openRegion_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + } else { + openRegionBuilder_.clear(); + } + if (closeRegionBuilder_ == null) { + closeRegion_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + } else { + closeRegionBuilder_.clear(); + } + return this; + } + + public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_ExecuteProceduresResponse_descriptor; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse build() { + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse buildPartial() { + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse result = new 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse(this); + int from_bitField0_ = bitField0_; + if (openRegionBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + openRegion_ = java.util.Collections.unmodifiableList(openRegion_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.openRegion_ = openRegion_; + } else { + result.openRegion_ = openRegionBuilder_.build(); + } + if (closeRegionBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { + closeRegion_ = java.util.Collections.unmodifiableList(closeRegion_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.closeRegion_ = closeRegion_; + } else { + result.closeRegion_ = closeRegionBuilder_.build(); + } + onBuilt(); + return result; + } + + public Builder clone() { + return (Builder) super.clone(); + } + public Builder setField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + Object value) { + return (Builder) super.setField(field, value); + } + public Builder clearField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + public Builder clearOneof( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + public Builder setRepeatedField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + int index, Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + public Builder addRepeatedField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + Object value) { + return (Builder) super.addRepeatedField(field, value); + } + public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse) { + return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse other) { + if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse.getDefaultInstance()) return this; + if (openRegionBuilder_ == null) { + if (!other.openRegion_.isEmpty()) { + if (openRegion_.isEmpty()) { + openRegion_ = other.openRegion_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureOpenRegionIsMutable(); + openRegion_.addAll(other.openRegion_); + } + onChanged(); + } + } else { + if (!other.openRegion_.isEmpty()) { + if (openRegionBuilder_.isEmpty()) { + openRegionBuilder_.dispose(); + openRegionBuilder_ = null; + openRegion_ = other.openRegion_; + bitField0_ = (bitField0_ & ~0x00000001); + openRegionBuilder_ = + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? 
+ getOpenRegionFieldBuilder() : null; + } else { + openRegionBuilder_.addAllMessages(other.openRegion_); + } + } + } + if (closeRegionBuilder_ == null) { + if (!other.closeRegion_.isEmpty()) { + if (closeRegion_.isEmpty()) { + closeRegion_ = other.closeRegion_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureCloseRegionIsMutable(); + closeRegion_.addAll(other.closeRegion_); + } + onChanged(); + } + } else { + if (!other.closeRegion_.isEmpty()) { + if (closeRegionBuilder_.isEmpty()) { + closeRegionBuilder_.dispose(); + closeRegionBuilder_ = null; + closeRegion_ = other.closeRegion_; + bitField0_ = (bitField0_ & ~0x00000002); + closeRegionBuilder_ = + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? + getCloseRegionFieldBuilder() : null; + } else { + closeRegionBuilder_.addAllMessages(other.closeRegion_); + } + } + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + public final boolean isInitialized() { + for (int i = 0; i < getCloseRegionCount(); i++) { + if (!getCloseRegion(i).isInitialized()) { + return false; + } + } + return true; + } + + public Builder mergeFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + private java.util.List openRegion_ = + java.util.Collections.emptyList(); + private void ensureOpenRegionIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + openRegion_ = new java.util.ArrayList(openRegion_); + bitField0_ |= 0x00000001; + } + } + + private org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponseOrBuilder> openRegionBuilder_; + + /** + * repeated .hbase.pb.OpenRegionResponse open_region = 1; + */ + public java.util.List getOpenRegionList() { + if (openRegionBuilder_ == null) { + return java.util.Collections.unmodifiableList(openRegion_); + } else { + return openRegionBuilder_.getMessageList(); + } + } + /** + * repeated .hbase.pb.OpenRegionResponse open_region = 1; + */ + public int getOpenRegionCount() { + if (openRegionBuilder_ == null) { + return openRegion_.size(); + } else { + return openRegionBuilder_.getCount(); + } + } + /** + * repeated .hbase.pb.OpenRegionResponse open_region = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse getOpenRegion(int index) { + if (openRegionBuilder_ == null) { + return openRegion_.get(index); + } else { + return openRegionBuilder_.getMessage(index); + } + } + /** + * repeated .hbase.pb.OpenRegionResponse open_region = 1; + */ + public Builder setOpenRegion( + int index, 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse value) { + if (openRegionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureOpenRegionIsMutable(); + openRegion_.set(index, value); + onChanged(); + } else { + openRegionBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.OpenRegionResponse open_region = 1; + */ + public Builder setOpenRegion( + int index, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse.Builder builderForValue) { + if (openRegionBuilder_ == null) { + ensureOpenRegionIsMutable(); + openRegion_.set(index, builderForValue.build()); + onChanged(); + } else { + openRegionBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.OpenRegionResponse open_region = 1; + */ + public Builder addOpenRegion(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse value) { + if (openRegionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureOpenRegionIsMutable(); + openRegion_.add(value); + onChanged(); + } else { + openRegionBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .hbase.pb.OpenRegionResponse open_region = 1; + */ + public Builder addOpenRegion( + int index, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse value) { + if (openRegionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureOpenRegionIsMutable(); + openRegion_.add(index, value); + onChanged(); + } else { + openRegionBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.OpenRegionResponse open_region = 1; + */ + public Builder addOpenRegion( + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse.Builder builderForValue) { + if (openRegionBuilder_ == null) { + ensureOpenRegionIsMutable(); + openRegion_.add(builderForValue.build()); + onChanged(); + } else { + openRegionBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.OpenRegionResponse open_region = 1; + */ + public Builder addOpenRegion( + int index, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse.Builder builderForValue) { + if (openRegionBuilder_ == null) { + ensureOpenRegionIsMutable(); + openRegion_.add(index, builderForValue.build()); + onChanged(); + } else { + openRegionBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.OpenRegionResponse open_region = 1; + */ + public Builder addAllOpenRegion( + java.lang.Iterable values) { + if (openRegionBuilder_ == null) { + ensureOpenRegionIsMutable(); + org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, openRegion_); + onChanged(); + } else { + openRegionBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .hbase.pb.OpenRegionResponse open_region = 1; + */ + public Builder clearOpenRegion() { + if (openRegionBuilder_ == null) { + openRegion_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + openRegionBuilder_.clear(); + } + return this; + } + /** + * repeated .hbase.pb.OpenRegionResponse open_region = 1; + */ + public Builder removeOpenRegion(int index) { + if (openRegionBuilder_ == null) { + ensureOpenRegionIsMutable(); + openRegion_.remove(index); + onChanged(); + } else { 
+ openRegionBuilder_.remove(index); + } + return this; + } + /** + * repeated .hbase.pb.OpenRegionResponse open_region = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse.Builder getOpenRegionBuilder( + int index) { + return getOpenRegionFieldBuilder().getBuilder(index); + } + /** + * repeated .hbase.pb.OpenRegionResponse open_region = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponseOrBuilder getOpenRegionOrBuilder( + int index) { + if (openRegionBuilder_ == null) { + return openRegion_.get(index); } else { + return openRegionBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .hbase.pb.OpenRegionResponse open_region = 1; + */ + public java.util.List + getOpenRegionOrBuilderList() { + if (openRegionBuilder_ != null) { + return openRegionBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(openRegion_); + } + } + /** + * repeated .hbase.pb.OpenRegionResponse open_region = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse.Builder addOpenRegionBuilder() { + return getOpenRegionFieldBuilder().addBuilder( + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse.getDefaultInstance()); + } + /** + * repeated .hbase.pb.OpenRegionResponse open_region = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse.Builder addOpenRegionBuilder( + int index) { + return getOpenRegionFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse.getDefaultInstance()); + } + /** + * repeated .hbase.pb.OpenRegionResponse open_region = 1; + */ + public java.util.List + getOpenRegionBuilderList() { + return getOpenRegionFieldBuilder().getBuilderList(); + } + private org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponseOrBuilder> + getOpenRegionFieldBuilder() { + if (openRegionBuilder_ == null) { + openRegionBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponseOrBuilder>( + openRegion_, + ((bitField0_ & 0x00000001) == 0x00000001), + getParentForChildren(), + isClean()); + openRegion_ = null; + } + return openRegionBuilder_; + } + + private java.util.List closeRegion_ = + java.util.Collections.emptyList(); + private void ensureCloseRegionIsMutable() { + if (!((bitField0_ & 0x00000002) == 0x00000002)) { + closeRegion_ = new java.util.ArrayList(closeRegion_); + bitField0_ |= 0x00000002; + } + } + + private org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponseOrBuilder> closeRegionBuilder_; + + /** + * repeated .hbase.pb.CloseRegionResponse close_region = 2; + */ + public 
java.util.List getCloseRegionList() { + if (closeRegionBuilder_ == null) { + return java.util.Collections.unmodifiableList(closeRegion_); + } else { + return closeRegionBuilder_.getMessageList(); + } + } + /** + * repeated .hbase.pb.CloseRegionResponse close_region = 2; + */ + public int getCloseRegionCount() { + if (closeRegionBuilder_ == null) { + return closeRegion_.size(); + } else { + return closeRegionBuilder_.getCount(); + } + } + /** + * repeated .hbase.pb.CloseRegionResponse close_region = 2; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse getCloseRegion(int index) { + if (closeRegionBuilder_ == null) { + return closeRegion_.get(index); + } else { + return closeRegionBuilder_.getMessage(index); + } + } + /** + * repeated .hbase.pb.CloseRegionResponse close_region = 2; + */ + public Builder setCloseRegion( + int index, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse value) { + if (closeRegionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureCloseRegionIsMutable(); + closeRegion_.set(index, value); + onChanged(); + } else { + closeRegionBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.CloseRegionResponse close_region = 2; + */ + public Builder setCloseRegion( + int index, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse.Builder builderForValue) { + if (closeRegionBuilder_ == null) { + ensureCloseRegionIsMutable(); + closeRegion_.set(index, builderForValue.build()); + onChanged(); + } else { + closeRegionBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.CloseRegionResponse close_region = 2; + */ + public Builder addCloseRegion(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse value) { + if (closeRegionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureCloseRegionIsMutable(); + closeRegion_.add(value); + onChanged(); + } else { + closeRegionBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .hbase.pb.CloseRegionResponse close_region = 2; + */ + public Builder addCloseRegion( + int index, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse value) { + if (closeRegionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureCloseRegionIsMutable(); + closeRegion_.add(index, value); + onChanged(); + } else { + closeRegionBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.CloseRegionResponse close_region = 2; + */ + public Builder addCloseRegion( + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse.Builder builderForValue) { + if (closeRegionBuilder_ == null) { + ensureCloseRegionIsMutable(); + closeRegion_.add(builderForValue.build()); + onChanged(); + } else { + closeRegionBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.CloseRegionResponse close_region = 2; + */ + public Builder addCloseRegion( + int index, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse.Builder builderForValue) { + if (closeRegionBuilder_ == null) { + ensureCloseRegionIsMutable(); + closeRegion_.add(index, builderForValue.build()); + onChanged(); + } else { + closeRegionBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated 
.hbase.pb.CloseRegionResponse close_region = 2; + */ + public Builder addAllCloseRegion( + java.lang.Iterable values) { + if (closeRegionBuilder_ == null) { + ensureCloseRegionIsMutable(); + org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractMessageLite.Builder.addAll( + values, closeRegion_); + onChanged(); + } else { + closeRegionBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .hbase.pb.CloseRegionResponse close_region = 2; + */ + public Builder clearCloseRegion() { + if (closeRegionBuilder_ == null) { + closeRegion_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + closeRegionBuilder_.clear(); + } + return this; + } + /** + * repeated .hbase.pb.CloseRegionResponse close_region = 2; + */ + public Builder removeCloseRegion(int index) { + if (closeRegionBuilder_ == null) { + ensureCloseRegionIsMutable(); + closeRegion_.remove(index); + onChanged(); + } else { + closeRegionBuilder_.remove(index); + } + return this; + } + /** + * repeated .hbase.pb.CloseRegionResponse close_region = 2; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse.Builder getCloseRegionBuilder( + int index) { + return getCloseRegionFieldBuilder().getBuilder(index); + } + /** + * repeated .hbase.pb.CloseRegionResponse close_region = 2; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponseOrBuilder getCloseRegionOrBuilder( + int index) { + if (closeRegionBuilder_ == null) { + return closeRegion_.get(index); } else { + return closeRegionBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .hbase.pb.CloseRegionResponse close_region = 2; + */ + public java.util.List + getCloseRegionOrBuilderList() { + if (closeRegionBuilder_ != null) { + return closeRegionBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(closeRegion_); + } + } + /** + * repeated .hbase.pb.CloseRegionResponse close_region = 2; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse.Builder addCloseRegionBuilder() { + return getCloseRegionFieldBuilder().addBuilder( + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse.getDefaultInstance()); + } + /** + * repeated .hbase.pb.CloseRegionResponse close_region = 2; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse.Builder addCloseRegionBuilder( + int index) { + return getCloseRegionFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse.getDefaultInstance()); + } + /** + * repeated .hbase.pb.CloseRegionResponse close_region = 2; + */ + public java.util.List + getCloseRegionBuilderList() { + return getCloseRegionFieldBuilder().getBuilderList(); + } + private org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponseOrBuilder> + getCloseRegionFieldBuilder() { + if (closeRegionBuilder_ == null) { + closeRegionBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse, 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponseOrBuilder>( + closeRegion_, + ((bitField0_ & 0x00000002) == 0x00000002), + getParentForChildren(), + isClean()); + closeRegion_ = null; + } + return closeRegionBuilder_; + } + public final Builder setUnknownFields( + final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + public final Builder mergeUnknownFields( + final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:hbase.pb.ExecuteProceduresResponse) + } + + // @@protoc_insertion_point(class_scope:hbase.pb.ExecuteProceduresResponse) + private static final org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse(); + } + + public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser + PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser() { + public ExecuteProceduresResponse parsePartialFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return new ExecuteProceduresResponse(input, extensionRegistry); + } + }; + + public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + + } + /** * Protobuf service {@code hbase.pb.AdminService} */ @@ -24902,6 +27155,14 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done); + /** + * rpc ExecuteProcedures(.hbase.pb.ExecuteProceduresRequest) returns (.hbase.pb.ExecuteProceduresResponse); + */ + public abstract void executeProcedures( + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest request, + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done); + } public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Service newReflectiveService( @@ -25051,6 +27312,14 @@ public final class AdminProtos { impl.getRegionLoad(controller, request, done); } + @java.lang.Override + public void executeProcedures( + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest request, + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { + impl.executeProcedures(controller, request, done); + } 
+ }; } @@ -25109,6 +27378,8 @@ public final class AdminProtos { return impl.updateConfiguration(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest)request); case 17: return impl.getRegionLoad(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest)request); + case 18: + return impl.executeProcedures(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest)request); default: throw new java.lang.AssertionError("Can't get here."); } @@ -25159,6 +27430,8 @@ public final class AdminProtos { return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest.getDefaultInstance(); case 17: return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest.getDefaultInstance(); + case 18: + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -25209,6 +27482,8 @@ public final class AdminProtos { return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse.getDefaultInstance(); case 17: return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse.getDefaultInstance(); + case 18: + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -25361,6 +27636,14 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest request, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done); + /** + * rpc ExecuteProcedures(.hbase.pb.ExecuteProceduresRequest) returns (.hbase.pb.ExecuteProceduresResponse); + */ + public abstract void executeProcedures( + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest request, + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done); + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.ServiceDescriptor getDescriptor() { @@ -25473,6 +27756,11 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( done)); return; + case 18: + this.executeProcedures(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest)request, + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; default: throw new java.lang.AssertionError("Can't get here."); } @@ -25523,6 +27811,8 @@ public final class AdminProtos { return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest.getDefaultInstance(); case 17: return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest.getDefaultInstance(); + case 18: + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -25573,6 +27863,8 @@ public final class AdminProtos { return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse.getDefaultInstance(); case 17: return 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse.getDefaultInstance(); + case 18: + return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -25863,6 +28155,21 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse.class, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse.getDefaultInstance())); } + + public void executeProcedures( + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest request, + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(18), + controller, + request, + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse.getDefaultInstance(), + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse.class, + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse.getDefaultInstance())); + } } public static BlockingInterface newBlockingStub( @@ -25960,6 +28267,11 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest request) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse executeProcedures( + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest request) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException; } private static final class BlockingStub implements BlockingInterface { @@ -26184,6 +28496,18 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse.getDefaultInstance()); } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse executeProcedures( + org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest request) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(18), + controller, + request, + org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse.getDefaultInstance()); + } + } // @@protoc_insertion_point(class_scope:hbase.pb.AdminService) @@ -26379,6 +28703,16 @@ public final class AdminProtos { private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hbase_pb_GetRegionLoadResponse_fieldAccessorTable; + private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_ExecuteProceduresRequest_descriptor; + private static final + 
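// Editorial sketch (not part of the generated diff): how a caller might batch region open/close
// work through the new ExecuteProcedures RPC via the generated blocking stub. The
// executeProcedures signature and the repeated open_region/close_region response fields are taken
// from the generated code above; the request-side builder methods (newBuilder, addOpenRegion,
// addCloseRegion) are assumed from the standard protobuf-java codegen pattern for the fields in
// the descriptor, and "admin"/"controller" stand in for a stub and RpcController obtained elsewhere.
static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse
    executeBatch(
        org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface admin,
        org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
        org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest open,
        org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest close)
        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException {
  // Batch one open and one close into a single round trip to the regionserver.
  org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest request =
      org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest.newBuilder()
          .addOpenRegion(open)
          .addCloseRegion(close)
          .build();
  // The response carries one OpenRegionResponse / CloseRegionResponse per submitted request.
  return admin.executeProcedures(controller, request);
}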
org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_hbase_pb_ExecuteProceduresRequest_fieldAccessorTable; + private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_ExecuteProceduresResponse_descriptor; + private static final + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_hbase_pb_ExecuteProceduresResponse_fieldAccessorTable; public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { @@ -26464,48 +28798,57 @@ public final class AdminProtos { "tionResponse\"?\n\024GetRegionLoadRequest\022\'\n\n" + "table_name\030\001 \001(\0132\023.hbase.pb.TableName\"C\n" + "\025GetRegionLoadResponse\022*\n\014region_loads\030\001" + - " \003(\0132\024.hbase.pb.RegionLoad2\203\014\n\014AdminServ" + - "ice\022P\n\rGetRegionInfo\022\036.hbase.pb.GetRegio" + - "nInfoRequest\032\037.hbase.pb.GetRegionInfoRes" + - "ponse\022M\n\014GetStoreFile\022\035.hbase.pb.GetStor", - "eFileRequest\032\036.hbase.pb.GetStoreFileResp" + - "onse\022V\n\017GetOnlineRegion\022 .hbase.pb.GetOn" + - "lineRegionRequest\032!.hbase.pb.GetOnlineRe" + - "gionResponse\022G\n\nOpenRegion\022\033.hbase.pb.Op" + - "enRegionRequest\032\034.hbase.pb.OpenRegionRes" + - "ponse\022M\n\014WarmupRegion\022\035.hbase.pb.WarmupR" + - "egionRequest\032\036.hbase.pb.WarmupRegionResp" + - "onse\022J\n\013CloseRegion\022\034.hbase.pb.CloseRegi" + - "onRequest\032\035.hbase.pb.CloseRegionResponse" + - "\022w\n\032CloseRegionForSplitOrMerge\022+.hbase.p", - "b.CloseRegionForSplitOrMergeRequest\032,.hb" + - "ase.pb.CloseRegionForSplitOrMergeRespons" + - "e\022J\n\013FlushRegion\022\034.hbase.pb.FlushRegionR" + - "equest\032\035.hbase.pb.FlushRegionResponse\022J\n" + - "\013SplitRegion\022\034.hbase.pb.SplitRegionReque" + - "st\032\035.hbase.pb.SplitRegionResponse\022P\n\rCom" + - "pactRegion\022\036.hbase.pb.CompactRegionReque" + - "st\032\037.hbase.pb.CompactRegionResponse\022\\\n\021R" + - "eplicateWALEntry\022\".hbase.pb.ReplicateWAL" + - "EntryRequest\032#.hbase.pb.ReplicateWALEntr", - "yResponse\022Q\n\006Replay\022\".hbase.pb.Replicate" + - "WALEntryRequest\032#.hbase.pb.ReplicateWALE" + - "ntryResponse\022P\n\rRollWALWriter\022\036.hbase.pb" + - ".RollWALWriterRequest\032\037.hbase.pb.RollWAL" + - "WriterResponse\022P\n\rGetServerInfo\022\036.hbase." + - "pb.GetServerInfoRequest\032\037.hbase.pb.GetSe" + - "rverInfoResponse\022G\n\nStopServer\022\033.hbase.p" + - "b.StopServerRequest\032\034.hbase.pb.StopServe" + - "rResponse\022_\n\022UpdateFavoredNodes\022#.hbase." 
+ - "pb.UpdateFavoredNodesRequest\032$.hbase.pb.", - "UpdateFavoredNodesResponse\022b\n\023UpdateConf" + - "iguration\022$.hbase.pb.UpdateConfiguration" + - "Request\032%.hbase.pb.UpdateConfigurationRe" + - "sponse\022P\n\rGetRegionLoad\022\036.hbase.pb.GetRe" + - "gionLoadRequest\032\037.hbase.pb.GetRegionLoad" + - "ResponseBH\n1org.apache.hadoop.hbase.shad" + - "ed.protobuf.generatedB\013AdminProtosH\001\210\001\001\240" + - "\001\001" + " \003(\0132\024.hbase.pb.RegionLoad\"\200\001\n\030ExecutePr" + + "oceduresRequest\0220\n\013open_region\030\001 \003(\0132\033.h" + + "base.pb.OpenRegionRequest\0222\n\014close_regio" + + "n\030\002 \003(\0132\034.hbase.pb.CloseRegionRequest\"\203\001", + "\n\031ExecuteProceduresResponse\0221\n\013open_regi" + + "on\030\001 \003(\0132\034.hbase.pb.OpenRegionResponse\0223" + + "\n\014close_region\030\002 \003(\0132\035.hbase.pb.CloseReg" + + "ionResponse2\341\014\n\014AdminService\022P\n\rGetRegio" + + "nInfo\022\036.hbase.pb.GetRegionInfoRequest\032\037." + + "hbase.pb.GetRegionInfoResponse\022M\n\014GetSto" + + "reFile\022\035.hbase.pb.GetStoreFileRequest\032\036." + + "hbase.pb.GetStoreFileResponse\022V\n\017GetOnli" + + "neRegion\022 .hbase.pb.GetOnlineRegionReque" + + "st\032!.hbase.pb.GetOnlineRegionResponse\022G\n", + "\nOpenRegion\022\033.hbase.pb.OpenRegionRequest" + + "\032\034.hbase.pb.OpenRegionResponse\022M\n\014Warmup" + + "Region\022\035.hbase.pb.WarmupRegionRequest\032\036." + + "hbase.pb.WarmupRegionResponse\022J\n\013CloseRe" + + "gion\022\034.hbase.pb.CloseRegionRequest\032\035.hba" + + "se.pb.CloseRegionResponse\022w\n\032CloseRegion" + + "ForSplitOrMerge\022+.hbase.pb.CloseRegionFo" + + "rSplitOrMergeRequest\032,.hbase.pb.CloseReg" + + "ionForSplitOrMergeResponse\022J\n\013FlushRegio" + + "n\022\034.hbase.pb.FlushRegionRequest\032\035.hbase.", + "pb.FlushRegionResponse\022J\n\013SplitRegion\022\034." + + "hbase.pb.SplitRegionRequest\032\035.hbase.pb.S" + + "plitRegionResponse\022P\n\rCompactRegion\022\036.hb" + + "ase.pb.CompactRegionRequest\032\037.hbase.pb.C" + + "ompactRegionResponse\022\\\n\021ReplicateWALEntr" + + "y\022\".hbase.pb.ReplicateWALEntryRequest\032#." + + "hbase.pb.ReplicateWALEntryResponse\022Q\n\006Re" + + "play\022\".hbase.pb.ReplicateWALEntryRequest" + + "\032#.hbase.pb.ReplicateWALEntryResponse\022P\n" + + "\rRollWALWriter\022\036.hbase.pb.RollWALWriterR", + "equest\032\037.hbase.pb.RollWALWriterResponse\022" + + "P\n\rGetServerInfo\022\036.hbase.pb.GetServerInf" + + "oRequest\032\037.hbase.pb.GetServerInfoRespons" + + "e\022G\n\nStopServer\022\033.hbase.pb.StopServerReq" + + "uest\032\034.hbase.pb.StopServerResponse\022_\n\022Up" + + "dateFavoredNodes\022#.hbase.pb.UpdateFavore" + + "dNodesRequest\032$.hbase.pb.UpdateFavoredNo" + + "desResponse\022b\n\023UpdateConfiguration\022$.hba" + + "se.pb.UpdateConfigurationRequest\032%.hbase" + + ".pb.UpdateConfigurationResponse\022P\n\rGetRe", + "gionLoad\022\036.hbase.pb.GetRegionLoadRequest" + + "\032\037.hbase.pb.GetRegionLoadResponse\022\\\n\021Exe" + + "cuteProcedures\022\".hbase.pb.ExecuteProcedu" + + "resRequest\032#.hbase.pb.ExecuteProceduresR" + + "esponseBH\n1org.apache.hadoop.hbase.shade" + + "d.protobuf.generatedB\013AdminProtosH\001\210\001\001\240\001" + + "\001" }; org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor. 
InternalDescriptorAssigner() { @@ -26750,6 +29093,18 @@ public final class AdminProtos { org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_GetRegionLoadResponse_descriptor, new java.lang.String[] { "RegionLoads", }); + internal_static_hbase_pb_ExecuteProceduresRequest_descriptor = + getDescriptor().getMessageTypes().get(36); + internal_static_hbase_pb_ExecuteProceduresRequest_fieldAccessorTable = new + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_hbase_pb_ExecuteProceduresRequest_descriptor, + new java.lang.String[] { "OpenRegion", "CloseRegion", }); + internal_static_hbase_pb_ExecuteProceduresResponse_descriptor = + getDescriptor().getMessageTypes().get(37); + internal_static_hbase_pb_ExecuteProceduresResponse_fieldAccessorTable = new + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_hbase_pb_ExecuteProceduresResponse_descriptor, + new java.lang.String[] { "OpenRegion", "CloseRegion", }); org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.getDescriptor(); org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.getDescriptor(); org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.getDescriptor(); diff --git a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProcedureProtos.java b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProcedureProtos.java index f4f6a8c..47136e2 100644 --- a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProcedureProtos.java +++ b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProcedureProtos.java @@ -2209,6 +2209,195 @@ public final class MasterProcedureProtos { // @@protoc_insertion_point(enum_scope:hbase.pb.ServerCrashState) } + /** + * Protobuf enum {@code hbase.pb.RegionTransitionState} + */ + public enum RegionTransitionState + implements org.apache.hadoop.hbase.shaded.com.google.protobuf.ProtocolMessageEnum { + /** + * REGION_TRANSITION_QUEUE = 1; + */ + REGION_TRANSITION_QUEUE(1), + /** + * REGION_TRANSITION_DISPATCH = 2; + */ + REGION_TRANSITION_DISPATCH(2), + /** + * REGION_TRANSITION_FINISH = 3; + */ + REGION_TRANSITION_FINISH(3), + ; + + /** + * REGION_TRANSITION_QUEUE = 1; + */ + public static final int REGION_TRANSITION_QUEUE_VALUE = 1; + /** + * REGION_TRANSITION_DISPATCH = 2; + */ + public static final int REGION_TRANSITION_DISPATCH_VALUE = 2; + /** + * REGION_TRANSITION_FINISH = 3; + */ + public static final int REGION_TRANSITION_FINISH_VALUE = 3; + + + public final int getNumber() { + return value; + } + + /** + * @deprecated Use {@link #forNumber(int)} instead. 
+ */ + @java.lang.Deprecated + public static RegionTransitionState valueOf(int value) { + return forNumber(value); + } + + public static RegionTransitionState forNumber(int value) { + switch (value) { + case 1: return REGION_TRANSITION_QUEUE; + case 2: return REGION_TRANSITION_DISPATCH; + case 3: return REGION_TRANSITION_FINISH; + default: return null; + } + } + + public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.EnumLiteMap< + RegionTransitionState> internalValueMap = + new org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.EnumLiteMap() { + public RegionTransitionState findValueByNumber(int number) { + return RegionTransitionState.forNumber(number); + } + }; + + public final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(ordinal()); + } + public final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(17); + } + + private static final RegionTransitionState[] VALUES = values(); + + public static RegionTransitionState valueOf( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private RegionTransitionState(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:hbase.pb.RegionTransitionState) + } + + /** + * Protobuf enum {@code hbase.pb.MoveRegionState} + */ + public enum MoveRegionState + implements org.apache.hadoop.hbase.shaded.com.google.protobuf.ProtocolMessageEnum { + /** + * MOVE_REGION_UNASSIGN = 1; + */ + MOVE_REGION_UNASSIGN(1), + /** + * MOVE_REGION_ASSIGN = 2; + */ + MOVE_REGION_ASSIGN(2), + ; + + /** + * MOVE_REGION_UNASSIGN = 1; + */ + public static final int MOVE_REGION_UNASSIGN_VALUE = 1; + /** + * MOVE_REGION_ASSIGN = 2; + */ + public static final int MOVE_REGION_ASSIGN_VALUE = 2; + + + public final int getNumber() { + return value; + } + + /** + * @deprecated Use {@link #forNumber(int)} instead. 
+ */ + @java.lang.Deprecated + public static MoveRegionState valueOf(int value) { + return forNumber(value); + } + + public static MoveRegionState forNumber(int value) { + switch (value) { + case 1: return MOVE_REGION_UNASSIGN; + case 2: return MOVE_REGION_ASSIGN; + default: return null; + } + } + + public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.EnumLiteMap< + MoveRegionState> internalValueMap = + new org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.EnumLiteMap() { + public MoveRegionState findValueByNumber(int number) { + return MoveRegionState.forNumber(number); + } + }; + + public final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(ordinal()); + } + public final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(18); + } + + private static final MoveRegionState[] VALUES = values(); + + public static MoveRegionState valueOf( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private MoveRegionState(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:hbase.pb.MoveRegionState) + } + public interface CreateTableStateDataOrBuilder extends // @@protoc_insertion_point(interface_extends:hbase.pb.CreateTableStateData) org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder { @@ -23958,104 +24147,3125 @@ public final class MasterProcedureProtos { } - private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor - internal_static_hbase_pb_CreateTableStateData_descriptor; - private static final - org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internal_static_hbase_pb_CreateTableStateData_fieldAccessorTable; - private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor - internal_static_hbase_pb_ModifyTableStateData_descriptor; - private static final - org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internal_static_hbase_pb_ModifyTableStateData_fieldAccessorTable; - private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor - internal_static_hbase_pb_TruncateTableStateData_descriptor; - private static final - org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internal_static_hbase_pb_TruncateTableStateData_fieldAccessorTable; - private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor - internal_static_hbase_pb_DeleteTableStateData_descriptor; - private static final - org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internal_static_hbase_pb_DeleteTableStateData_fieldAccessorTable; - private static 
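// Editorial sketch (not part of the generated diff): decoding a persisted numeric state back into
// the new RegionTransitionState enum. forNumber(...) is the generated lookup shown above and
// returns null for numbers this build does not recognize; falling back to
// REGION_TRANSITION_QUEUE here is an illustrative choice, not something the patch prescribes.
static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionTransitionState
    transitionStateFrom(int persistedValue) {
  org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionTransitionState state =
      org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionTransitionState
          .forNumber(persistedValue);
  // Unknown numbers (e.g. from a newer serialization) come back as null rather than throwing.
  return state == null
      ? org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionTransitionState.REGION_TRANSITION_QUEUE
      : state;
}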
final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor - internal_static_hbase_pb_CreateNamespaceStateData_descriptor; - private static final - org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internal_static_hbase_pb_CreateNamespaceStateData_fieldAccessorTable; - private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor - internal_static_hbase_pb_ModifyNamespaceStateData_descriptor; - private static final - org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internal_static_hbase_pb_ModifyNamespaceStateData_fieldAccessorTable; - private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor - internal_static_hbase_pb_DeleteNamespaceStateData_descriptor; - private static final - org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internal_static_hbase_pb_DeleteNamespaceStateData_fieldAccessorTable; - private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor - internal_static_hbase_pb_AddColumnFamilyStateData_descriptor; - private static final - org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internal_static_hbase_pb_AddColumnFamilyStateData_fieldAccessorTable; - private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor - internal_static_hbase_pb_ModifyColumnFamilyStateData_descriptor; - private static final - org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internal_static_hbase_pb_ModifyColumnFamilyStateData_fieldAccessorTable; - private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor - internal_static_hbase_pb_DeleteColumnFamilyStateData_descriptor; - private static final - org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internal_static_hbase_pb_DeleteColumnFamilyStateData_fieldAccessorTable; - private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor - internal_static_hbase_pb_EnableTableStateData_descriptor; - private static final - org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internal_static_hbase_pb_EnableTableStateData_fieldAccessorTable; - private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor - internal_static_hbase_pb_DisableTableStateData_descriptor; - private static final - org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internal_static_hbase_pb_DisableTableStateData_fieldAccessorTable; - private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor - internal_static_hbase_pb_RestoreParentToChildRegionsPair_descriptor; - private static final - org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internal_static_hbase_pb_RestoreParentToChildRegionsPair_fieldAccessorTable; - private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor - internal_static_hbase_pb_CloneSnapshotStateData_descriptor; - private static final - org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internal_static_hbase_pb_CloneSnapshotStateData_fieldAccessorTable; - private static final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor - internal_static_hbase_pb_RestoreSnapshotStateData_descriptor; - private static final - org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internal_static_hbase_pb_RestoreSnapshotStateData_fieldAccessorTable; - private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor - internal_static_hbase_pb_MergeTableRegionsStateData_descriptor; - private static final - org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internal_static_hbase_pb_MergeTableRegionsStateData_fieldAccessorTable; - private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor - internal_static_hbase_pb_SplitTableRegionStateData_descriptor; - private static final - org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internal_static_hbase_pb_SplitTableRegionStateData_fieldAccessorTable; - private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor - internal_static_hbase_pb_ServerCrashStateData_descriptor; - private static final - org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internal_static_hbase_pb_ServerCrashStateData_fieldAccessorTable; + public interface AssignRegionStateDataOrBuilder extends + // @@protoc_insertion_point(interface_extends:hbase.pb.AssignRegionStateData) + org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder { - public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor - getDescriptor() { - return descriptor; + /** + * required .hbase.pb.RegionTransitionState transition_state = 1; + */ + boolean hasTransitionState(); + /** + * required .hbase.pb.RegionTransitionState transition_state = 1; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionTransitionState getTransitionState(); + + /** + * required .hbase.pb.RegionInfo region_info = 2; + */ + boolean hasRegionInfo(); + /** + * required .hbase.pb.RegionInfo region_info = 2; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo(); + /** + * required .hbase.pb.RegionInfo region_info = 2; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder(); + + /** + * optional bool force_new_plan = 3 [default = false]; + */ + boolean hasForceNewPlan(); + /** + * optional bool force_new_plan = 3 [default = false]; + */ + boolean getForceNewPlan(); + + /** + * optional .hbase.pb.ServerName target_server = 4; + */ + boolean hasTargetServer(); + /** + * optional .hbase.pb.ServerName target_server = 4; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName getTargetServer(); + /** + * optional .hbase.pb.ServerName target_server = 4; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder getTargetServerOrBuilder(); } - private static org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor - descriptor; - static { + /** + * Protobuf type {@code hbase.pb.AssignRegionStateData} + */ + public static final class AssignRegionStateData extends + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:hbase.pb.AssignRegionStateData) + AssignRegionStateDataOrBuilder { + // Use AssignRegionStateData.newBuilder() to 
construct. + private AssignRegionStateData(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private AssignRegionStateData() { + transitionState_ = 1; + forceNewPlan_ = false; + } + + @java.lang.Override + public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private AssignRegionStateData( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + this(); + int mutable_bitField0_ = 0; + org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields = + org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + int rawValue = input.readEnum(); + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionTransitionState value = org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionTransitionState.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(1, rawValue); + } else { + bitField0_ |= 0x00000001; + transitionState_ = rawValue; + } + break; + } + case 18: { + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder subBuilder = null; + if (((bitField0_ & 0x00000002) == 0x00000002)) { + subBuilder = regionInfo_.toBuilder(); + } + regionInfo_ = input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(regionInfo_); + regionInfo_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000002; + break; + } + case 24: { + bitField0_ |= 0x00000004; + forceNewPlan_ = input.readBool(); + break; + } + case 34: { + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder subBuilder = null; + if (((bitField0_ & 0x00000008) == 0x00000008)) { + subBuilder = targetServer_.toBuilder(); + } + targetServer_ = input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(targetServer_); + targetServer_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000008; + break; + } + } + } + } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_AssignRegionStateData_descriptor; + } + + protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_AssignRegionStateData_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.AssignRegionStateData.class, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.AssignRegionStateData.Builder.class); + } + + private int bitField0_; + public static final int TRANSITION_STATE_FIELD_NUMBER = 1; + private int transitionState_; + /** + * required .hbase.pb.RegionTransitionState transition_state = 1; + */ + public boolean hasTransitionState() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .hbase.pb.RegionTransitionState transition_state = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionTransitionState getTransitionState() { + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionTransitionState result = org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionTransitionState.valueOf(transitionState_); + return result == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionTransitionState.REGION_TRANSITION_QUEUE : result; + } + + public static final int REGION_INFO_FIELD_NUMBER = 2; + private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo regionInfo_; + /** + * required .hbase.pb.RegionInfo region_info = 2; + */ + public boolean hasRegionInfo() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .hbase.pb.RegionInfo region_info = 2; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo() { + return regionInfo_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance() : regionInfo_; + } + /** + * required .hbase.pb.RegionInfo region_info = 2; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder() { + return regionInfo_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance() : regionInfo_; + } + + public static final int FORCE_NEW_PLAN_FIELD_NUMBER = 3; + private boolean forceNewPlan_; + /** + * optional bool force_new_plan = 3 [default = false]; + */ + public boolean hasForceNewPlan() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional bool force_new_plan = 3 [default = false]; + */ + public boolean getForceNewPlan() { + return forceNewPlan_; + } + + public static final int TARGET_SERVER_FIELD_NUMBER = 4; + private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName targetServer_; + /** + * optional .hbase.pb.ServerName target_server = 4; + */ + public boolean hasTargetServer() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional .hbase.pb.ServerName target_server = 4; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName getTargetServer() { + return targetServer_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance() : targetServer_; + } + /** + * optional .hbase.pb.ServerName target_server = 4; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder getTargetServerOrBuilder() { + return targetServer_ == null ? 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance() : targetServer_; + } + + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + if (!hasTransitionState()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasRegionInfo()) { + memoizedIsInitialized = 0; + return false; + } + if (!getRegionInfo().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + if (hasTargetServer()) { + if (!getTargetServer().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeEnum(1, transitionState_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeMessage(2, getRegionInfo()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeBool(3, forceNewPlan_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeMessage(4, getTargetServer()); + } + unknownFields.writeTo(output); + } + + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream + .computeEnumSize(1, transitionState_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream + .computeMessageSize(2, getRegionInfo()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream + .computeBoolSize(3, forceNewPlan_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream + .computeMessageSize(4, getTargetServer()); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.AssignRegionStateData)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.AssignRegionStateData other = (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.AssignRegionStateData) obj; + + boolean result = true; + result = result && (hasTransitionState() == other.hasTransitionState()); + if (hasTransitionState()) { + result = result && transitionState_ == other.transitionState_; + } + result = result && (hasRegionInfo() == other.hasRegionInfo()); + if (hasRegionInfo()) { + result = result && getRegionInfo() + .equals(other.getRegionInfo()); + } + result = result && (hasForceNewPlan() == other.hasForceNewPlan()); + if (hasForceNewPlan()) { + result = result && (getForceNewPlan() + == other.getForceNewPlan()); + } + result = result && (hasTargetServer() == other.hasTargetServer()); + if (hasTargetServer()) { + result = result && getTargetServer() + .equals(other.getTargetServer()); + } + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + 
public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasTransitionState()) { + hash = (37 * hash) + TRANSITION_STATE_FIELD_NUMBER; + hash = (53 * hash) + transitionState_; + } + if (hasRegionInfo()) { + hash = (37 * hash) + REGION_INFO_FIELD_NUMBER; + hash = (53 * hash) + getRegionInfo().hashCode(); + } + if (hasForceNewPlan()) { + hash = (37 * hash) + FORCE_NEW_PLAN_FIELD_NUMBER; + hash = (53 * hash) + org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.hashBoolean( + getForceNewPlan()); + } + if (hasTargetServer()) { + hash = (37 * hash) + TARGET_SERVER_FIELD_NUMBER; + hash = (53 * hash) + getTargetServer().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.AssignRegionStateData parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.AssignRegionStateData parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.AssignRegionStateData parseFrom(byte[] data) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.AssignRegionStateData parseFrom( + byte[] data, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.AssignRegionStateData parseFrom(java.io.InputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.AssignRegionStateData parseFrom( + java.io.InputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.AssignRegionStateData parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.AssignRegionStateData parseDelimitedFrom( + java.io.InputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.AssignRegionStateData parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.AssignRegionStateData parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.AssignRegionStateData prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.AssignRegionStateData} + */ + public static final class Builder extends + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:hbase.pb.AssignRegionStateData) + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.AssignRegionStateDataOrBuilder { + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_AssignRegionStateData_descriptor; + } + + protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_AssignRegionStateData_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.AssignRegionStateData.class, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.AssignRegionStateData.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.AssignRegionStateData.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + getRegionInfoFieldBuilder(); + getTargetServerFieldBuilder(); + } + } + public Builder clear() { + super.clear(); + transitionState_ = 1; + bitField0_ = 
(bitField0_ & ~0x00000001); + if (regionInfoBuilder_ == null) { + regionInfo_ = null; + } else { + regionInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + forceNewPlan_ = false; + bitField0_ = (bitField0_ & ~0x00000004); + if (targetServerBuilder_ == null) { + targetServer_ = null; + } else { + targetServerBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000008); + return this; + } + + public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_AssignRegionStateData_descriptor; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.AssignRegionStateData getDefaultInstanceForType() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.AssignRegionStateData.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.AssignRegionStateData build() { + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.AssignRegionStateData result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.AssignRegionStateData buildPartial() { + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.AssignRegionStateData result = new org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.AssignRegionStateData(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.transitionState_ = transitionState_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + if (regionInfoBuilder_ == null) { + result.regionInfo_ = regionInfo_; + } else { + result.regionInfo_ = regionInfoBuilder_.build(); + } + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.forceNewPlan_ = forceNewPlan_; + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000008; + } + if (targetServerBuilder_ == null) { + result.targetServer_ = targetServer_; + } else { + result.targetServer_ = targetServerBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder clone() { + return (Builder) super.clone(); + } + public Builder setField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + Object value) { + return (Builder) super.setField(field, value); + } + public Builder clearField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + public Builder clearOneof( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + public Builder setRepeatedField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + int index, Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + public Builder addRepeatedField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + Object value) { + return (Builder) super.addRepeatedField(field, value); + } + public Builder 
mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.AssignRegionStateData) { + return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.AssignRegionStateData)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.AssignRegionStateData other) { + if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.AssignRegionStateData.getDefaultInstance()) return this; + if (other.hasTransitionState()) { + setTransitionState(other.getTransitionState()); + } + if (other.hasRegionInfo()) { + mergeRegionInfo(other.getRegionInfo()); + } + if (other.hasForceNewPlan()) { + setForceNewPlan(other.getForceNewPlan()); + } + if (other.hasTargetServer()) { + mergeTargetServer(other.getTargetServer()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + public final boolean isInitialized() { + if (!hasTransitionState()) { + return false; + } + if (!hasRegionInfo()) { + return false; + } + if (!getRegionInfo().isInitialized()) { + return false; + } + if (hasTargetServer()) { + if (!getTargetServer().isInitialized()) { + return false; + } + } + return true; + } + + public Builder mergeFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.AssignRegionStateData parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.AssignRegionStateData) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + private int transitionState_ = 1; + /** + * required .hbase.pb.RegionTransitionState transition_state = 1; + */ + public boolean hasTransitionState() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .hbase.pb.RegionTransitionState transition_state = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionTransitionState getTransitionState() { + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionTransitionState result = org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionTransitionState.valueOf(transitionState_); + return result == null ? 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionTransitionState.REGION_TRANSITION_QUEUE : result; + } + /** + * required .hbase.pb.RegionTransitionState transition_state = 1; + */ + public Builder setTransitionState(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionTransitionState value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + transitionState_ = value.getNumber(); + onChanged(); + return this; + } + /** + * required .hbase.pb.RegionTransitionState transition_state = 1; + */ + public Builder clearTransitionState() { + bitField0_ = (bitField0_ & ~0x00000001); + transitionState_ = 1; + onChanged(); + return this; + } + + private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo regionInfo_ = null; + private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> regionInfoBuilder_; + /** + * required .hbase.pb.RegionInfo region_info = 2; + */ + public boolean hasRegionInfo() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .hbase.pb.RegionInfo region_info = 2; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo() { + if (regionInfoBuilder_ == null) { + return regionInfo_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance() : regionInfo_; + } else { + return regionInfoBuilder_.getMessage(); + } + } + /** + * required .hbase.pb.RegionInfo region_info = 2; + */ + public Builder setRegionInfo(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo value) { + if (regionInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + regionInfo_ = value; + onChanged(); + } else { + regionInfoBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .hbase.pb.RegionInfo region_info = 2; + */ + public Builder setRegionInfo( + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) { + if (regionInfoBuilder_ == null) { + regionInfo_ = builderForValue.build(); + onChanged(); + } else { + regionInfoBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .hbase.pb.RegionInfo region_info = 2; + */ + public Builder mergeRegionInfo(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo value) { + if (regionInfoBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002) && + regionInfo_ != null && + regionInfo_ != org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance()) { + regionInfo_ = + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.newBuilder(regionInfo_).mergeFrom(value).buildPartial(); + } else { + regionInfo_ = value; + } + onChanged(); + } else { + regionInfoBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .hbase.pb.RegionInfo region_info = 2; + */ + public Builder clearRegionInfo() { + if (regionInfoBuilder_ == null) { + regionInfo_ = null; + onChanged(); + } else { + regionInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + /** + * 
required .hbase.pb.RegionInfo region_info = 2; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder getRegionInfoBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getRegionInfoFieldBuilder().getBuilder(); + } + /** + * required .hbase.pb.RegionInfo region_info = 2; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder() { + if (regionInfoBuilder_ != null) { + return regionInfoBuilder_.getMessageOrBuilder(); + } else { + return regionInfo_ == null ? + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance() : regionInfo_; + } + } + /** + * required .hbase.pb.RegionInfo region_info = 2; + */ + private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> + getRegionInfoFieldBuilder() { + if (regionInfoBuilder_ == null) { + regionInfoBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>( + getRegionInfo(), + getParentForChildren(), + isClean()); + regionInfo_ = null; + } + return regionInfoBuilder_; + } + + private boolean forceNewPlan_ ; + /** + * optional bool force_new_plan = 3 [default = false]; + */ + public boolean hasForceNewPlan() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional bool force_new_plan = 3 [default = false]; + */ + public boolean getForceNewPlan() { + return forceNewPlan_; + } + /** + * optional bool force_new_plan = 3 [default = false]; + */ + public Builder setForceNewPlan(boolean value) { + bitField0_ |= 0x00000004; + forceNewPlan_ = value; + onChanged(); + return this; + } + /** + * optional bool force_new_plan = 3 [default = false]; + */ + public Builder clearForceNewPlan() { + bitField0_ = (bitField0_ & ~0x00000004); + forceNewPlan_ = false; + onChanged(); + return this; + } + + private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName targetServer_ = null; + private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder> targetServerBuilder_; + /** + * optional .hbase.pb.ServerName target_server = 4; + */ + public boolean hasTargetServer() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional .hbase.pb.ServerName target_server = 4; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName getTargetServer() { + if (targetServerBuilder_ == null) { + return targetServer_ == null ? 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance() : targetServer_; + } else { + return targetServerBuilder_.getMessage(); + } + } + /** + * optional .hbase.pb.ServerName target_server = 4; + */ + public Builder setTargetServer(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName value) { + if (targetServerBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + targetServer_ = value; + onChanged(); + } else { + targetServerBuilder_.setMessage(value); + } + bitField0_ |= 0x00000008; + return this; + } + /** + * optional .hbase.pb.ServerName target_server = 4; + */ + public Builder setTargetServer( + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) { + if (targetServerBuilder_ == null) { + targetServer_ = builderForValue.build(); + onChanged(); + } else { + targetServerBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000008; + return this; + } + /** + * optional .hbase.pb.ServerName target_server = 4; + */ + public Builder mergeTargetServer(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName value) { + if (targetServerBuilder_ == null) { + if (((bitField0_ & 0x00000008) == 0x00000008) && + targetServer_ != null && + targetServer_ != org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()) { + targetServer_ = + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.newBuilder(targetServer_).mergeFrom(value).buildPartial(); + } else { + targetServer_ = value; + } + onChanged(); + } else { + targetServerBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000008; + return this; + } + /** + * optional .hbase.pb.ServerName target_server = 4; + */ + public Builder clearTargetServer() { + if (targetServerBuilder_ == null) { + targetServer_ = null; + onChanged(); + } else { + targetServerBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000008); + return this; + } + /** + * optional .hbase.pb.ServerName target_server = 4; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder getTargetServerBuilder() { + bitField0_ |= 0x00000008; + onChanged(); + return getTargetServerFieldBuilder().getBuilder(); + } + /** + * optional .hbase.pb.ServerName target_server = 4; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder getTargetServerOrBuilder() { + if (targetServerBuilder_ != null) { + return targetServerBuilder_.getMessageOrBuilder(); + } else { + return targetServer_ == null ? 
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance() : targetServer_; + } + } + /** + * optional .hbase.pb.ServerName target_server = 4; + */ + private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder> + getTargetServerFieldBuilder() { + if (targetServerBuilder_ == null) { + targetServerBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder>( + getTargetServer(), + getParentForChildren(), + isClean()); + targetServer_ = null; + } + return targetServerBuilder_; + } + public final Builder setUnknownFields( + final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + public final Builder mergeUnknownFields( + final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:hbase.pb.AssignRegionStateData) + } + + // @@protoc_insertion_point(class_scope:hbase.pb.AssignRegionStateData) + private static final org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.AssignRegionStateData DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.AssignRegionStateData(); + } + + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.AssignRegionStateData getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser + PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser() { + public AssignRegionStateData parsePartialFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return new AssignRegionStateData(input, extensionRegistry); + } + }; + + public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.AssignRegionStateData getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + + } + + public interface UnassignRegionStateDataOrBuilder extends + // @@protoc_insertion_point(interface_extends:hbase.pb.UnassignRegionStateData) + org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder { + + /** + * required .hbase.pb.RegionTransitionState transition_state = 1; + */ + boolean hasTransitionState(); + /** + * required .hbase.pb.RegionTransitionState transition_state = 1; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionTransitionState getTransitionState(); + + /** + * required 
.hbase.pb.RegionInfo region_info = 2; + */ + boolean hasRegionInfo(); + /** + * required .hbase.pb.RegionInfo region_info = 2; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo(); + /** + * required .hbase.pb.RegionInfo region_info = 2; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder(); + + /** + * optional .hbase.pb.ServerName destination_server = 3; + */ + boolean hasDestinationServer(); + /** + * optional .hbase.pb.ServerName destination_server = 3; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName getDestinationServer(); + /** + * optional .hbase.pb.ServerName destination_server = 3; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder getDestinationServerOrBuilder(); + + /** + * optional bool force = 4 [default = false]; + */ + boolean hasForce(); + /** + * optional bool force = 4 [default = false]; + */ + boolean getForce(); + } + /** + * Protobuf type {@code hbase.pb.UnassignRegionStateData} + */ + public static final class UnassignRegionStateData extends + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:hbase.pb.UnassignRegionStateData) + UnassignRegionStateDataOrBuilder { + // Use UnassignRegionStateData.newBuilder() to construct. + private UnassignRegionStateData(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private UnassignRegionStateData() { + transitionState_ = 1; + force_ = false; + } + + @java.lang.Override + public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private UnassignRegionStateData( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + this(); + int mutable_bitField0_ = 0; + org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields = + org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + int rawValue = input.readEnum(); + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionTransitionState value = org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionTransitionState.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(1, rawValue); + } else { + bitField0_ |= 0x00000001; + transitionState_ = rawValue; + } + break; + } + case 18: { + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder subBuilder = null; + if (((bitField0_ & 0x00000002) == 0x00000002)) { + subBuilder = regionInfo_.toBuilder(); + } + regionInfo_ = input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(regionInfo_); + regionInfo_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000002; + break; + } + case 26: { + 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder subBuilder = null; + if (((bitField0_ & 0x00000004) == 0x00000004)) { + subBuilder = destinationServer_.toBuilder(); + } + destinationServer_ = input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(destinationServer_); + destinationServer_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000004; + break; + } + case 32: { + bitField0_ |= 0x00000008; + force_ = input.readBool(); + break; + } + } + } + } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_UnassignRegionStateData_descriptor; + } + + protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_UnassignRegionStateData_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.UnassignRegionStateData.class, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.UnassignRegionStateData.Builder.class); + } + + private int bitField0_; + public static final int TRANSITION_STATE_FIELD_NUMBER = 1; + private int transitionState_; + /** + * required .hbase.pb.RegionTransitionState transition_state = 1; + */ + public boolean hasTransitionState() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .hbase.pb.RegionTransitionState transition_state = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionTransitionState getTransitionState() { + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionTransitionState result = org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionTransitionState.valueOf(transitionState_); + return result == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionTransitionState.REGION_TRANSITION_QUEUE : result; + } + + public static final int REGION_INFO_FIELD_NUMBER = 2; + private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo regionInfo_; + /** + * required .hbase.pb.RegionInfo region_info = 2; + */ + public boolean hasRegionInfo() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .hbase.pb.RegionInfo region_info = 2; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo() { + return regionInfo_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance() : regionInfo_; + } + /** + * required .hbase.pb.RegionInfo region_info = 2; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder() { + return regionInfo_ == null ? 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance() : regionInfo_; + } + + public static final int DESTINATION_SERVER_FIELD_NUMBER = 3; + private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName destinationServer_; + /** + * optional .hbase.pb.ServerName destination_server = 3; + */ + public boolean hasDestinationServer() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional .hbase.pb.ServerName destination_server = 3; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName getDestinationServer() { + return destinationServer_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance() : destinationServer_; + } + /** + * optional .hbase.pb.ServerName destination_server = 3; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder getDestinationServerOrBuilder() { + return destinationServer_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance() : destinationServer_; + } + + public static final int FORCE_FIELD_NUMBER = 4; + private boolean force_; + /** + * optional bool force = 4 [default = false]; + */ + public boolean hasForce() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional bool force = 4 [default = false]; + */ + public boolean getForce() { + return force_; + } + + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + if (!hasTransitionState()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasRegionInfo()) { + memoizedIsInitialized = 0; + return false; + } + if (!getRegionInfo().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + if (hasDestinationServer()) { + if (!getDestinationServer().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeEnum(1, transitionState_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeMessage(2, getRegionInfo()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeMessage(3, getDestinationServer()); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeBool(4, force_); + } + unknownFields.writeTo(output); + } + + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream + .computeEnumSize(1, transitionState_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream + .computeMessageSize(2, getRegionInfo()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream + .computeMessageSize(3, getDestinationServer()); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream + .computeBoolSize(4, force_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + 
} + + private static final long serialVersionUID = 0L; + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.UnassignRegionStateData)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.UnassignRegionStateData other = (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.UnassignRegionStateData) obj; + + boolean result = true; + result = result && (hasTransitionState() == other.hasTransitionState()); + if (hasTransitionState()) { + result = result && transitionState_ == other.transitionState_; + } + result = result && (hasRegionInfo() == other.hasRegionInfo()); + if (hasRegionInfo()) { + result = result && getRegionInfo() + .equals(other.getRegionInfo()); + } + result = result && (hasDestinationServer() == other.hasDestinationServer()); + if (hasDestinationServer()) { + result = result && getDestinationServer() + .equals(other.getDestinationServer()); + } + result = result && (hasForce() == other.hasForce()); + if (hasForce()) { + result = result && (getForce() + == other.getForce()); + } + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasTransitionState()) { + hash = (37 * hash) + TRANSITION_STATE_FIELD_NUMBER; + hash = (53 * hash) + transitionState_; + } + if (hasRegionInfo()) { + hash = (37 * hash) + REGION_INFO_FIELD_NUMBER; + hash = (53 * hash) + getRegionInfo().hashCode(); + } + if (hasDestinationServer()) { + hash = (37 * hash) + DESTINATION_SERVER_FIELD_NUMBER; + hash = (53 * hash) + getDestinationServer().hashCode(); + } + if (hasForce()) { + hash = (37 * hash) + FORCE_FIELD_NUMBER; + hash = (53 * hash) + org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.hashBoolean( + getForce()); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.UnassignRegionStateData parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.UnassignRegionStateData parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.UnassignRegionStateData parseFrom(byte[] data) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.UnassignRegionStateData parseFrom( + byte[] data, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.UnassignRegionStateData parseFrom(java.io.InputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.UnassignRegionStateData parseFrom( + java.io.InputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.UnassignRegionStateData parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.UnassignRegionStateData parseDelimitedFrom( + java.io.InputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.UnassignRegionStateData parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.UnassignRegionStateData parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.UnassignRegionStateData prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.UnassignRegionStateData} + */ + public static final class Builder extends + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:hbase.pb.UnassignRegionStateData) + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.UnassignRegionStateDataOrBuilder { + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_UnassignRegionStateData_descriptor; + } + + protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_UnassignRegionStateData_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.UnassignRegionStateData.class, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.UnassignRegionStateData.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.UnassignRegionStateData.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + getRegionInfoFieldBuilder(); + getDestinationServerFieldBuilder(); + } + } + public Builder clear() { + super.clear(); + transitionState_ = 1; + bitField0_ = (bitField0_ & ~0x00000001); + if (regionInfoBuilder_ == null) { + regionInfo_ = null; + } else { + regionInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + if (destinationServerBuilder_ == null) { + destinationServer_ = null; + } else { + destinationServerBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); + force_ = false; + bitField0_ = (bitField0_ & ~0x00000008); + return this; + } + + public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_UnassignRegionStateData_descriptor; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.UnassignRegionStateData getDefaultInstanceForType() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.UnassignRegionStateData.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.UnassignRegionStateData build() { + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.UnassignRegionStateData result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.UnassignRegionStateData buildPartial() { + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.UnassignRegionStateData result = new org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.UnassignRegionStateData(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.transitionState_ = transitionState_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + if (regionInfoBuilder_ == null) { + result.regionInfo_ = regionInfo_; + } else { + result.regionInfo_ = regionInfoBuilder_.build(); + } + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + if (destinationServerBuilder_ == null) { + result.destinationServer_ = destinationServer_; + } else { + result.destinationServer_ = destinationServerBuilder_.build(); + } + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000008; + } + result.force_ = force_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder clone() { + return (Builder) super.clone(); + } + public Builder setField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + Object value) { + return (Builder) super.setField(field, value); + } + public Builder clearField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + public Builder clearOneof( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + public Builder setRepeatedField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + int index, Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + public Builder addRepeatedField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + Object value) { + return (Builder) super.addRepeatedField(field, value); + } + public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.UnassignRegionStateData) { + return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.UnassignRegionStateData)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.UnassignRegionStateData other) { + if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.UnassignRegionStateData.getDefaultInstance()) return this; + if (other.hasTransitionState()) { + setTransitionState(other.getTransitionState()); + } + if (other.hasRegionInfo()) { + mergeRegionInfo(other.getRegionInfo()); + } + if (other.hasDestinationServer()) { + mergeDestinationServer(other.getDestinationServer()); + } + if (other.hasForce()) { + setForce(other.getForce()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + public final boolean isInitialized() { + if (!hasTransitionState()) { + return false; + } + if (!hasRegionInfo()) { + return false; + } + if (!getRegionInfo().isInitialized()) { + return false; + } + if (hasDestinationServer()) { + if 
(!getDestinationServer().isInitialized()) { + return false; + } + } + return true; + } + + public Builder mergeFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.UnassignRegionStateData parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.UnassignRegionStateData) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + private int transitionState_ = 1; + /** + * required .hbase.pb.RegionTransitionState transition_state = 1; + */ + public boolean hasTransitionState() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .hbase.pb.RegionTransitionState transition_state = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionTransitionState getTransitionState() { + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionTransitionState result = org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionTransitionState.valueOf(transitionState_); + return result == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionTransitionState.REGION_TRANSITION_QUEUE : result; + } + /** + * required .hbase.pb.RegionTransitionState transition_state = 1; + */ + public Builder setTransitionState(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionTransitionState value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + transitionState_ = value.getNumber(); + onChanged(); + return this; + } + /** + * required .hbase.pb.RegionTransitionState transition_state = 1; + */ + public Builder clearTransitionState() { + bitField0_ = (bitField0_ & ~0x00000001); + transitionState_ = 1; + onChanged(); + return this; + } + + private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo regionInfo_ = null; + private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> regionInfoBuilder_; + /** + * required .hbase.pb.RegionInfo region_info = 2; + */ + public boolean hasRegionInfo() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .hbase.pb.RegionInfo region_info = 2; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo() { + if (regionInfoBuilder_ == null) { + return regionInfo_ == null ? 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance() : regionInfo_; + } else { + return regionInfoBuilder_.getMessage(); + } + } + /** + * required .hbase.pb.RegionInfo region_info = 2; + */ + public Builder setRegionInfo(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo value) { + if (regionInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + regionInfo_ = value; + onChanged(); + } else { + regionInfoBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .hbase.pb.RegionInfo region_info = 2; + */ + public Builder setRegionInfo( + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) { + if (regionInfoBuilder_ == null) { + regionInfo_ = builderForValue.build(); + onChanged(); + } else { + regionInfoBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .hbase.pb.RegionInfo region_info = 2; + */ + public Builder mergeRegionInfo(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo value) { + if (regionInfoBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002) && + regionInfo_ != null && + regionInfo_ != org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance()) { + regionInfo_ = + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.newBuilder(regionInfo_).mergeFrom(value).buildPartial(); + } else { + regionInfo_ = value; + } + onChanged(); + } else { + regionInfoBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .hbase.pb.RegionInfo region_info = 2; + */ + public Builder clearRegionInfo() { + if (regionInfoBuilder_ == null) { + regionInfo_ = null; + onChanged(); + } else { + regionInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + /** + * required .hbase.pb.RegionInfo region_info = 2; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder getRegionInfoBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getRegionInfoFieldBuilder().getBuilder(); + } + /** + * required .hbase.pb.RegionInfo region_info = 2; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder() { + if (regionInfoBuilder_ != null) { + return regionInfoBuilder_.getMessageOrBuilder(); + } else { + return regionInfo_ == null ? 
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance() : regionInfo_; + } + } + /** + * required .hbase.pb.RegionInfo region_info = 2; + */ + private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> + getRegionInfoFieldBuilder() { + if (regionInfoBuilder_ == null) { + regionInfoBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>( + getRegionInfo(), + getParentForChildren(), + isClean()); + regionInfo_ = null; + } + return regionInfoBuilder_; + } + + private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName destinationServer_ = null; + private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder> destinationServerBuilder_; + /** + * optional .hbase.pb.ServerName destination_server = 3; + */ + public boolean hasDestinationServer() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional .hbase.pb.ServerName destination_server = 3; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName getDestinationServer() { + if (destinationServerBuilder_ == null) { + return destinationServer_ == null ? 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance() : destinationServer_; + } else { + return destinationServerBuilder_.getMessage(); + } + } + /** + * optional .hbase.pb.ServerName destination_server = 3; + */ + public Builder setDestinationServer(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName value) { + if (destinationServerBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + destinationServer_ = value; + onChanged(); + } else { + destinationServerBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * optional .hbase.pb.ServerName destination_server = 3; + */ + public Builder setDestinationServer( + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) { + if (destinationServerBuilder_ == null) { + destinationServer_ = builderForValue.build(); + onChanged(); + } else { + destinationServerBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * optional .hbase.pb.ServerName destination_server = 3; + */ + public Builder mergeDestinationServer(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName value) { + if (destinationServerBuilder_ == null) { + if (((bitField0_ & 0x00000004) == 0x00000004) && + destinationServer_ != null && + destinationServer_ != org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()) { + destinationServer_ = + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.newBuilder(destinationServer_).mergeFrom(value).buildPartial(); + } else { + destinationServer_ = value; + } + onChanged(); + } else { + destinationServerBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * optional .hbase.pb.ServerName destination_server = 3; + */ + public Builder clearDestinationServer() { + if (destinationServerBuilder_ == null) { + destinationServer_ = null; + onChanged(); + } else { + destinationServerBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + /** + * optional .hbase.pb.ServerName destination_server = 3; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder getDestinationServerBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return getDestinationServerFieldBuilder().getBuilder(); + } + /** + * optional .hbase.pb.ServerName destination_server = 3; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder getDestinationServerOrBuilder() { + if (destinationServerBuilder_ != null) { + return destinationServerBuilder_.getMessageOrBuilder(); + } else { + return destinationServer_ == null ? 
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance() : destinationServer_; + } + } + /** + * optional .hbase.pb.ServerName destination_server = 3; + */ + private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder> + getDestinationServerFieldBuilder() { + if (destinationServerBuilder_ == null) { + destinationServerBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder>( + getDestinationServer(), + getParentForChildren(), + isClean()); + destinationServer_ = null; + } + return destinationServerBuilder_; + } + + private boolean force_ ; + /** + * optional bool force = 4 [default = false]; + */ + public boolean hasForce() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional bool force = 4 [default = false]; + */ + public boolean getForce() { + return force_; + } + /** + * optional bool force = 4 [default = false]; + */ + public Builder setForce(boolean value) { + bitField0_ |= 0x00000008; + force_ = value; + onChanged(); + return this; + } + /** + * optional bool force = 4 [default = false]; + */ + public Builder clearForce() { + bitField0_ = (bitField0_ & ~0x00000008); + force_ = false; + onChanged(); + return this; + } + public final Builder setUnknownFields( + final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + public final Builder mergeUnknownFields( + final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:hbase.pb.UnassignRegionStateData) + } + + // @@protoc_insertion_point(class_scope:hbase.pb.UnassignRegionStateData) + private static final org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.UnassignRegionStateData DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.UnassignRegionStateData(); + } + + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.UnassignRegionStateData getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser + PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser() { + public UnassignRegionStateData parsePartialFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return new UnassignRegionStateData(input, extensionRegistry); + } + }; + + public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + public 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.UnassignRegionStateData getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + + } + + public interface MoveRegionStateDataOrBuilder extends + // @@protoc_insertion_point(interface_extends:hbase.pb.MoveRegionStateData) + org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder { + + /** + * required .hbase.pb.RegionInfo region_info = 1; + */ + boolean hasRegionInfo(); + /** + * required .hbase.pb.RegionInfo region_info = 1; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo(); + /** + * required .hbase.pb.RegionInfo region_info = 1; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder(); + + /** + * required .hbase.pb.ServerName source_server = 2; + */ + boolean hasSourceServer(); + /** + * required .hbase.pb.ServerName source_server = 2; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName getSourceServer(); + /** + * required .hbase.pb.ServerName source_server = 2; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder getSourceServerOrBuilder(); + + /** + * required .hbase.pb.ServerName destination_server = 3; + */ + boolean hasDestinationServer(); + /** + * required .hbase.pb.ServerName destination_server = 3; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName getDestinationServer(); + /** + * required .hbase.pb.ServerName destination_server = 3; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder getDestinationServerOrBuilder(); + } + /** + * Protobuf type {@code hbase.pb.MoveRegionStateData} + */ + public static final class MoveRegionStateData extends + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:hbase.pb.MoveRegionStateData) + MoveRegionStateDataOrBuilder { + // Use MoveRegionStateData.newBuilder() to construct. 
+ private MoveRegionStateData(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private MoveRegionStateData() { + } + + @java.lang.Override + public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private MoveRegionStateData( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + this(); + int mutable_bitField0_ = 0; + org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields = + org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = regionInfo_.toBuilder(); + } + regionInfo_ = input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(regionInfo_); + regionInfo_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + case 18: { + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder subBuilder = null; + if (((bitField0_ & 0x00000002) == 0x00000002)) { + subBuilder = sourceServer_.toBuilder(); + } + sourceServer_ = input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(sourceServer_); + sourceServer_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000002; + break; + } + case 26: { + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder subBuilder = null; + if (((bitField0_ & 0x00000004) == 0x00000004)) { + subBuilder = destinationServer_.toBuilder(); + } + destinationServer_ = input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(destinationServer_); + destinationServer_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000004; + break; + } + } + } + } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_MoveRegionStateData_descriptor; + } + + protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_MoveRegionStateData_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MoveRegionStateData.class, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MoveRegionStateData.Builder.class); + } + + private int bitField0_; + public static final int REGION_INFO_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo regionInfo_; + /** + * required .hbase.pb.RegionInfo region_info = 1; + */ + public boolean hasRegionInfo() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .hbase.pb.RegionInfo region_info = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo() { + return regionInfo_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance() : regionInfo_; + } + /** + * required .hbase.pb.RegionInfo region_info = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder() { + return regionInfo_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance() : regionInfo_; + } + + public static final int SOURCE_SERVER_FIELD_NUMBER = 2; + private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName sourceServer_; + /** + * required .hbase.pb.ServerName source_server = 2; + */ + public boolean hasSourceServer() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .hbase.pb.ServerName source_server = 2; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName getSourceServer() { + return sourceServer_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance() : sourceServer_; + } + /** + * required .hbase.pb.ServerName source_server = 2; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder getSourceServerOrBuilder() { + return sourceServer_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance() : sourceServer_; + } + + public static final int DESTINATION_SERVER_FIELD_NUMBER = 3; + private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName destinationServer_; + /** + * required .hbase.pb.ServerName destination_server = 3; + */ + public boolean hasDestinationServer() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required .hbase.pb.ServerName destination_server = 3; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName getDestinationServer() { + return destinationServer_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance() : destinationServer_; + } + /** + * required .hbase.pb.ServerName destination_server = 3; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder getDestinationServerOrBuilder() { + return destinationServer_ == null ? 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance() : destinationServer_; + } + + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + if (!hasRegionInfo()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasSourceServer()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasDestinationServer()) { + memoizedIsInitialized = 0; + return false; + } + if (!getRegionInfo().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + if (!getSourceServer().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + if (!getDestinationServer().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, getRegionInfo()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeMessage(2, getSourceServer()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeMessage(3, getDestinationServer()); + } + unknownFields.writeTo(output); + } + + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream + .computeMessageSize(1, getRegionInfo()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream + .computeMessageSize(2, getSourceServer()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream + .computeMessageSize(3, getDestinationServer()); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MoveRegionStateData)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MoveRegionStateData other = (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MoveRegionStateData) obj; + + boolean result = true; + result = result && (hasRegionInfo() == other.hasRegionInfo()); + if (hasRegionInfo()) { + result = result && getRegionInfo() + .equals(other.getRegionInfo()); + } + result = result && (hasSourceServer() == other.hasSourceServer()); + if (hasSourceServer()) { + result = result && getSourceServer() + .equals(other.getSourceServer()); + } + result = result && (hasDestinationServer() == other.hasDestinationServer()); + if (hasDestinationServer()) { + result = result && getDestinationServer() + .equals(other.getDestinationServer()); + } + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasRegionInfo()) { + hash = (37 * hash) + REGION_INFO_FIELD_NUMBER; + hash = (53 * 
hash) + getRegionInfo().hashCode(); + } + if (hasSourceServer()) { + hash = (37 * hash) + SOURCE_SERVER_FIELD_NUMBER; + hash = (53 * hash) + getSourceServer().hashCode(); + } + if (hasDestinationServer()) { + hash = (37 * hash) + DESTINATION_SERVER_FIELD_NUMBER; + hash = (53 * hash) + getDestinationServer().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MoveRegionStateData parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MoveRegionStateData parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MoveRegionStateData parseFrom(byte[] data) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MoveRegionStateData parseFrom( + byte[] data, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MoveRegionStateData parseFrom(java.io.InputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MoveRegionStateData parseFrom( + java.io.InputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MoveRegionStateData parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MoveRegionStateData parseDelimitedFrom( + java.io.InputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MoveRegionStateData parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return 
org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MoveRegionStateData parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MoveRegionStateData prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.MoveRegionStateData} + */ + public static final class Builder extends + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:hbase.pb.MoveRegionStateData) + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MoveRegionStateDataOrBuilder { + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_MoveRegionStateData_descriptor; + } + + protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_MoveRegionStateData_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MoveRegionStateData.class, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MoveRegionStateData.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MoveRegionStateData.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + getRegionInfoFieldBuilder(); + getSourceServerFieldBuilder(); + getDestinationServerFieldBuilder(); + } + } + public Builder clear() { + super.clear(); + if (regionInfoBuilder_ == null) { + regionInfo_ = null; + } else { + regionInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + if (sourceServerBuilder_ == null) { + sourceServer_ = null; + } else { + sourceServerBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + if (destinationServerBuilder_ == null) { + destinationServer_ = null; + } else { + destinationServerBuilder_.clear(); + } + 
bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + + public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_MoveRegionStateData_descriptor; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MoveRegionStateData getDefaultInstanceForType() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MoveRegionStateData.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MoveRegionStateData build() { + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MoveRegionStateData result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MoveRegionStateData buildPartial() { + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MoveRegionStateData result = new org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MoveRegionStateData(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (regionInfoBuilder_ == null) { + result.regionInfo_ = regionInfo_; + } else { + result.regionInfo_ = regionInfoBuilder_.build(); + } + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + if (sourceServerBuilder_ == null) { + result.sourceServer_ = sourceServer_; + } else { + result.sourceServer_ = sourceServerBuilder_.build(); + } + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + if (destinationServerBuilder_ == null) { + result.destinationServer_ = destinationServer_; + } else { + result.destinationServer_ = destinationServerBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder clone() { + return (Builder) super.clone(); + } + public Builder setField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + Object value) { + return (Builder) super.setField(field, value); + } + public Builder clearField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + public Builder clearOneof( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + public Builder setRepeatedField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + int index, Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + public Builder addRepeatedField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + Object value) { + return (Builder) super.addRepeatedField(field, value); + } + public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MoveRegionStateData) { + return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MoveRegionStateData)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder 
mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MoveRegionStateData other) { + if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MoveRegionStateData.getDefaultInstance()) return this; + if (other.hasRegionInfo()) { + mergeRegionInfo(other.getRegionInfo()); + } + if (other.hasSourceServer()) { + mergeSourceServer(other.getSourceServer()); + } + if (other.hasDestinationServer()) { + mergeDestinationServer(other.getDestinationServer()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + public final boolean isInitialized() { + if (!hasRegionInfo()) { + return false; + } + if (!hasSourceServer()) { + return false; + } + if (!hasDestinationServer()) { + return false; + } + if (!getRegionInfo().isInitialized()) { + return false; + } + if (!getSourceServer().isInitialized()) { + return false; + } + if (!getDestinationServer().isInitialized()) { + return false; + } + return true; + } + + public Builder mergeFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MoveRegionStateData parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MoveRegionStateData) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo regionInfo_ = null; + private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> regionInfoBuilder_; + /** + * required .hbase.pb.RegionInfo region_info = 1; + */ + public boolean hasRegionInfo() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .hbase.pb.RegionInfo region_info = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo() { + if (regionInfoBuilder_ == null) { + return regionInfo_ == null ? 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance() : regionInfo_; + } else { + return regionInfoBuilder_.getMessage(); + } + } + /** + * required .hbase.pb.RegionInfo region_info = 1; + */ + public Builder setRegionInfo(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo value) { + if (regionInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + regionInfo_ = value; + onChanged(); + } else { + regionInfoBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.RegionInfo region_info = 1; + */ + public Builder setRegionInfo( + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) { + if (regionInfoBuilder_ == null) { + regionInfo_ = builderForValue.build(); + onChanged(); + } else { + regionInfoBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.RegionInfo region_info = 1; + */ + public Builder mergeRegionInfo(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo value) { + if (regionInfoBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + regionInfo_ != null && + regionInfo_ != org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance()) { + regionInfo_ = + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.newBuilder(regionInfo_).mergeFrom(value).buildPartial(); + } else { + regionInfo_ = value; + } + onChanged(); + } else { + regionInfoBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.RegionInfo region_info = 1; + */ + public Builder clearRegionInfo() { + if (regionInfoBuilder_ == null) { + regionInfo_ = null; + onChanged(); + } else { + regionInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * required .hbase.pb.RegionInfo region_info = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder getRegionInfoBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getRegionInfoFieldBuilder().getBuilder(); + } + /** + * required .hbase.pb.RegionInfo region_info = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder() { + if (regionInfoBuilder_ != null) { + return regionInfoBuilder_.getMessageOrBuilder(); + } else { + return regionInfo_ == null ? 
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance() : regionInfo_; + } + } + /** + * required .hbase.pb.RegionInfo region_info = 1; + */ + private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> + getRegionInfoFieldBuilder() { + if (regionInfoBuilder_ == null) { + regionInfoBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>( + getRegionInfo(), + getParentForChildren(), + isClean()); + regionInfo_ = null; + } + return regionInfoBuilder_; + } + + private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName sourceServer_ = null; + private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder> sourceServerBuilder_; + /** + * required .hbase.pb.ServerName source_server = 2; + */ + public boolean hasSourceServer() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .hbase.pb.ServerName source_server = 2; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName getSourceServer() { + if (sourceServerBuilder_ == null) { + return sourceServer_ == null ? 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance() : sourceServer_; + } else { + return sourceServerBuilder_.getMessage(); + } + } + /** + * required .hbase.pb.ServerName source_server = 2; + */ + public Builder setSourceServer(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName value) { + if (sourceServerBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + sourceServer_ = value; + onChanged(); + } else { + sourceServerBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .hbase.pb.ServerName source_server = 2; + */ + public Builder setSourceServer( + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) { + if (sourceServerBuilder_ == null) { + sourceServer_ = builderForValue.build(); + onChanged(); + } else { + sourceServerBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .hbase.pb.ServerName source_server = 2; + */ + public Builder mergeSourceServer(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName value) { + if (sourceServerBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002) && + sourceServer_ != null && + sourceServer_ != org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()) { + sourceServer_ = + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.newBuilder(sourceServer_).mergeFrom(value).buildPartial(); + } else { + sourceServer_ = value; + } + onChanged(); + } else { + sourceServerBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .hbase.pb.ServerName source_server = 2; + */ + public Builder clearSourceServer() { + if (sourceServerBuilder_ == null) { + sourceServer_ = null; + onChanged(); + } else { + sourceServerBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + /** + * required .hbase.pb.ServerName source_server = 2; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder getSourceServerBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getSourceServerFieldBuilder().getBuilder(); + } + /** + * required .hbase.pb.ServerName source_server = 2; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder getSourceServerOrBuilder() { + if (sourceServerBuilder_ != null) { + return sourceServerBuilder_.getMessageOrBuilder(); + } else { + return sourceServer_ == null ? 
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance() : sourceServer_; + } + } + /** + * required .hbase.pb.ServerName source_server = 2; + */ + private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder> + getSourceServerFieldBuilder() { + if (sourceServerBuilder_ == null) { + sourceServerBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder>( + getSourceServer(), + getParentForChildren(), + isClean()); + sourceServer_ = null; + } + return sourceServerBuilder_; + } + + private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName destinationServer_ = null; + private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder> destinationServerBuilder_; + /** + * required .hbase.pb.ServerName destination_server = 3; + */ + public boolean hasDestinationServer() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required .hbase.pb.ServerName destination_server = 3; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName getDestinationServer() { + if (destinationServerBuilder_ == null) { + return destinationServer_ == null ? 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance() : destinationServer_; + } else { + return destinationServerBuilder_.getMessage(); + } + } + /** + * required .hbase.pb.ServerName destination_server = 3; + */ + public Builder setDestinationServer(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName value) { + if (destinationServerBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + destinationServer_ = value; + onChanged(); + } else { + destinationServerBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * required .hbase.pb.ServerName destination_server = 3; + */ + public Builder setDestinationServer( + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) { + if (destinationServerBuilder_ == null) { + destinationServer_ = builderForValue.build(); + onChanged(); + } else { + destinationServerBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * required .hbase.pb.ServerName destination_server = 3; + */ + public Builder mergeDestinationServer(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName value) { + if (destinationServerBuilder_ == null) { + if (((bitField0_ & 0x00000004) == 0x00000004) && + destinationServer_ != null && + destinationServer_ != org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()) { + destinationServer_ = + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.newBuilder(destinationServer_).mergeFrom(value).buildPartial(); + } else { + destinationServer_ = value; + } + onChanged(); + } else { + destinationServerBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * required .hbase.pb.ServerName destination_server = 3; + */ + public Builder clearDestinationServer() { + if (destinationServerBuilder_ == null) { + destinationServer_ = null; + onChanged(); + } else { + destinationServerBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + /** + * required .hbase.pb.ServerName destination_server = 3; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder getDestinationServerBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return getDestinationServerFieldBuilder().getBuilder(); + } + /** + * required .hbase.pb.ServerName destination_server = 3; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder getDestinationServerOrBuilder() { + if (destinationServerBuilder_ != null) { + return destinationServerBuilder_.getMessageOrBuilder(); + } else { + return destinationServer_ == null ? 
+ org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance() : destinationServer_; + } + } + /** + * required .hbase.pb.ServerName destination_server = 3; + */ + private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder> + getDestinationServerFieldBuilder() { + if (destinationServerBuilder_ == null) { + destinationServerBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder>( + getDestinationServer(), + getParentForChildren(), + isClean()); + destinationServer_ = null; + } + return destinationServerBuilder_; + } + public final Builder setUnknownFields( + final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + public final Builder mergeUnknownFields( + final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:hbase.pb.MoveRegionStateData) + } + + // @@protoc_insertion_point(class_scope:hbase.pb.MoveRegionStateData) + private static final org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MoveRegionStateData DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MoveRegionStateData(); + } + + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MoveRegionStateData getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser + PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser() { + public MoveRegionStateData parsePartialFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return new MoveRegionStateData(input, extensionRegistry); + } + }; + + public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MoveRegionStateData getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + + } + + private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_CreateTableStateData_descriptor; + private static final + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_hbase_pb_CreateTableStateData_fieldAccessorTable; + private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_ModifyTableStateData_descriptor; + private static final 
+ org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_hbase_pb_ModifyTableStateData_fieldAccessorTable; + private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_TruncateTableStateData_descriptor; + private static final + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_hbase_pb_TruncateTableStateData_fieldAccessorTable; + private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_DeleteTableStateData_descriptor; + private static final + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_hbase_pb_DeleteTableStateData_fieldAccessorTable; + private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_CreateNamespaceStateData_descriptor; + private static final + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_hbase_pb_CreateNamespaceStateData_fieldAccessorTable; + private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_ModifyNamespaceStateData_descriptor; + private static final + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_hbase_pb_ModifyNamespaceStateData_fieldAccessorTable; + private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_DeleteNamespaceStateData_descriptor; + private static final + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_hbase_pb_DeleteNamespaceStateData_fieldAccessorTable; + private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_AddColumnFamilyStateData_descriptor; + private static final + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_hbase_pb_AddColumnFamilyStateData_fieldAccessorTable; + private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_ModifyColumnFamilyStateData_descriptor; + private static final + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_hbase_pb_ModifyColumnFamilyStateData_fieldAccessorTable; + private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_DeleteColumnFamilyStateData_descriptor; + private static final + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_hbase_pb_DeleteColumnFamilyStateData_fieldAccessorTable; + private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_EnableTableStateData_descriptor; + private static final + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_hbase_pb_EnableTableStateData_fieldAccessorTable; + private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_DisableTableStateData_descriptor; + private static final + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + 
internal_static_hbase_pb_DisableTableStateData_fieldAccessorTable; + private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_RestoreParentToChildRegionsPair_descriptor; + private static final + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_hbase_pb_RestoreParentToChildRegionsPair_fieldAccessorTable; + private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_CloneSnapshotStateData_descriptor; + private static final + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_hbase_pb_CloneSnapshotStateData_fieldAccessorTable; + private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_RestoreSnapshotStateData_descriptor; + private static final + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_hbase_pb_RestoreSnapshotStateData_fieldAccessorTable; + private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_MergeTableRegionsStateData_descriptor; + private static final + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_hbase_pb_MergeTableRegionsStateData_fieldAccessorTable; + private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_SplitTableRegionStateData_descriptor; + private static final + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_hbase_pb_SplitTableRegionStateData_fieldAccessorTable; + private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_ServerCrashStateData_descriptor; + private static final + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_hbase_pb_ServerCrashStateData_fieldAccessorTable; + private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_AssignRegionStateData_descriptor; + private static final + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_hbase_pb_AssignRegionStateData_fieldAccessorTable; + private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_UnassignRegionStateData_descriptor; + private static final + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_hbase_pb_UnassignRegionStateData_fieldAccessorTable; + private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_MoveRegionStateData_descriptor; + private static final + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_hbase_pb_MoveRegionStateData_fieldAccessorTable; + + public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor + getDescriptor() { + return descriptor; + } + private static org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor + descriptor; + static { java.lang.String[] descriptorData = { "\n\025MasterProcedure.proto\022\010hbase.pb\032\013HBase" + 
".proto\032\tRPC.proto\"\234\001\n\024CreateTableStateDa" + @@ -24147,119 +27357,138 @@ public final class MasterProcedureProtos { "\003(\0132\024.hbase.pb.RegionInfo\022.\n\020regions_ass" + "igned\030\004 \003(\0132\024.hbase.pb.RegionInfo\022\025\n\rcar" + "rying_meta\030\005 \001(\010\022\036\n\020should_split_wal\030\006 \001", - "(\010:\004true*\330\001\n\020CreateTableState\022\036\n\032CREATE_" + - "TABLE_PRE_OPERATION\020\001\022 \n\034CREATE_TABLE_WR" + - "ITE_FS_LAYOUT\020\002\022\034\n\030CREATE_TABLE_ADD_TO_M" + - "ETA\020\003\022\037\n\033CREATE_TABLE_ASSIGN_REGIONS\020\004\022\"" + - "\n\036CREATE_TABLE_UPDATE_DESC_CACHE\020\005\022\037\n\033CR" + - "EATE_TABLE_POST_OPERATION\020\006*\207\002\n\020ModifyTa" + - "bleState\022\030\n\024MODIFY_TABLE_PREPARE\020\001\022\036\n\032MO" + - "DIFY_TABLE_PRE_OPERATION\020\002\022(\n$MODIFY_TAB" + - "LE_UPDATE_TABLE_DESCRIPTOR\020\003\022&\n\"MODIFY_T" + - "ABLE_REMOVE_REPLICA_COLUMN\020\004\022!\n\035MODIFY_T", - "ABLE_DELETE_FS_LAYOUT\020\005\022\037\n\033MODIFY_TABLE_" + - "POST_OPERATION\020\006\022#\n\037MODIFY_TABLE_REOPEN_" + - "ALL_REGIONS\020\007*\212\002\n\022TruncateTableState\022 \n\034" + - "TRUNCATE_TABLE_PRE_OPERATION\020\001\022#\n\037TRUNCA" + - "TE_TABLE_REMOVE_FROM_META\020\002\022\"\n\036TRUNCATE_" + - "TABLE_CLEAR_FS_LAYOUT\020\003\022#\n\037TRUNCATE_TABL" + - "E_CREATE_FS_LAYOUT\020\004\022\036\n\032TRUNCATE_TABLE_A" + - "DD_TO_META\020\005\022!\n\035TRUNCATE_TABLE_ASSIGN_RE" + - "GIONS\020\006\022!\n\035TRUNCATE_TABLE_POST_OPERATION" + - "\020\007*\337\001\n\020DeleteTableState\022\036\n\032DELETE_TABLE_", - "PRE_OPERATION\020\001\022!\n\035DELETE_TABLE_REMOVE_F" + - "ROM_META\020\002\022 \n\034DELETE_TABLE_CLEAR_FS_LAYO" + - "UT\020\003\022\"\n\036DELETE_TABLE_UPDATE_DESC_CACHE\020\004" + - "\022!\n\035DELETE_TABLE_UNASSIGN_REGIONS\020\005\022\037\n\033D" + - "ELETE_TABLE_POST_OPERATION\020\006*\320\001\n\024CreateN" + - "amespaceState\022\034\n\030CREATE_NAMESPACE_PREPAR" + - "E\020\001\022%\n!CREATE_NAMESPACE_CREATE_DIRECTORY" + - "\020\002\022)\n%CREATE_NAMESPACE_INSERT_INTO_NS_TA" + - "BLE\020\003\022\036\n\032CREATE_NAMESPACE_UPDATE_ZK\020\004\022(\n" + - "$CREATE_NAMESPACE_SET_NAMESPACE_QUOTA\020\005*", - "z\n\024ModifyNamespaceState\022\034\n\030MODIFY_NAMESP" + - "ACE_PREPARE\020\001\022$\n MODIFY_NAMESPACE_UPDATE" + - "_NS_TABLE\020\002\022\036\n\032MODIFY_NAMESPACE_UPDATE_Z" + - "K\020\003*\332\001\n\024DeleteNamespaceState\022\034\n\030DELETE_N" + - "AMESPACE_PREPARE\020\001\022)\n%DELETE_NAMESPACE_D" + - "ELETE_FROM_NS_TABLE\020\002\022#\n\037DELETE_NAMESPAC" + - "E_REMOVE_FROM_ZK\020\003\022\'\n#DELETE_NAMESPACE_D" + - "ELETE_DIRECTORIES\020\004\022+\n\'DELETE_NAMESPACE_" + - "REMOVE_NAMESPACE_QUOTA\020\005*\331\001\n\024AddColumnFa" + - "milyState\022\035\n\031ADD_COLUMN_FAMILY_PREPARE\020\001", - "\022#\n\037ADD_COLUMN_FAMILY_PRE_OPERATION\020\002\022-\n" + - ")ADD_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPT" + - "OR\020\003\022$\n ADD_COLUMN_FAMILY_POST_OPERATION" + - "\020\004\022(\n$ADD_COLUMN_FAMILY_REOPEN_ALL_REGIO" + - "NS\020\005*\353\001\n\027ModifyColumnFamilyState\022 \n\034MODI" + - "FY_COLUMN_FAMILY_PREPARE\020\001\022&\n\"MODIFY_COL" + - "UMN_FAMILY_PRE_OPERATION\020\002\0220\n,MODIFY_COL" + - "UMN_FAMILY_UPDATE_TABLE_DESCRIPTOR\020\003\022\'\n#" + - "MODIFY_COLUMN_FAMILY_POST_OPERATION\020\004\022+\n" + - "\'MODIFY_COLUMN_FAMILY_REOPEN_ALL_REGIONS", - "\020\005*\226\002\n\027DeleteColumnFamilyState\022 \n\034DELETE" + - "_COLUMN_FAMILY_PREPARE\020\001\022&\n\"DELETE_COLUM" + - 
"N_FAMILY_PRE_OPERATION\020\002\0220\n,DELETE_COLUM" + - "N_FAMILY_UPDATE_TABLE_DESCRIPTOR\020\003\022)\n%DE" + - "LETE_COLUMN_FAMILY_DELETE_FS_LAYOUT\020\004\022\'\n" + - "#DELETE_COLUMN_FAMILY_POST_OPERATION\020\005\022+" + - "\n\'DELETE_COLUMN_FAMILY_REOPEN_ALL_REGION" + - "S\020\006*\350\001\n\020EnableTableState\022\030\n\024ENABLE_TABLE" + - "_PREPARE\020\001\022\036\n\032ENABLE_TABLE_PRE_OPERATION" + - "\020\002\022)\n%ENABLE_TABLE_SET_ENABLING_TABLE_ST", - "ATE\020\003\022$\n ENABLE_TABLE_MARK_REGIONS_ONLIN" + - "E\020\004\022(\n$ENABLE_TABLE_SET_ENABLED_TABLE_ST" + - "ATE\020\005\022\037\n\033ENABLE_TABLE_POST_OPERATION\020\006*\362" + - "\001\n\021DisableTableState\022\031\n\025DISABLE_TABLE_PR" + - "EPARE\020\001\022\037\n\033DISABLE_TABLE_PRE_OPERATION\020\002" + - "\022+\n\'DISABLE_TABLE_SET_DISABLING_TABLE_ST" + - "ATE\020\003\022&\n\"DISABLE_TABLE_MARK_REGIONS_OFFL" + - "INE\020\004\022*\n&DISABLE_TABLE_SET_DISABLED_TABL" + - "E_STATE\020\005\022 \n\034DISABLE_TABLE_POST_OPERATIO" + - "N\020\006*\346\001\n\022CloneSnapshotState\022 \n\034CLONE_SNAP", - "SHOT_PRE_OPERATION\020\001\022\"\n\036CLONE_SNAPSHOT_W" + - "RITE_FS_LAYOUT\020\002\022\036\n\032CLONE_SNAPSHOT_ADD_T" + - "O_META\020\003\022!\n\035CLONE_SNAPSHOT_ASSIGN_REGION" + - "S\020\004\022$\n CLONE_SNAPSHOT_UPDATE_DESC_CACHE\020" + - "\005\022!\n\035CLONE_SNAPSHOT_POST_OPERATION\020\006*\260\001\n" + - "\024RestoreSnapshotState\022\"\n\036RESTORE_SNAPSHO" + - "T_PRE_OPERATION\020\001\022,\n(RESTORE_SNAPSHOT_UP" + - "DATE_TABLE_DESCRIPTOR\020\002\022$\n RESTORE_SNAPS" + - "HOT_WRITE_FS_LAYOUT\020\003\022 \n\034RESTORE_SNAPSHO" + - "T_UPDATE_META\020\004*\376\003\n\026MergeTableRegionsSta", - "te\022\037\n\033MERGE_TABLE_REGIONS_PREPARE\020\001\022.\n*M" + - "ERGE_TABLE_REGIONS_MOVE_REGION_TO_SAME_R" + - "S\020\002\022+\n\'MERGE_TABLE_REGIONS_PRE_MERGE_OPE" + - "RATION\020\003\022/\n+MERGE_TABLE_REGIONS_SET_MERG" + - "ING_TABLE_STATE\020\004\022%\n!MERGE_TABLE_REGIONS" + - "_CLOSE_REGIONS\020\005\022,\n(MERGE_TABLE_REGIONS_" + - "CREATE_MERGED_REGION\020\006\0222\n.MERGE_TABLE_RE" + - "GIONS_PRE_MERGE_COMMIT_OPERATION\020\007\022#\n\037ME" + - "RGE_TABLE_REGIONS_UPDATE_META\020\010\0223\n/MERGE" + - "_TABLE_REGIONS_POST_MERGE_COMMIT_OPERATI", - "ON\020\t\022*\n&MERGE_TABLE_REGIONS_OPEN_MERGED_" + - "REGION\020\n\022&\n\"MERGE_TABLE_REGIONS_POST_OPE" + - "RATION\020\013*\304\003\n\025SplitTableRegionState\022\036\n\032SP" + - "LIT_TABLE_REGION_PREPARE\020\001\022$\n SPLIT_TABL" + - "E_REGION_PRE_OPERATION\020\002\0220\n,SPLIT_TABLE_" + - "REGION_SET_SPLITTING_TABLE_STATE\020\003\022*\n&SP" + - "LIT_TABLE_REGION_CLOSE_PARENT_REGION\020\004\022." 
+ - "\n*SPLIT_TABLE_REGION_CREATE_DAUGHTER_REG" + - "IONS\020\005\0220\n,SPLIT_TABLE_REGION_PRE_OPERATI" + - "ON_BEFORE_PONR\020\006\022\"\n\036SPLIT_TABLE_REGION_U", - "PDATE_META\020\007\022/\n+SPLIT_TABLE_REGION_PRE_O" + - "PERATION_AFTER_PONR\020\010\022)\n%SPLIT_TABLE_REG" + - "ION_OPEN_CHILD_REGIONS\020\t\022%\n!SPLIT_TABLE_" + - "REGION_POST_OPERATION\020\n*\234\002\n\020ServerCrashS" + - "tate\022\026\n\022SERVER_CRASH_START\020\001\022\035\n\031SERVER_C" + - "RASH_PROCESS_META\020\002\022\034\n\030SERVER_CRASH_GET_" + - "REGIONS\020\003\022\036\n\032SERVER_CRASH_NO_SPLIT_LOGS\020" + - "\004\022\033\n\027SERVER_CRASH_SPLIT_LOGS\020\005\022#\n\037SERVER" + - "_CRASH_PREPARE_LOG_REPLAY\020\006\022\027\n\023SERVER_CR" + - "ASH_ASSIGN\020\010\022\037\n\033SERVER_CRASH_WAIT_ON_ASS", - "IGN\020\t\022\027\n\023SERVER_CRASH_FINISH\020dBR\n1org.ap" + - "ache.hadoop.hbase.shaded.protobuf.genera" + - "tedB\025MasterProcedureProtosH\001\210\001\001\240\001\001" + "(\010:\004true\"\311\001\n\025AssignRegionStateData\0229\n\020tr" + + "ansition_state\030\001 \002(\0162\037.hbase.pb.RegionTr" + + "ansitionState\022)\n\013region_info\030\002 \002(\0132\024.hba" + + "se.pb.RegionInfo\022\035\n\016force_new_plan\030\003 \001(\010" + + ":\005false\022+\n\rtarget_server\030\004 \001(\0132\024.hbase.p" + + "b.ServerName\"\307\001\n\027UnassignRegionStateData" + + "\0229\n\020transition_state\030\001 \002(\0162\037.hbase.pb.Re" + + "gionTransitionState\022)\n\013region_info\030\002 \002(\013" + + "2\024.hbase.pb.RegionInfo\0220\n\022destination_se" + + "rver\030\003 \001(\0132\024.hbase.pb.ServerName\022\024\n\005forc", + "e\030\004 \001(\010:\005false\"\237\001\n\023MoveRegionStateData\022)" + + "\n\013region_info\030\001 \002(\0132\024.hbase.pb.RegionInf" + + "o\022+\n\rsource_server\030\002 \002(\0132\024.hbase.pb.Serv" + + "erName\0220\n\022destination_server\030\003 \002(\0132\024.hba" + + "se.pb.ServerName*\330\001\n\020CreateTableState\022\036\n" + + "\032CREATE_TABLE_PRE_OPERATION\020\001\022 \n\034CREATE_" + + "TABLE_WRITE_FS_LAYOUT\020\002\022\034\n\030CREATE_TABLE_" + + "ADD_TO_META\020\003\022\037\n\033CREATE_TABLE_ASSIGN_REG" + + "IONS\020\004\022\"\n\036CREATE_TABLE_UPDATE_DESC_CACHE" + + "\020\005\022\037\n\033CREATE_TABLE_POST_OPERATION\020\006*\207\002\n\020", + "ModifyTableState\022\030\n\024MODIFY_TABLE_PREPARE" + + "\020\001\022\036\n\032MODIFY_TABLE_PRE_OPERATION\020\002\022(\n$MO" + + "DIFY_TABLE_UPDATE_TABLE_DESCRIPTOR\020\003\022&\n\"" + + "MODIFY_TABLE_REMOVE_REPLICA_COLUMN\020\004\022!\n\035" + + "MODIFY_TABLE_DELETE_FS_LAYOUT\020\005\022\037\n\033MODIF" + + "Y_TABLE_POST_OPERATION\020\006\022#\n\037MODIFY_TABLE" + + "_REOPEN_ALL_REGIONS\020\007*\212\002\n\022TruncateTableS" + + "tate\022 \n\034TRUNCATE_TABLE_PRE_OPERATION\020\001\022#" + + "\n\037TRUNCATE_TABLE_REMOVE_FROM_META\020\002\022\"\n\036T" + + "RUNCATE_TABLE_CLEAR_FS_LAYOUT\020\003\022#\n\037TRUNC", + "ATE_TABLE_CREATE_FS_LAYOUT\020\004\022\036\n\032TRUNCATE" + + "_TABLE_ADD_TO_META\020\005\022!\n\035TRUNCATE_TABLE_A" + + "SSIGN_REGIONS\020\006\022!\n\035TRUNCATE_TABLE_POST_O" + + "PERATION\020\007*\337\001\n\020DeleteTableState\022\036\n\032DELET" + + "E_TABLE_PRE_OPERATION\020\001\022!\n\035DELETE_TABLE_" + + "REMOVE_FROM_META\020\002\022 \n\034DELETE_TABLE_CLEAR" + + "_FS_LAYOUT\020\003\022\"\n\036DELETE_TABLE_UPDATE_DESC" + + "_CACHE\020\004\022!\n\035DELETE_TABLE_UNASSIGN_REGION" + + "S\020\005\022\037\n\033DELETE_TABLE_POST_OPERATION\020\006*\320\001\n" + + 
"\024CreateNamespaceState\022\034\n\030CREATE_NAMESPAC", + "E_PREPARE\020\001\022%\n!CREATE_NAMESPACE_CREATE_D" + + "IRECTORY\020\002\022)\n%CREATE_NAMESPACE_INSERT_IN" + + "TO_NS_TABLE\020\003\022\036\n\032CREATE_NAMESPACE_UPDATE" + + "_ZK\020\004\022(\n$CREATE_NAMESPACE_SET_NAMESPACE_" + + "QUOTA\020\005*z\n\024ModifyNamespaceState\022\034\n\030MODIF" + + "Y_NAMESPACE_PREPARE\020\001\022$\n MODIFY_NAMESPAC" + + "E_UPDATE_NS_TABLE\020\002\022\036\n\032MODIFY_NAMESPACE_" + + "UPDATE_ZK\020\003*\332\001\n\024DeleteNamespaceState\022\034\n\030" + + "DELETE_NAMESPACE_PREPARE\020\001\022)\n%DELETE_NAM" + + "ESPACE_DELETE_FROM_NS_TABLE\020\002\022#\n\037DELETE_", + "NAMESPACE_REMOVE_FROM_ZK\020\003\022\'\n#DELETE_NAM" + + "ESPACE_DELETE_DIRECTORIES\020\004\022+\n\'DELETE_NA" + + "MESPACE_REMOVE_NAMESPACE_QUOTA\020\005*\331\001\n\024Add" + + "ColumnFamilyState\022\035\n\031ADD_COLUMN_FAMILY_P" + + "REPARE\020\001\022#\n\037ADD_COLUMN_FAMILY_PRE_OPERAT" + + "ION\020\002\022-\n)ADD_COLUMN_FAMILY_UPDATE_TABLE_" + + "DESCRIPTOR\020\003\022$\n ADD_COLUMN_FAMILY_POST_O" + + "PERATION\020\004\022(\n$ADD_COLUMN_FAMILY_REOPEN_A" + + "LL_REGIONS\020\005*\353\001\n\027ModifyColumnFamilyState" + + "\022 \n\034MODIFY_COLUMN_FAMILY_PREPARE\020\001\022&\n\"MO", + "DIFY_COLUMN_FAMILY_PRE_OPERATION\020\002\0220\n,MO" + + "DIFY_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPT" + + "OR\020\003\022\'\n#MODIFY_COLUMN_FAMILY_POST_OPERAT" + + "ION\020\004\022+\n\'MODIFY_COLUMN_FAMILY_REOPEN_ALL" + + "_REGIONS\020\005*\226\002\n\027DeleteColumnFamilyState\022 " + + "\n\034DELETE_COLUMN_FAMILY_PREPARE\020\001\022&\n\"DELE" + + "TE_COLUMN_FAMILY_PRE_OPERATION\020\002\0220\n,DELE" + + "TE_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR" + + "\020\003\022)\n%DELETE_COLUMN_FAMILY_DELETE_FS_LAY" + + "OUT\020\004\022\'\n#DELETE_COLUMN_FAMILY_POST_OPERA", + "TION\020\005\022+\n\'DELETE_COLUMN_FAMILY_REOPEN_AL" + + "L_REGIONS\020\006*\350\001\n\020EnableTableState\022\030\n\024ENAB" + + "LE_TABLE_PREPARE\020\001\022\036\n\032ENABLE_TABLE_PRE_O" + + "PERATION\020\002\022)\n%ENABLE_TABLE_SET_ENABLING_" + + "TABLE_STATE\020\003\022$\n ENABLE_TABLE_MARK_REGIO" + + "NS_ONLINE\020\004\022(\n$ENABLE_TABLE_SET_ENABLED_" + + "TABLE_STATE\020\005\022\037\n\033ENABLE_TABLE_POST_OPERA" + + "TION\020\006*\362\001\n\021DisableTableState\022\031\n\025DISABLE_" + + "TABLE_PREPARE\020\001\022\037\n\033DISABLE_TABLE_PRE_OPE" + + "RATION\020\002\022+\n\'DISABLE_TABLE_SET_DISABLING_", + "TABLE_STATE\020\003\022&\n\"DISABLE_TABLE_MARK_REGI" + + "ONS_OFFLINE\020\004\022*\n&DISABLE_TABLE_SET_DISAB" + + "LED_TABLE_STATE\020\005\022 \n\034DISABLE_TABLE_POST_" + + "OPERATION\020\006*\346\001\n\022CloneSnapshotState\022 \n\034CL" + + "ONE_SNAPSHOT_PRE_OPERATION\020\001\022\"\n\036CLONE_SN" + + "APSHOT_WRITE_FS_LAYOUT\020\002\022\036\n\032CLONE_SNAPSH" + + "OT_ADD_TO_META\020\003\022!\n\035CLONE_SNAPSHOT_ASSIG" + + "N_REGIONS\020\004\022$\n CLONE_SNAPSHOT_UPDATE_DES" + + "C_CACHE\020\005\022!\n\035CLONE_SNAPSHOT_POST_OPERATI" + + "ON\020\006*\260\001\n\024RestoreSnapshotState\022\"\n\036RESTORE", + "_SNAPSHOT_PRE_OPERATION\020\001\022,\n(RESTORE_SNA" + + "PSHOT_UPDATE_TABLE_DESCRIPTOR\020\002\022$\n RESTO" + + "RE_SNAPSHOT_WRITE_FS_LAYOUT\020\003\022 \n\034RESTORE" + + "_SNAPSHOT_UPDATE_META\020\004*\376\003\n\026MergeTableRe" + + "gionsState\022\037\n\033MERGE_TABLE_REGIONS_PREPAR" + + "E\020\001\022.\n*MERGE_TABLE_REGIONS_MOVE_REGION_T" + + "O_SAME_RS\020\002\022+\n\'MERGE_TABLE_REGIONS_PRE_M" + + "ERGE_OPERATION\020\003\022/\n+MERGE_TABLE_REGIONS_" + + 
"SET_MERGING_TABLE_STATE\020\004\022%\n!MERGE_TABLE" + + "_REGIONS_CLOSE_REGIONS\020\005\022,\n(MERGE_TABLE_", + "REGIONS_CREATE_MERGED_REGION\020\006\0222\n.MERGE_" + + "TABLE_REGIONS_PRE_MERGE_COMMIT_OPERATION" + + "\020\007\022#\n\037MERGE_TABLE_REGIONS_UPDATE_META\020\010\022" + + "3\n/MERGE_TABLE_REGIONS_POST_MERGE_COMMIT" + + "_OPERATION\020\t\022*\n&MERGE_TABLE_REGIONS_OPEN" + + "_MERGED_REGION\020\n\022&\n\"MERGE_TABLE_REGIONS_" + + "POST_OPERATION\020\013*\304\003\n\025SplitTableRegionSta" + + "te\022\036\n\032SPLIT_TABLE_REGION_PREPARE\020\001\022$\n SP" + + "LIT_TABLE_REGION_PRE_OPERATION\020\002\0220\n,SPLI" + + "T_TABLE_REGION_SET_SPLITTING_TABLE_STATE", + "\020\003\022*\n&SPLIT_TABLE_REGION_CLOSE_PARENT_RE" + + "GION\020\004\022.\n*SPLIT_TABLE_REGION_CREATE_DAUG" + + "HTER_REGIONS\020\005\0220\n,SPLIT_TABLE_REGION_PRE" + + "_OPERATION_BEFORE_PONR\020\006\022\"\n\036SPLIT_TABLE_" + + "REGION_UPDATE_META\020\007\022/\n+SPLIT_TABLE_REGI" + + "ON_PRE_OPERATION_AFTER_PONR\020\010\022)\n%SPLIT_T" + + "ABLE_REGION_OPEN_CHILD_REGIONS\020\t\022%\n!SPLI" + + "T_TABLE_REGION_POST_OPERATION\020\n*\234\002\n\020Serv" + + "erCrashState\022\026\n\022SERVER_CRASH_START\020\001\022\035\n\031" + + "SERVER_CRASH_PROCESS_META\020\002\022\034\n\030SERVER_CR", + "ASH_GET_REGIONS\020\003\022\036\n\032SERVER_CRASH_NO_SPL" + + "IT_LOGS\020\004\022\033\n\027SERVER_CRASH_SPLIT_LOGS\020\005\022#" + + "\n\037SERVER_CRASH_PREPARE_LOG_REPLAY\020\006\022\027\n\023S" + + "ERVER_CRASH_ASSIGN\020\010\022\037\n\033SERVER_CRASH_WAI" + + "T_ON_ASSIGN\020\t\022\027\n\023SERVER_CRASH_FINISH\020d*r" + + "\n\025RegionTransitionState\022\033\n\027REGION_TRANSI" + + "TION_QUEUE\020\001\022\036\n\032REGION_TRANSITION_DISPAT" + + "CH\020\002\022\034\n\030REGION_TRANSITION_FINISH\020\003*C\n\017Mo" + + "veRegionState\022\030\n\024MOVE_REGION_UNASSIGN\020\001\022" + + "\026\n\022MOVE_REGION_ASSIGN\020\002BR\n1org.apache.ha", + "doop.hbase.shaded.protobuf.generatedB\025Ma" + + "sterProcedureProtosH\001\210\001\001\240\001\001" }; org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor. 
InternalDescriptorAssigner() { @@ -24383,6 +27612,24 @@ public final class MasterProcedureProtos { org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_ServerCrashStateData_descriptor, new java.lang.String[] { "ServerName", "DistributedLogReplay", "RegionsOnCrashedServer", "RegionsAssigned", "CarryingMeta", "ShouldSplitWal", }); + internal_static_hbase_pb_AssignRegionStateData_descriptor = + getDescriptor().getMessageTypes().get(18); + internal_static_hbase_pb_AssignRegionStateData_fieldAccessorTable = new + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_hbase_pb_AssignRegionStateData_descriptor, + new java.lang.String[] { "TransitionState", "RegionInfo", "ForceNewPlan", "TargetServer", }); + internal_static_hbase_pb_UnassignRegionStateData_descriptor = + getDescriptor().getMessageTypes().get(19); + internal_static_hbase_pb_UnassignRegionStateData_fieldAccessorTable = new + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_hbase_pb_UnassignRegionStateData_descriptor, + new java.lang.String[] { "TransitionState", "RegionInfo", "DestinationServer", "Force", }); + internal_static_hbase_pb_MoveRegionStateData_descriptor = + getDescriptor().getMessageTypes().get(20); + internal_static_hbase_pb_MoveRegionStateData_fieldAccessorTable = new + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_hbase_pb_MoveRegionStateData_descriptor, + new java.lang.String[] { "RegionInfo", "SourceServer", "DestinationServer", }); org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.getDescriptor(); org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.getDescriptor(); } diff --git a/hbase-protocol-shaded/src/main/protobuf/Admin.proto b/hbase-protocol-shaded/src/main/protobuf/Admin.proto index 338c80b..72f006e 100644 --- a/hbase-protocol-shaded/src/main/protobuf/Admin.proto +++ b/hbase-protocol-shaded/src/main/protobuf/Admin.proto @@ -260,6 +260,16 @@ message GetRegionLoadResponse { repeated RegionLoad region_loads = 1; } +message ExecuteProceduresRequest { + repeated OpenRegionRequest open_region = 1; + repeated CloseRegionRequest close_region = 2; +} + +message ExecuteProceduresResponse { + repeated OpenRegionResponse open_region = 1; + repeated CloseRegionResponse close_region = 2; +} + service AdminService { rpc GetRegionInfo(GetRegionInfoRequest) returns(GetRegionInfoResponse); @@ -314,4 +324,7 @@ service AdminService { rpc GetRegionLoad(GetRegionLoadRequest) returns(GetRegionLoadResponse); + + rpc ExecuteProcedures(ExecuteProceduresRequest) + returns(ExecuteProceduresResponse); } diff --git a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto index ef3f973..c888266 100644 --- a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto +++ b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto @@ -323,3 +323,34 @@ enum ServerCrashState { SERVER_CRASH_WAIT_ON_ASSIGN = 9; SERVER_CRASH_FINISH = 100; } + +enum RegionTransitionState { + REGION_TRANSITION_QUEUE = 1; + REGION_TRANSITION_DISPATCH = 2; + REGION_TRANSITION_FINISH = 3; +} + +message AssignRegionStateData { + required RegionTransitionState transition_state = 1; + required RegionInfo region_info = 2; + optional bool force_new_plan = 3 [default = false]; + optional ServerName target_server = 4; +} + +message 
UnassignRegionStateData { + required RegionTransitionState transition_state = 1; + required RegionInfo region_info = 2; + optional ServerName destination_server = 3; + optional bool force = 4 [default = false]; +} + +enum MoveRegionState { + MOVE_REGION_UNASSIGN = 1; + MOVE_REGION_ASSIGN = 2; +} + +message MoveRegionStateData { + required RegionInfo region_info = 1; + required ServerName source_server = 2; + required ServerName destination_server = 3; +} diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java index 5a00ddb..8baf3c2 100644 --- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java +++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java @@ -38,7 +38,8 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.constraint.ConstraintException; -import org.apache.hadoop.hbase.master.AssignmentManager; +import org.apache.hadoop.hbase.master.assignment.AssignmentManager; +import org.apache.hadoop.hbase.master.assignment.RegionStates.RegionStateNode; import org.apache.hadoop.hbase.master.LoadBalancer; import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.master.RegionPlan; @@ -124,10 +125,9 @@ public class RSGroupAdminServer implements RSGroupAdmin { addRegion(regions, el.getKey()); } } - for (RegionState state: - this.master.getAssignmentManager().getRegionStates().getRegionsInTransition()) { - if (state.getServerName().getAddress().equals(server)) { - addRegion(regions, state.getRegion()); + for (RegionStateNode state : master.getAssignmentManager().getRegionsInTransition()) { + if (state.getRegionLocation().getAddress().equals(server)) { + addRegion(regions, state.getRegionInfo()); } } return regions; @@ -401,7 +401,7 @@ public class RSGroupAdminServer implements RSGroupAdmin { LOG.info("RSGroup balance "+groupName+" starting with plan count: "+plans.size()); for (RegionPlan plan: plans) { LOG.info("balance " + plan); - assignmentManager.balance(plan); + assignmentManager.moveAsync(plan); } LOG.info("RSGroup balance "+groupName+" completed after "+ (System.currentTimeMillis()-startTime)+" seconds"); @@ -515,4 +515,4 @@ public class RSGroupAdminServer implements RSGroupAdmin { LOG.debug("Failed to perform RSGroup information cleanup for table: " + tableName, ex); } } -} \ No newline at end of file +} diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancer.java b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancer.java index b542fd3..4742bef 100644 --- a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancer.java +++ b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancer.java @@ -32,7 +32,7 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.rsgroup.RSGroupBasedLoadBalancer; import org.apache.hadoop.hbase.rsgroup.RSGroupInfo; import org.apache.hadoop.hbase.rsgroup.RSGroupInfoManager; -import org.apache.hadoop.hbase.master.AssignmentManager; +import org.apache.hadoop.hbase.master.assignment.AssignmentManager; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.MasterServices; import 
org.apache.hadoop.hbase.master.RegionPlan; diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/AssignmentManagerStatusTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/AssignmentManagerStatusTmpl.jamon index 76a85a9..da21bba 100644 --- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/AssignmentManagerStatusTmpl.jamon +++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/AssignmentManagerStatusTmpl.jamon @@ -18,7 +18,7 @@ limitations under the License. <%import> org.apache.hadoop.hbase.HRegionInfo; -org.apache.hadoop.hbase.master.AssignmentManager; +org.apache.hadoop.hbase.master.assignment.AssignmentManager; org.apache.hadoop.hbase.master.RegionState; org.apache.hadoop.conf.Configuration; org.apache.hadoop.hbase.HBaseConfiguration; @@ -35,7 +35,7 @@ int limit = 100; <%java SortedSet rit = assignmentManager .getRegionStates().getRegionsInTransitionOrderedByTimestamp(); - Map failedRegionTracker = assignmentManager.getFailedOpenTracker(); + Map failedRegionTracker = assignmentManager.getFailedOpenTracker(); %> <%if !rit.isEmpty() %> @@ -99,8 +99,7 @@ int numOfPages = (int) Math.ceil(numOfRITs * 1.0 / ritsPerPage); <%java> String retryStatus = "0"; - AtomicInteger numOpenRetries = failedRegionTracker.get( - rs.getRegion().getEncodedName()); + AtomicInteger numOpenRetries = failedRegionTracker.get(rs.getRegion()); if (numOpenRetries != null ) { retryStatus = Integer.toString(numOpenRetries.get()); } else if (rs.getState() == RegionState.State.FAILED_OPEN) { diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon index 36d5112..0e76455 100644 --- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon +++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon @@ -41,7 +41,7 @@ org.apache.hadoop.hbase.TableName; org.apache.hadoop.hbase.client.Admin; org.apache.hadoop.hbase.client.MasterSwitchType; org.apache.hadoop.hbase.client.SnapshotDescription; -org.apache.hadoop.hbase.master.AssignmentManager; +org.apache.hadoop.hbase.master.assignment.AssignmentManager; org.apache.hadoop.hbase.master.DeadServer; org.apache.hadoop.hbase.master.HMaster; org.apache.hadoop.hbase.master.RegionState; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/VersionInfoUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/VersionInfoUtil.java index ed1ae31..4f134c0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/VersionInfoUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/VersionInfoUtil.java @@ -35,9 +35,7 @@ public final class VersionInfoUtil { } public static boolean currentClientHasMinimumVersion(int major, int minor) { - RpcCallContext call = RpcServer.getCurrentCall(); - HBaseProtos.VersionInfo versionInfo = call != null ? call.getClientVersionInfo() : null; - return hasMinimumVersion(versionInfo, major, minor); + return hasMinimumVersion(getCurrentClientVersionInfo(), major, minor); } public static boolean hasMinimumVersion(HBaseProtos.VersionInfo versionInfo, @@ -53,7 +51,7 @@ public final class VersionInfoUtil { return clientMinor >= minor; } try { - String[] components = versionInfo.getVersion().split("\\."); + final String[] components = getVersionComponents(versionInfo); int clientMajor = components.length > 0 ? 
Integer.parseInt(components[0]) : 0; if (clientMajor != major) { @@ -68,4 +66,79 @@ public final class VersionInfoUtil { } return false; } + + /** + * @return the versionInfo extracted from the current RpcCallContext + */ + private static HBaseProtos.VersionInfo getCurrentClientVersionInfo() { + RpcCallContext call = RpcServer.getCurrentCall(); + return call != null ? call.getClientVersionInfo() : null; + } + + /** + * @return the version number extracted from the current RpcCallContext as int. + * (e.g. 0x0103004 is 1.3.4) + */ + public static int getCurrentClientVersionNumber() { + return getVersionNumber(getCurrentClientVersionInfo()); + } + + + /** + * @param version + * @return the passed-in version int as a version String + * (e.g. 0x0103004 is 1.3.4) + */ + public static String versionNumberToString(final int version) { + return String.format("%d.%d.%d", + ((version >> 20) & 0xff), + ((version >> 12) & 0xff), + (version & 0xfff)); + } + + /** + * Pack the full number version in a int. by shifting each component by 8bit, + * except the dot release which has 12bit. + * Examples: (1.3.4 is 0x0103004, 2.1.0 is 0x0201000) + * @param versionInfo the VersionInfo object to pack + * @return the version number as int. (e.g. 0x0103004 is 1.3.4) + */ + private static int getVersionNumber(final HBaseProtos.VersionInfo versionInfo) { + if (versionInfo != null) { + try { + final String[] components = getVersionComponents(versionInfo); + int clientMajor = components.length > 0 ? Integer.parseInt(components[0]) : 0; + int clientMinor = components.length > 1 ? Integer.parseInt(components[1]) : 0; + int clientPatch = components.length > 2 ? Integer.parseInt(components[2]) : 0; + return buildVersionNumber(clientMajor, clientMinor, clientPatch); + } catch (NumberFormatException e) { + int clientMajor = versionInfo.hasVersionMajor() ? versionInfo.getVersionMajor() : 0; + int clientMinor = versionInfo.hasVersionMinor() ? versionInfo.getVersionMinor() : 0; + return buildVersionNumber(clientMajor, clientMinor, 0); + } + } + return(0); // no version + } + + /** + * Pack the full number version in a int. by shifting each component by 8bit, + * except the dot release which has 12bit. + * Examples: (1.3.4 is 0x0103004, 2.1.0 is 0x0201000) + * @param major version major number + * @param minor version minor number + * @param patch version patch number + * @return the version number as int. (e.g. 
0x0103004 is 1.3.4) + */ + private static int buildVersionNumber(int major, int minor, int patch) { + return (major << 20) | (minor << 12) | patch; + } + + /** + * Returns the version components + * Examples: "1.2.3" returns [1, 2, 3], "4.5.6-SNAPSHOT" returns [4, 5, 6, "SNAPSHOT"] + * @returns the components of the version string + */ + private static String[] getVersionComponents(final HBaseProtos.VersionInfo versionInfo) { + return versionInfo.getVersion().split("[\\.-]"); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/BalancedQueueRpcExecutor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/BalancedQueueRpcExecutor.java index f792b36..558c9c2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/BalancedQueueRpcExecutor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/BalancedQueueRpcExecutor.java @@ -17,11 +17,8 @@ */ package org.apache.hadoop.hbase.ipc; -import java.util.List; import java.util.concurrent.BlockingQueue; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.HBaseInterfaceAudience; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FastPathBalancedQueueRpcExecutor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FastPathBalancedQueueRpcExecutor.java index ab16627..b3df28a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FastPathBalancedQueueRpcExecutor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FastPathBalancedQueueRpcExecutor.java @@ -120,4 +120,4 @@ public class FastPathBalancedQueueRpcExecutor extends BalancedQueueRpcExecutor { return true; } } -} +} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcScheduler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcScheduler.java index 616f741..bfbb803 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcScheduler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcScheduler.java @@ -17,8 +17,6 @@ */ package org.apache.hadoop.hbase.ipc; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.HBaseInterfaceAudience; @@ -37,8 +35,6 @@ import org.apache.hadoop.hbase.conf.ConfigurationObserver; @InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX}) @InterfaceStability.Evolving public class SimpleRpcScheduler extends RpcScheduler implements ConfigurationObserver { - private static final Log LOG = LogFactory.getLog(SimpleRpcScheduler.class); - private int port; private final PriorityFunction priority; private final RpcExecutor callExecutor; @@ -82,14 +78,14 @@ public class SimpleRpcScheduler extends RpcScheduler implements ConfigurationObs if (callqReadShare > 0) { // at least 1 read handler and 1 write handler - callExecutor = new RWQueueRpcExecutor("deafult.RWQ", Math.max(2, handlerCount), + callExecutor = new RWQueueRpcExecutor("default.RWQ", Math.max(2, handlerCount), maxQueueLength, priority, conf, server); } else { if (RpcExecutor.isFifoQueueType(callQueueType)) { - callExecutor = new FastPathBalancedQueueRpcExecutor("deafult.FPBQ", handlerCount, + callExecutor = new FastPathBalancedQueueRpcExecutor("default.FPBQ", 
handlerCount, maxQueueLength, priority, conf, server); } else { - callExecutor = new BalancedQueueRpcExecutor("deafult.BQ", handlerCount, maxQueueLength, + callExecutor = new BalancedQueueRpcExecutor("default.BQ", handlerCount, maxQueueLength, priority, conf, server); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java index 075d8b8..12742db 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java @@ -1980,8 +1980,8 @@ public class SimpleRpcServer extends RpcServer { if (!running) { return; } - if (LOG.isDebugEnabled()) { - LOG.debug(Thread.currentThread().getName()+": task running"); + if (LOG.isTraceEnabled()) { + LOG.trace("running"); } try { closeIdle(false); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignCallable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignCallable.java deleted file mode 100644 index 4513a5d..0000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignCallable.java +++ /dev/null @@ -1,49 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.master; - -import java.util.concurrent.Callable; - -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.HRegionInfo; - -/** - * A callable object that invokes the corresponding action that needs to be - * taken for assignment of a region in transition. - * Implementing as future callable we are able to act on the timeout - * asynchronously. - */ -@InterfaceAudience.Private -public class AssignCallable implements Callable { - private AssignmentManager assignmentManager; - - private HRegionInfo hri; - - public AssignCallable( - AssignmentManager assignmentManager, HRegionInfo hri) { - this.assignmentManager = assignmentManager; - this.hri = hri; - } - - @Override - public Object call() throws Exception { - assignmentManager.assign(hri); - return null; - } -} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java deleted file mode 100644 index 60ad545..0000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java +++ /dev/null @@ -1,3057 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
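
The VersionInfoUtil helpers added above pack a version into a single int: 8 bits each for the major and minor components and 12 bits for the patch ("dot") release, so 1.3.4 becomes 0x0103004 and 2.1.0 becomes 0x0201000. A minimal standalone sketch of that same arithmetic, for illustration only (this is not the patched class, just the packing it performs):

public final class VersionPackingSketch {

  // Same packing as buildVersionNumber above: 8 bits for major, 8 for minor,
  // 12 for the patch release.
  static int pack(int major, int minor, int patch) {
    return (major << 20) | (minor << 12) | patch;
  }

  // Reverse of pack(), same arithmetic as versionNumberToString above.
  static String unpack(int version) {
    return String.format("%d.%d.%d",
        (version >> 20) & 0xff, (version >> 12) & 0xff, version & 0xfff);
  }

  public static void main(String[] args) {
    System.out.println(Integer.toHexString(pack(1, 3, 4)));   // 103004
    System.out.println(Integer.toHexString(pack(2, 1, 0)));   // 201000
    System.out.println(unpack(0x0103004));                    // 1.3.4
  }
}
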
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.master; - -import com.google.common.annotations.VisibleForTesting; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.NavigableMap; -import java.util.Random; -import java.util.Set; -import java.util.TreeMap; -import java.util.concurrent.Callable; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.CopyOnWriteArrayList; -import java.util.concurrent.ScheduledThreadPoolExecutor; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.locks.Lock; -import java.util.concurrent.locks.ReentrantLock; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileStatus; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.CoordinatedStateException; -import org.apache.hadoop.hbase.HBaseIOException; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.HRegionLocation; -import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.MetaTableAccessor; -import org.apache.hadoop.hbase.NotServingRegionException; -import org.apache.hadoop.hbase.RegionLocations; -import org.apache.hadoop.hbase.RegionStateListener; -import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.client.MasterSwitchType; -import org.apache.hadoop.hbase.client.RegionReplicaUtil; -import org.apache.hadoop.hbase.client.Result; -import org.apache.hadoop.hbase.client.TableState; -import org.apache.hadoop.hbase.executor.EventHandler; -import org.apache.hadoop.hbase.executor.EventType; -import org.apache.hadoop.hbase.executor.ExecutorService; -import org.apache.hadoop.hbase.favored.FavoredNodesManager; -import org.apache.hadoop.hbase.favored.FavoredNodesPromoter; -import org.apache.hadoop.hbase.ipc.FailedServerException; -import org.apache.hadoop.hbase.ipc.RpcClient; -import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException; -import org.apache.hadoop.hbase.master.RegionState.State; -import org.apache.hadoop.hbase.master.normalizer.NormalizationPlan.PlanType; -import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition; -import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode; -import org.apache.hadoop.hbase.quotas.QuotaExceededException; -import 
org.apache.hadoop.hbase.regionserver.RegionOpeningState; -import org.apache.hadoop.hbase.regionserver.RegionServerAbortedException; -import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException; -import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -import org.apache.hadoop.hbase.util.FSUtils; -import org.apache.hadoop.hbase.util.KeyLocker; -import org.apache.hadoop.hbase.util.Pair; -import org.apache.hadoop.hbase.util.PairOfSameType; -import org.apache.hadoop.hbase.util.RetryCounter; -import org.apache.hadoop.hbase.util.Threads; -import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; -import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; -import org.apache.hadoop.ipc.RemoteException; -import org.apache.hadoop.util.StringUtils; -import org.apache.zookeeper.KeeperException; - -/** - * Manages and performs region assignment. - * Related communications with regionserver are all done over RPC. - */ -@InterfaceAudience.Private -public class AssignmentManager { - private static final Log LOG = LogFactory.getLog(AssignmentManager.class); - - protected final MasterServices server; - - private ServerManager serverManager; - - private boolean shouldAssignRegionsWithFavoredNodes; - - private LoadBalancer balancer; - - private final MetricsAssignmentManager metricsAssignmentManager; - - private AtomicInteger numRegionsOpened = new AtomicInteger(0); - - final private KeyLocker locker = new KeyLocker(); - - Set replicasToClose = Collections.synchronizedSet(new HashSet()); - - /** - * Map of regions to reopen after the schema of a table is changed. Key - - * encoded region name, value - HRegionInfo - */ - private final Map regionsToReopen; - - /* - * Maximum times we recurse an assignment/unassignment. - * See below in {@link #assign()} and {@link #unassign()}. - */ - private final int maximumAttempts; - - /** - * The sleep time for which the assignment will wait before retrying in case of - * hbase:meta assignment failure due to lack of availability of region plan or bad region plan - */ - private final long sleepTimeBeforeRetryingMetaAssignment; - - /** Plans for region movement. Key is the encoded version of a region name*/ - // TODO: When do plans get cleaned out? Ever? In server open and in server - // shutdown processing -- St.Ack - // All access to this Map must be synchronized. - final NavigableMap regionPlans = - new TreeMap(); - - private final TableStateManager tableStateManager; - - private final ExecutorService executorService; - - private java.util.concurrent.ExecutorService threadPoolExecutorService; - private ScheduledThreadPoolExecutor scheduledThreadPoolExecutor; - - private final RegionStates regionStates; - - // The threshold to use bulk assigning. Using bulk assignment - // only if assigning at least this many regions to at least this - // many servers. If assigning fewer regions to fewer servers, - // bulk assigning may be not as efficient. - private final int bulkAssignThresholdRegions; - private final int bulkAssignThresholdServers; - private final int bulkPerRegionOpenTimeGuesstimate; - - // Should bulk assignment wait till all regions are assigned, - // or it is timed out? This is useful to measure bulk assignment - // performance, but not needed in most use cases. - private final boolean bulkAssignWaitTillAllAssigned; - - /** - * Indicator that AssignmentManager has recovered the region states so - * that ServerShutdownHandler can be fully enabled and re-assign regions - * of dead servers. 
So that when re-assignment happens, AssignmentManager - * has proper region states. - * - * Protected to ease testing. - */ - protected final AtomicBoolean failoverCleanupDone = new AtomicBoolean(false); - - /** - * A map to track the count a region fails to open in a row. - * So that we don't try to open a region forever if the failure is - * unrecoverable. We don't put this information in region states - * because we don't expect this to happen frequently; we don't - * want to copy this information over during each state transition either. - */ - private final ConcurrentHashMap - failedOpenTracker = new ConcurrentHashMap(); - - // In case not using ZK for region assignment, region states - // are persisted in meta with a state store - private final RegionStateStore regionStateStore; - - /** - * For testing only! Set to true to skip handling of split. - */ - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="MS_SHOULD_BE_FINAL") - public static boolean TEST_SKIP_SPLIT_HANDLING = false; - - /** Listeners that are called on assignment events. */ - private List listeners = new CopyOnWriteArrayList(); - - private RegionStateListener regionStateListener; - - private RetryCounter.BackoffPolicy backoffPolicy; - private RetryCounter.RetryConfig retryConfig; - /** - * Constructs a new assignment manager. - * - * @param server instance of HMaster this AM running inside - * @param serverManager serverManager for associated HMaster - * @param balancer implementation of {@link LoadBalancer} - * @param service Executor service - * @param metricsMaster metrics manager - * @throws IOException - */ - public AssignmentManager(MasterServices server, ServerManager serverManager, - final LoadBalancer balancer, final ExecutorService service, MetricsMaster metricsMaster, - final TableStateManager tableStateManager) - throws IOException { - this.server = server; - this.serverManager = serverManager; - this.executorService = service; - this.regionStateStore = new RegionStateStore(server); - this.regionsToReopen = Collections.synchronizedMap - (new HashMap ()); - Configuration conf = server.getConfiguration(); - - this.tableStateManager = tableStateManager; - - // This is the max attempts, not retries, so it should be at least 1. - this.maximumAttempts = Math.max(1, - this.server.getConfiguration().getInt("hbase.assignment.maximum.attempts", 10)); - this.sleepTimeBeforeRetryingMetaAssignment = this.server.getConfiguration().getLong( - "hbase.meta.assignment.retry.sleeptime", 1000l); - this.balancer = balancer; - // Only read favored nodes if using the favored nodes load balancer. 
- this.shouldAssignRegionsWithFavoredNodes = this.balancer instanceof FavoredNodesPromoter; - int maxThreads = conf.getInt("hbase.assignment.threads.max", 30); - - this.threadPoolExecutorService = Threads.getBoundedCachedThreadPool( - maxThreads, 60L, TimeUnit.SECONDS, Threads.newDaemonThreadFactory("AM.")); - - this.scheduledThreadPoolExecutor = new ScheduledThreadPoolExecutor(1, - Threads.newDaemonThreadFactory("AM.Scheduler")); - - this.regionStates = new RegionStates( - server, tableStateManager, serverManager, regionStateStore); - - this.bulkAssignWaitTillAllAssigned = - conf.getBoolean("hbase.bulk.assignment.waittillallassigned", false); - this.bulkAssignThresholdRegions = conf.getInt("hbase.bulk.assignment.threshold.regions", 7); - this.bulkAssignThresholdServers = conf.getInt("hbase.bulk.assignment.threshold.servers", 3); - this.bulkPerRegionOpenTimeGuesstimate = - conf.getInt("hbase.bulk.assignment.perregion.open.time", 10000); - - this.metricsAssignmentManager = new MetricsAssignmentManager(); - - // Configurations for retrying opening a region on receiving a FAILED_OPEN - this.retryConfig = new RetryCounter.RetryConfig(); - this.retryConfig.setSleepInterval(conf.getLong("hbase.assignment.retry.sleep.initial", 0l)); - // Set the max time limit to the initial sleep interval so we use a constant time sleep strategy - // if the user does not set a max sleep limit - this.retryConfig.setMaxSleepTime(conf.getLong("hbase.assignment.retry.sleep.max", - retryConfig.getSleepInterval())); - this.backoffPolicy = getBackoffPolicy(); - } - - /** - * Returns the backoff policy used for Failed Region Open retries - * @return the backoff policy used for Failed Region Open retries - */ - RetryCounter.BackoffPolicy getBackoffPolicy() { - return new RetryCounter.ExponentialBackoffPolicyWithLimit(); - } - - MetricsAssignmentManager getAssignmentManagerMetrics() { - return this.metricsAssignmentManager; - } - - /** - * Add the listener to the notification list. - * @param listener The AssignmentListener to register - */ - public void registerListener(final AssignmentListener listener) { - this.listeners.add(listener); - } - - /** - * Remove the listener from the notification list. - * @param listener The AssignmentListener to unregister - */ - public boolean unregisterListener(final AssignmentListener listener) { - return this.listeners.remove(listener); - } - - /** - * @return Instance of ZKTableStateManager. - */ - public TableStateManager getTableStateManager() { - // These are 'expensive' to make involving trip to zk ensemble so allow - // sharing. - return this.tableStateManager; - } - - /** - * This SHOULD not be public. It is public now - * because of some unit tests. - * - * TODO: make it package private and keep RegionStates in the master package - */ - public RegionStates getRegionStates() { - return regionStates; - } - - /** - * Used in some tests to mock up region state in meta - */ - @VisibleForTesting - RegionStateStore getRegionStateStore() { - return regionStateStore; - } - - public RegionPlan getRegionReopenPlan(HRegionInfo hri) { - return new RegionPlan(hri, null, regionStates.getRegionServerOfRegion(hri)); - } - - /** - * Add a regionPlan for the specified region. - * @param encodedName - * @param plan - */ - public void addPlan(String encodedName, RegionPlan plan) { - synchronized (regionPlans) { - regionPlans.put(encodedName, plan); - } - } - - /** - * Add a map of region plans. 
- */ - public void addPlans(Map plans) { - synchronized (regionPlans) { - regionPlans.putAll(plans); - } - } - - /** - * Set the list of regions that will be reopened - * because of an update in table schema - * - * @param regions - * list of regions that should be tracked for reopen - */ - public void setRegionsToReopen(List regions) { - for(HRegionInfo hri : regions) { - regionsToReopen.put(hri.getEncodedName(), hri); - } - } - - /** - * Used by the client to identify if all regions have the schema updates - * - * @param tableName - * @return Pair indicating the status of the alter command - * @throws IOException - */ - public Pair getReopenStatus(TableName tableName) - throws IOException { - List hris; - if (TableName.META_TABLE_NAME.equals(tableName)) { - hris = new MetaTableLocator().getMetaRegions(server.getZooKeeper()); - } else { - hris = MetaTableAccessor.getTableRegions(server.getConnection(), tableName, true); - } - - Integer pending = 0; - for (HRegionInfo hri : hris) { - String name = hri.getEncodedName(); - // no lock concurrent access ok: sequential consistency respected. - if (regionsToReopen.containsKey(name) - || regionStates.isRegionInTransition(name)) { - pending++; - } - } - return new Pair(pending, hris.size()); - } - - /** - * Used by ServerShutdownHandler to make sure AssignmentManager has completed - * the failover cleanup before re-assigning regions of dead servers. So that - * when re-assignment happens, AssignmentManager has proper region states. - */ - public boolean isFailoverCleanupDone() { - return failoverCleanupDone.get(); - } - - /** - * To avoid racing with AM, external entities may need to lock a region, - * for example, when SSH checks what regions to skip re-assigning. - */ - public Lock acquireRegionLock(final String encodedName) { - return locker.acquireLock(encodedName); - } - - /** - * Now, failover cleanup is completed. Notify server manager to - * process queued up dead servers processing, if any. - */ - void failoverCleanupDone() { - failoverCleanupDone.set(true); - serverManager.processQueuedDeadServers(); - } - - /** - * Called on startup. - * Figures whether a fresh cluster start of we are joining extant running cluster. - * @throws IOException - * @throws KeeperException - * @throws InterruptedException - * @throws CoordinatedStateException - */ - void joinCluster() - throws IOException, KeeperException, InterruptedException, CoordinatedStateException { - long startTime = System.currentTimeMillis(); - // Concurrency note: In the below the accesses on regionsInTransition are - // outside of a synchronization block where usually all accesses to RIT are - // synchronized. The presumption is that in this case it is safe since this - // method is being played by a single thread on startup. - - // TODO: Regions that have a null location and are not in regionsInTransitions - // need to be handled. - - // Scan hbase:meta to build list of existing regions, servers, and assignment - // Returns servers who have not checked in (assumed dead) that some regions - // were assigned to (according to the meta) - Set deadServers = rebuildUserRegions(); - - // This method will assign all user regions if a clean server startup or - // it will reconstruct master state and cleanup any leftovers from previous master process. 
- boolean failover = processDeadServersAndRegionsInTransition(deadServers); - - LOG.info("Joined the cluster in " + (System.currentTimeMillis() - - startTime) + "ms, failover=" + failover); - } - - /** - * Process all regions that are in transition in zookeeper and also - * processes the list of dead servers. - * Used by master joining an cluster. If we figure this is a clean cluster - * startup, will assign all user regions. - * @param deadServers Set of servers that are offline probably legitimately that were carrying - * regions according to a scan of hbase:meta. Can be null. - * @throws IOException - * @throws InterruptedException - */ - boolean processDeadServersAndRegionsInTransition(final Set deadServers) - throws KeeperException, IOException, InterruptedException, CoordinatedStateException { - // TODO Needed? List nodes = ZKUtil.listChildrenNoWatch(watcher, watcher.assignmentZNode); - boolean failover = !serverManager.getDeadServers().isEmpty(); - if (failover) { - // This may not be a failover actually, especially if meta is on this master. - if (LOG.isDebugEnabled()) { - LOG.debug("Found dead servers out on cluster " + serverManager.getDeadServers()); - } - // Check if there are any regions on these servers - failover = false; - for (ServerName serverName : serverManager.getDeadServers().copyServerNames()) { - if (regionStates.getRegionAssignments().values().contains(serverName)) { - LOG.debug("Found regions on dead server: " + serverName); - failover = true; - break; - } - } - } - Set onlineServers = serverManager.getOnlineServers().keySet(); - if (!failover) { - // If any one region except meta is assigned, it's a failover. - for (Map.Entry en: - regionStates.getRegionAssignments().entrySet()) { - HRegionInfo hri = en.getKey(); - if (!hri.isMetaTable() - && onlineServers.contains(en.getValue())) { - LOG.debug("Found region " + hri + " out on cluster"); - failover = true; - break; - } - } - } - if (!failover) { - // If any region except meta is in transition on a live server, it's a failover. - Set regionsInTransition = regionStates.getRegionsInTransition(); - if (!regionsInTransition.isEmpty()) { - for (RegionState regionState: regionsInTransition) { - ServerName serverName = regionState.getServerName(); - if (!regionState.getRegion().isMetaRegion() - && serverName != null && onlineServers.contains(serverName)) { - LOG.debug("Found " + regionState + " for region " + - regionState.getRegion().getRegionNameAsString() + " for server " + - serverName + "in RITs"); - failover = true; - break; - } - } - } - } - if (!failover) { - // If we get here, we have a full cluster restart. It is a failover only - // if there are some WALs are not split yet. For meta WALs, they should have - // been split already, if any. We can walk through those queued dead servers, - // if they don't have any WALs, this restart should be considered as a clean one - Set queuedDeadServers = serverManager.getRequeuedDeadServers().keySet(); - if (!queuedDeadServers.isEmpty()) { - Configuration conf = server.getConfiguration(); - Path walRootDir = FSUtils.getWALRootDir(conf); - FileSystem walFs = FSUtils.getWALFileSystem(conf); - for (ServerName serverName: queuedDeadServers) { - // In the case of a clean exit, the shutdown handler would have presplit any WALs and - // removed empty directories. 
- Path walDir = new Path(walRootDir, - AbstractFSWALProvider.getWALDirectoryName(serverName.toString())); - Path splitDir = walDir.suffix(AbstractFSWALProvider.SPLITTING_EXT); - if (checkWals(walFs, walDir) || checkWals(walFs, splitDir)) { - LOG.debug("Found queued dead server " + serverName); - failover = true; - break; - } - } - if (!failover) { - // We figured that it's not a failover, so no need to - // work on these re-queued dead servers any more. - LOG.info("AM figured that it's not a failover and cleaned up " - + queuedDeadServers.size() + " queued dead servers"); - serverManager.removeRequeuedDeadServers(); - } - } - } - - Set disabledOrDisablingOrEnabling = null; - Map allRegions = null; - - if (!failover) { - disabledOrDisablingOrEnabling = tableStateManager.getTablesInStates( - TableState.State.DISABLED, TableState.State.DISABLING, - TableState.State.ENABLING); - - // Clean re/start, mark all user regions closed before reassignment - allRegions = regionStates.closeAllUserRegions( - disabledOrDisablingOrEnabling); - } - - // Now region states are restored - regionStateStore.start(); - - if (failover) { - if (deadServers != null && !deadServers.isEmpty()) { - for (ServerName serverName: deadServers) { - if (!serverManager.isServerDead(serverName)) { - serverManager.expireServer(serverName); // Let SSH do region re-assign - } - } - } - processRegionsInTransition(regionStates.getRegionsInTransition()); - } - - // Now we can safely claim failover cleanup completed and enable - // ServerShutdownHandler for further processing. The nodes (below) - // in transition, if any, are for regions not related to those - // dead servers at all, and can be done in parallel to SSH. - failoverCleanupDone(); - if (!failover) { - // Fresh cluster startup. - LOG.info("Clean cluster startup. Don't reassign user regions"); - assignAllUserRegions(allRegions); - } else { - LOG.info("Failover! Reassign user regions"); - } - // unassign replicas of the split parents and the merged regions - // the daughter replicas are opened in assignAllUserRegions if it was - // not already opened. 
- for (HRegionInfo h : replicasToClose) { - unassign(h); - } - replicasToClose.clear(); - return failover; - } - - private boolean checkWals(FileSystem fs, Path dir) throws IOException { - if (!fs.exists(dir)) { - LOG.debug(dir + " doesn't exist"); - return false; - } - if (!fs.getFileStatus(dir).isDirectory()) { - LOG.warn(dir + " is not a directory"); - return false; - } - FileStatus[] files = FSUtils.listStatus(fs, dir); - if (files == null || files.length == 0) { - LOG.debug(dir + " has no files"); - return false; - } - for (int i = 0; i < files.length; i++) { - if (files[i].isFile() && files[i].getLen() > 0) { - LOG.debug(dir + " has a non-empty file: " + files[i].getPath()); - return true; - } else if (files[i].isDirectory() && checkWals(fs, files[i].getPath())) { - LOG.debug(dir + " is a directory and has a non-empty file: " + files[i].getPath()); - return true; - } - } - LOG.debug("Found 0 non-empty wal files for :" + dir); - return false; - } - - /** - * When a region is closed, it should be removed from the regionsToReopen - * @param hri HRegionInfo of the region which was closed - */ - public void removeClosedRegion(HRegionInfo hri) { - if (regionsToReopen.remove(hri.getEncodedName()) != null) { - LOG.debug("Removed region from reopening regions because it was closed"); - } - } - - void processFavoredNodesForDaughters(HRegionInfo parent, - HRegionInfo regionA, HRegionInfo regionB) throws IOException { - if (shouldAssignFavoredNodes(parent)) { - List onlineServers = this.serverManager.getOnlineServersList(); - ((FavoredNodesPromoter) this.balancer). - generateFavoredNodesForDaughter(onlineServers, parent, regionA, regionB); - } - } - - void processFavoredNodesForMerge(HRegionInfo merged, HRegionInfo regionA, HRegionInfo regionB) - throws IOException { - if (shouldAssignFavoredNodes(merged)) { - ((FavoredNodesPromoter)this.balancer). - generateFavoredNodesForMergedRegion(merged, regionA, regionB); - } - } - - /* - * Favored nodes should be applied only when FavoredNodes balancer is configured and the region - * belongs to a non-system table. - */ - private boolean shouldAssignFavoredNodes(HRegionInfo region) { - return this.shouldAssignRegionsWithFavoredNodes - && FavoredNodesManager.isFavoredNodeApplicable(region); - } - - /** - * Marks the region as online. Removes it from regions in transition and - * updates the in-memory assignment information. - *
<p>
- * Used when a region has been successfully opened on a region server. - * @param regionInfo - * @param sn - */ - void regionOnline(HRegionInfo regionInfo, ServerName sn) { - regionOnline(regionInfo, sn, HConstants.NO_SEQNUM); - } - - void regionOnline(HRegionInfo regionInfo, ServerName sn, long openSeqNum) { - numRegionsOpened.incrementAndGet(); - regionStates.regionOnline(regionInfo, sn, openSeqNum); - - // Remove plan if one. - clearRegionPlan(regionInfo); - balancer.regionOnline(regionInfo, sn); - - // Tell our listeners that a region was opened - sendRegionOpenedNotification(regionInfo, sn); - } - - /** - * Marks the region as offline. Removes it from regions in transition and - * removes in-memory assignment information. - *
<p>
- * Used when a region has been closed and should remain closed. - * @param regionInfo - */ - public void regionOffline(final HRegionInfo regionInfo) { - regionOffline(regionInfo, null); - } - - public void offlineDisabledRegion(HRegionInfo regionInfo) { - replicasToClose.remove(regionInfo); - regionOffline(regionInfo); - } - - // Assignment methods - - /** - * Assigns the specified region. - *
<p>
- * If a RegionPlan is available with a valid destination then it will be used - * to determine what server region is assigned to. If no RegionPlan is - * available, region will be assigned to a random available server. - *
<p>
- * Updates the RegionState and sends the OPEN RPC. - *
<p>
- * This will only succeed if the region is in transition and in a CLOSED or - * OFFLINE state or not in transition, and of course, the - * chosen server is up and running (It may have just crashed!). - * - * @param region server to be assigned - */ - public void assign(HRegionInfo region) { - assign(region, false); - } - - /** - * Use care with forceNewPlan. It could cause double assignment. - */ - public void assign(HRegionInfo region, boolean forceNewPlan) { - if (isDisabledorDisablingRegionInRIT(region)) { - return; - } - String encodedName = region.getEncodedName(); - Lock lock = locker.acquireLock(encodedName); - try { - RegionState state = forceRegionStateToOffline(region, forceNewPlan); - if (state != null) { - if (regionStates.wasRegionOnDeadServer(encodedName)) { - LOG.info("Skip assigning " + region.getRegionNameAsString() - + ", it's host " + regionStates.getLastRegionServerOfRegion(encodedName) - + " is dead but not processed yet"); - return; - } - assign(state, forceNewPlan); - } - } finally { - lock.unlock(); - } - } - - /** - * Bulk assign regions to destination. - * @param destination - * @param regions Regions to assign. - * @return true if successful - */ - boolean assign(final ServerName destination, final List regions) - throws InterruptedException { - long startTime = EnvironmentEdgeManager.currentTime(); - try { - int regionCount = regions.size(); - if (regionCount == 0) { - return true; - } - LOG.info("Assigning " + regionCount + " region(s) to " + destination.toString()); - Set encodedNames = new HashSet(regionCount); - for (HRegionInfo region : regions) { - encodedNames.add(region.getEncodedName()); - } - - List failedToOpenRegions = new ArrayList(); - Map locks = locker.acquireLocks(encodedNames); - try { - Map plans = new HashMap(regionCount); - List states = new ArrayList(regionCount); - for (HRegionInfo region : regions) { - String encodedName = region.getEncodedName(); - if (!isDisabledorDisablingRegionInRIT(region)) { - RegionState state = forceRegionStateToOffline(region, false); - boolean onDeadServer = false; - if (state != null) { - if (regionStates.wasRegionOnDeadServer(encodedName)) { - LOG.info("Skip assigning " + region.getRegionNameAsString() - + ", it's host " + regionStates.getLastRegionServerOfRegion(encodedName) - + " is dead but not processed yet"); - onDeadServer = true; - } else { - RegionPlan plan = new RegionPlan(region, state.getServerName(), destination); - plans.put(encodedName, plan); - states.add(state); - continue; - } - } - // Reassign if the region wasn't on a dead server - if (!onDeadServer) { - LOG.info("failed to force region state to offline, " - + "will reassign later: " + region); - failedToOpenRegions.add(region); // assign individually later - } - } - // Release the lock, this region is excluded from bulk assign because - // we can't update its state, or set its znode to offline. - Lock lock = locks.remove(encodedName); - lock.unlock(); - } - - if (server.isStopped()) { - return false; - } - - // Add region plans, so we can updateTimers when one region is opened so - // that unnecessary timeout on RIT is reduced. 
- this.addPlans(plans); - - List>> regionOpenInfos = - new ArrayList>>(states.size()); - for (RegionState state: states) { - HRegionInfo region = state.getRegion(); - regionStates.updateRegionState( - region, State.PENDING_OPEN, destination); - List favoredNodes = ServerName.EMPTY_SERVER_LIST; - if (shouldAssignFavoredNodes(region)) { - favoredNodes = server.getFavoredNodesManager().getFavoredNodesWithDNPort(region); - } - regionOpenInfos.add(new Pair>( - region, favoredNodes)); - } - - // Move on to open regions. - try { - // Send OPEN RPC. If it fails on a IOE or RemoteException, - // regions will be assigned individually. - Configuration conf = server.getConfiguration(); - long maxWaitTime = System.currentTimeMillis() + - conf.getLong("hbase.regionserver.rpc.startup.waittime", 60000); - for (int i = 1; i <= maximumAttempts && !server.isStopped(); i++) { - try { - List regionOpeningStateList = serverManager - .sendRegionOpen(destination, regionOpenInfos); - for (int k = 0, n = regionOpeningStateList.size(); k < n; k++) { - RegionOpeningState openingState = regionOpeningStateList.get(k); - if (openingState != RegionOpeningState.OPENED) { - HRegionInfo region = regionOpenInfos.get(k).getFirst(); - LOG.info("Got opening state " + openingState - + ", will reassign later: " + region); - // Failed opening this region, reassign it later - forceRegionStateToOffline(region, true); - failedToOpenRegions.add(region); - } - } - break; - } catch (IOException e) { - if (e instanceof RemoteException) { - e = ((RemoteException)e).unwrapRemoteException(); - } - if (e instanceof RegionServerStoppedException) { - LOG.warn("The region server was shut down, ", e); - // No need to retry, the region server is a goner. - return false; - } else if (e instanceof ServerNotRunningYetException) { - long now = System.currentTimeMillis(); - if (now < maxWaitTime) { - if (LOG.isDebugEnabled()) { - LOG.debug("Server is not yet up; waiting up to " + - (maxWaitTime - now) + "ms", e); - } - Thread.sleep(100); - i--; // reset the try count - continue; - } - } else if (e instanceof java.net.SocketTimeoutException - && this.serverManager.isServerOnline(destination)) { - // In case socket is timed out and the region server is still online, - // the openRegion RPC could have been accepted by the server and - // just the response didn't go through. So we will retry to - // open the region on the same server. - if (LOG.isDebugEnabled()) { - LOG.debug("Bulk assigner openRegion() to " + destination - + " has timed out, but the regions might" - + " already be opened on it.", e); - } - // wait and reset the re-try count, server might be just busy. - Thread.sleep(100); - i--; - continue; - } else if (e instanceof FailedServerException && i < maximumAttempts) { - // In case the server is in the failed server list, no point to - // retry too soon. 
Retry after the failed_server_expiry time - long sleepTime = 1 + conf.getInt(RpcClient.FAILED_SERVER_EXPIRY_KEY, - RpcClient.FAILED_SERVER_EXPIRY_DEFAULT); - if (LOG.isDebugEnabled()) { - LOG.debug(destination + " is on failed server list; waiting " - + sleepTime + "ms", e); - } - Thread.sleep(sleepTime); - continue; - } - throw e; - } - } - } catch (IOException e) { - // Can be a socket timeout, EOF, NoRouteToHost, etc - LOG.info("Unable to communicate with " + destination - + " in order to assign regions, ", e); - for (RegionState state: states) { - HRegionInfo region = state.getRegion(); - forceRegionStateToOffline(region, true); - } - return false; - } - } finally { - for (Lock lock : locks.values()) { - lock.unlock(); - } - } - - if (!failedToOpenRegions.isEmpty()) { - for (HRegionInfo region : failedToOpenRegions) { - if (!regionStates.isRegionOnline(region)) { - invokeAssign(region); - } - } - } - - // wait for assignment completion - ArrayList userRegionSet = new ArrayList(regions.size()); - for (HRegionInfo region: regions) { - if (!region.getTable().isSystemTable()) { - userRegionSet.add(region); - } - } - if (!waitForAssignment(userRegionSet, true, userRegionSet.size(), - System.currentTimeMillis())) { - LOG.debug("some user regions are still in transition: " + userRegionSet); - } - LOG.debug("Bulk assigning done for " + destination); - return true; - } finally { - metricsAssignmentManager.updateBulkAssignTime(EnvironmentEdgeManager.currentTime() - startTime); - } - } - - /** - * Send CLOSE RPC if the server is online, otherwise, offline the region. - * - * The RPC will be sent only to the region sever found in the region state - * if it is passed in, otherwise, to the src server specified. If region - * state is not specified, we don't update region state at all, instead - * we just send the RPC call. This is useful for some cleanup without - * messing around the region states (see handleRegion, on region opened - * on an unexpected server scenario, for an example) - */ - private void unassign(final HRegionInfo region, - final ServerName server, final ServerName dest) { - for (int i = 1; i <= this.maximumAttempts; i++) { - if (this.server.isStopped() || this.server.isAborted()) { - LOG.debug("Server stopped/aborted; skipping unassign of " + region); - return; - } - if (!serverManager.isServerOnline(server)) { - LOG.debug("Offline " + region.getRegionNameAsString() - + ", no need to unassign since it's on a dead server: " + server); - regionStates.updateRegionState(region, State.OFFLINE); - return; - } - try { - // Send CLOSE RPC - if (serverManager.sendRegionClose(server, region, dest)) { - LOG.debug("Sent CLOSE to " + server + " for region " + - region.getRegionNameAsString()); - return; - } - // This never happens. Currently regionserver close always return true. - // Todo; this can now happen (0.96) if there is an exception in a coprocessor - LOG.warn("Server " + server + " region CLOSE RPC returned false for " + - region.getRegionNameAsString()); - } catch (Throwable t) { - long sleepTime = 0; - Configuration conf = this.server.getConfiguration(); - if (t instanceof RemoteException) { - t = ((RemoteException)t).unwrapRemoteException(); - } - if (t instanceof RegionServerAbortedException - || t instanceof RegionServerStoppedException - || t instanceof ServerNotRunningYetException) { - // RS is aborting, we cannot offline the region since the region may need to do WAL - // recovery. Until we see the RS expiration, we should retry. 
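Both retry paths above (bulk assign and unassign) back off for the failed-server expiry window before re-contacting a server that recently failed. A minimal sketch of that backoff calculation, assuming only a Hadoop Configuration; the key string and default restate what RpcClient exposes and are assumptions here, not imports from the patch:

import org.apache.hadoop.conf.Configuration;

// Illustrative sketch: sleep just past the failed-server expiry so the next
// attempt is not immediately rejected by the failed-server list again.
final class FailedServerBackoff {
  // Assumed to mirror RpcClient.FAILED_SERVER_EXPIRY_KEY / FAILED_SERVER_EXPIRY_DEFAULT.
  static final String FAILED_SERVER_EXPIRY_KEY = "hbase.ipc.client.failed.servers.expiry";
  static final int FAILED_SERVER_EXPIRY_DEFAULT = 2000;

  static long backoffMillis(Configuration conf) {
    return 1 + conf.getInt(FAILED_SERVER_EXPIRY_KEY, FAILED_SERVER_EXPIRY_DEFAULT);
  }
}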
- sleepTime = 1 + conf.getInt(RpcClient.FAILED_SERVER_EXPIRY_KEY, - RpcClient.FAILED_SERVER_EXPIRY_DEFAULT); - - } else if (t instanceof NotServingRegionException) { - LOG.debug("Offline " + region.getRegionNameAsString() - + ", it's not any more on " + server, t); - regionStates.updateRegionState(region, State.OFFLINE); - return; - } else if (t instanceof FailedServerException && i < maximumAttempts) { - // In case the server is in the failed server list, no point to - // retry too soon. Retry after the failed_server_expiry time - sleepTime = 1 + conf.getInt(RpcClient.FAILED_SERVER_EXPIRY_KEY, - RpcClient.FAILED_SERVER_EXPIRY_DEFAULT); - if (LOG.isDebugEnabled()) { - LOG.debug(server + " is on failed server list; waiting " + sleepTime + "ms", t); - } - } - try { - if (sleepTime > 0) { - Thread.sleep(sleepTime); - } - } catch (InterruptedException ie) { - LOG.warn("Interrupted unassign " + region.getRegionNameAsString(), ie); - Thread.currentThread().interrupt(); - regionStates.updateRegionState(region, State.FAILED_CLOSE); - return; - } - LOG.info("Server " + server + " returned " + t + " for " - + region.getRegionNameAsString() + ", try=" + i - + " of " + this.maximumAttempts, t); - } - } - // Run out of attempts - regionStates.updateRegionState(region, State.FAILED_CLOSE); - } - - /** - * Set region to OFFLINE unless it is opening and forceNewPlan is false. - */ - private RegionState forceRegionStateToOffline( - final HRegionInfo region, final boolean forceNewPlan) { - RegionState state = regionStates.getRegionState(region); - if (state == null) { - LOG.warn("Assigning but not in region states: " + region); - state = regionStates.createRegionState(region); - } - - if (forceNewPlan && LOG.isDebugEnabled()) { - LOG.debug("Force region state offline " + state); - } - - switch (state.getState()) { - case OPEN: - case OPENING: - case PENDING_OPEN: - case CLOSING: - case PENDING_CLOSE: - if (!forceNewPlan) { - LOG.debug("Skip assigning " + - region + ", it is already " + state); - return null; - } - case FAILED_CLOSE: - case FAILED_OPEN: - regionStates.updateRegionState(region, State.PENDING_CLOSE); - unassign(region, state.getServerName(), null); - state = regionStates.getRegionState(region); - if (!state.isOffline() && !state.isClosed()) { - // If the region isn't offline, we can't re-assign - // it now. It will be assigned automatically after - // the regionserver reports it's closed. - return null; - } - case OFFLINE: - case CLOSED: - break; - default: - LOG.error("Trying to assign region " + region - + ", which is " + state); - return null; - } - return state; - } - - /** - * Caller must hold lock on the passed state object. 
- * @param state - * @param forceNewPlan - */ - private void assign(RegionState state, boolean forceNewPlan) { - long startTime = EnvironmentEdgeManager.currentTime(); - try { - Configuration conf = server.getConfiguration(); - RegionPlan plan = null; - long maxWaitTime = -1; - HRegionInfo region = state.getRegion(); - Throwable previousException = null; - for (int i = 1; i <= maximumAttempts; i++) { - if (server.isStopped() || server.isAborted()) { - LOG.info("Skip assigning " + region.getRegionNameAsString() - + ", the server is stopped/aborted"); - return; - } - - if (plan == null) { // Get a server for the region at first - try { - plan = getRegionPlan(region, forceNewPlan); - } catch (HBaseIOException e) { - LOG.warn("Failed to get region plan", e); - } - } - - if (plan == null) { - LOG.warn("Unable to determine a plan to assign " + region); - - // For meta region, we have to keep retrying until succeeding - if (region.isMetaRegion()) { - if (i == maximumAttempts) { - i = 0; // re-set attempt count to 0 for at least 1 retry - - LOG.warn("Unable to determine a plan to assign a hbase:meta region " + region + - " after maximumAttempts (" + this.maximumAttempts + - "). Reset attempts count and continue retrying."); - } - waitForRetryingMetaAssignment(); - continue; - } - - regionStates.updateRegionState(region, State.FAILED_OPEN); - return; - } - LOG.info("Assigning " + region.getRegionNameAsString() + - " to " + plan.getDestination()); - // Transition RegionState to PENDING_OPEN - regionStates.updateRegionState(region, - State.PENDING_OPEN, plan.getDestination()); - - boolean needNewPlan = false; - final String assignMsg = "Failed assignment of " + region.getRegionNameAsString() + - " to " + plan.getDestination(); - try { - List favoredNodes = ServerName.EMPTY_SERVER_LIST; - if (shouldAssignFavoredNodes(region)) { - favoredNodes = server.getFavoredNodesManager().getFavoredNodesWithDNPort(region); - } - serverManager.sendRegionOpen(plan.getDestination(), region, favoredNodes); - return; // we're done - } catch (Throwable t) { - if (t instanceof RemoteException) { - t = ((RemoteException) t).unwrapRemoteException(); - } - previousException = t; - - // Should we wait a little before retrying? If the server is starting it's yes. - boolean hold = (t instanceof ServerNotRunningYetException); - - // In case socket is timed out and the region server is still online, - // the openRegion RPC could have been accepted by the server and - // just the response didn't go through. So we will retry to - // open the region on the same server. 
- boolean retry = !hold && (t instanceof java.net.SocketTimeoutException - && this.serverManager.isServerOnline(plan.getDestination())); - - if (hold) { - LOG.warn(assignMsg + ", waiting a little before trying on the same region server " + - "try=" + i + " of " + this.maximumAttempts, t); - - if (maxWaitTime < 0) { - maxWaitTime = EnvironmentEdgeManager.currentTime() - + this.server.getConfiguration().getLong( - "hbase.regionserver.rpc.startup.waittime", 60000); - } - try { - long now = EnvironmentEdgeManager.currentTime(); - if (now < maxWaitTime) { - if (LOG.isDebugEnabled()) { - LOG.debug("Server is not yet up; waiting up to " - + (maxWaitTime - now) + "ms", t); - } - Thread.sleep(100); - i--; // reset the try count - } else { - LOG.debug("Server is not up for a while; try a new one", t); - needNewPlan = true; - } - } catch (InterruptedException ie) { - LOG.warn("Failed to assign " - + region.getRegionNameAsString() + " since interrupted", ie); - regionStates.updateRegionState(region, State.FAILED_OPEN); - Thread.currentThread().interrupt(); - return; - } - } else if (retry) { - i--; // we want to retry as many times as needed as long as the RS is not dead. - if (LOG.isDebugEnabled()) { - LOG.debug(assignMsg + ", trying to assign to the same region server due ", t); - } - } else { - needNewPlan = true; - LOG.warn(assignMsg + ", trying to assign elsewhere instead;" + - " try=" + i + " of " + this.maximumAttempts, t); - } - } - - if (i == this.maximumAttempts) { - // For meta region, we have to keep retrying until succeeding - if (region.isMetaRegion()) { - i = 0; // re-set attempt count to 0 for at least 1 retry - LOG.warn(assignMsg + - ", trying to assign a hbase:meta region reached to maximumAttempts (" + - this.maximumAttempts + "). Reset attempt counts and continue retrying."); - waitForRetryingMetaAssignment(); - } - else { - // Don't reset the region state or get a new plan any more. - // This is the last try. - continue; - } - } - - // If region opened on destination of present plan, reassigning to new - // RS may cause double assignments. In case of RegionAlreadyInTransitionException - // reassigning to same RS. - if (needNewPlan) { - // Force a new plan and reassign. Will return null if no servers. - // The new plan could be the same as the existing plan since we don't - // exclude the server of the original plan, which should not be - // excluded since it could be the only server up now. - RegionPlan newPlan = null; - try { - newPlan = getRegionPlan(region, true); - } catch (HBaseIOException e) { - LOG.warn("Failed to get region plan", e); - } - if (newPlan == null) { - regionStates.updateRegionState(region, State.FAILED_OPEN); - LOG.warn("Unable to find a viable location to assign region " + - region.getRegionNameAsString()); - return; - } - - if (plan != newPlan && !plan.getDestination().equals(newPlan.getDestination())) { - // Clean out plan we failed execute and one that doesn't look like it'll - // succeed anyways; we need a new plan! 
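The hold branch above waits out a region server that is still starting ("hbase.regionserver.rpc.startup.waittime") instead of consuming assignment attempts, and only falls back to a new plan once that deadline passes. A simplified, self-contained sketch of the pattern; the Callable action and the ServerStartingException stand-in are illustrative assumptions, not types from the patch:

import java.util.concurrent.Callable;

// Illustrative sketch: retry an action until a startup deadline passes, without
// counting attempts made while the target server reports it is still starting.
final class StartupWaitRetry {
  static <T> T runWithStartupWait(Callable<T> action, long startupWaitMillis, int maxAttempts)
      throws Exception {
    long deadline = System.currentTimeMillis() + startupWaitMillis;
    for (int attempt = 1; attempt <= maxAttempts; attempt++) {
      try {
        return action.call();
      } catch (ServerStartingException e) {
        if (System.currentTimeMillis() < deadline) {
          Thread.sleep(100);
          attempt--; // do not consume an attempt while the server is starting
        }
        // past the deadline: let the next attempt run (the caller would pick a new plan)
      }
    }
    throw new Exception("ran out of attempts");
  }

  static final class ServerStartingException extends Exception {}
}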
- // Transition back to OFFLINE - regionStates.updateRegionState(region, State.OFFLINE); - plan = newPlan; - } else if(plan.getDestination().equals(newPlan.getDestination()) && - previousException instanceof FailedServerException) { - try { - LOG.info("Trying to re-assign " + region.getRegionNameAsString() + - " to the same failed server."); - Thread.sleep(1 + conf.getInt(RpcClient.FAILED_SERVER_EXPIRY_KEY, - RpcClient.FAILED_SERVER_EXPIRY_DEFAULT)); - } catch (InterruptedException ie) { - LOG.warn("Failed to assign " - + region.getRegionNameAsString() + " since interrupted", ie); - regionStates.updateRegionState(region, State.FAILED_OPEN); - Thread.currentThread().interrupt(); - return; - } - } - } - } - // Run out of attempts - regionStates.updateRegionState(region, State.FAILED_OPEN); - } finally { - metricsAssignmentManager.updateAssignmentTime(EnvironmentEdgeManager.currentTime() - startTime); - } - } - - private boolean isDisabledorDisablingRegionInRIT(final HRegionInfo region) { - if (this.tableStateManager.isTableState(region.getTable(), - TableState.State.DISABLED, - TableState.State.DISABLING) || replicasToClose.contains(region)) { - LOG.info("Table " + region.getTable() + " is disabled or disabling;" - + " skipping assign of " + region.getRegionNameAsString()); - offlineDisabledRegion(region); - return true; - } - return false; - } - - /** - * @param region the region to assign - * @param forceNewPlan If true, then if an existing plan exists, a new plan - * will be generated. - * @return Plan for passed region (If none currently, it creates one or - * if no servers to assign, it returns null). - */ - private RegionPlan getRegionPlan(final HRegionInfo region, - final boolean forceNewPlan) throws HBaseIOException { - // Pickup existing plan or make a new one - final String encodedName = region.getEncodedName(); - final List destServers = - serverManager.createDestinationServersList(); - - if (destServers.isEmpty()){ - LOG.warn("Can't move " + encodedName + - ", there is no destination server available."); - return null; - } - - RegionPlan randomPlan = null; - boolean newPlan = false; - RegionPlan existingPlan; - - synchronized (this.regionPlans) { - existingPlan = this.regionPlans.get(encodedName); - - if (existingPlan != null && existingPlan.getDestination() != null) { - LOG.debug("Found an existing plan for " + region.getRegionNameAsString() - + " destination server is " + existingPlan.getDestination() + - " accepted as a dest server = " + destServers.contains(existingPlan.getDestination())); - } - - if (forceNewPlan - || existingPlan == null - || existingPlan.getDestination() == null - || !destServers.contains(existingPlan.getDestination())) { - newPlan = true; - try { - randomPlan = new RegionPlan(region, null, - balancer.randomAssignment(region, destServers)); - } catch (IOException ex) { - LOG.warn("Failed to create new plan.",ex); - return null; - } - this.regionPlans.put(encodedName, randomPlan); - } - } - - if (newPlan) { - if (randomPlan.getDestination() == null) { - LOG.warn("Can't find a destination for " + encodedName); - return null; - } - if (LOG.isDebugEnabled()) { - LOG.debug("No previous transition plan found (or ignoring " + - "an existing plan) for " + region.getRegionNameAsString() + - "; generated random plan=" + randomPlan + "; " + destServers.size() + - " (online=" + serverManager.getOnlineServers().size() + - ") available servers, forceNewPlan=" + forceNewPlan); - } - return randomPlan; - } - if (LOG.isDebugEnabled()) { - LOG.debug("Using pre-existing plan 
for " + - region.getRegionNameAsString() + "; plan=" + existingPlan); - } - return existingPlan; - } - - /** - * Wait for some time before retrying meta table region assignment - */ - private void waitForRetryingMetaAssignment() { - try { - Thread.sleep(this.sleepTimeBeforeRetryingMetaAssignment); - } catch (InterruptedException e) { - LOG.error("Got exception while waiting for hbase:meta assignment"); - Thread.currentThread().interrupt(); - } - } - - /** - * Unassigns the specified region. - *
<p>
- * Updates the RegionState and sends the CLOSE RPC unless region is being - * split by regionserver; then the unassign fails (silently) because we - * presume the region being unassigned no longer exists (its been split out - * of existence). TODO: What to do if split fails and is rolled back and - * parent is revivified? - *
<p>
- * If a RegionPlan is already set, it will remain. - * - * @param region server to be unassigned - */ - public void unassign(HRegionInfo region) { - unassign(region, null); - } - - - /** - * Unassigns the specified region. - *
<p>
- * Updates the RegionState and sends the CLOSE RPC unless region is being - * split by regionserver; then the unassign fails (silently) because we - * presume the region being unassigned no longer exists (its been split out - * of existence). TODO: What to do if split fails and is rolled back and - * parent is revivified? - *
<p>
- * If a RegionPlan is already set, it will remain. - * - * @param region server to be unassigned - * @param dest the destination server of the region - */ - public void unassign(HRegionInfo region, ServerName dest) { - // TODO: Method needs refactoring. Ugly buried returns throughout. Beware! - LOG.debug("Starting unassign of " + region.getRegionNameAsString() - + " (offlining), current state: " + regionStates.getRegionState(region)); - - String encodedName = region.getEncodedName(); - // Grab the state of this region and synchronize on it - // We need a lock here as we're going to do a put later and we don't want multiple states - // creation - ReentrantLock lock = locker.acquireLock(encodedName); - RegionState state = regionStates.getRegionTransitionState(encodedName); - try { - if (state == null || state.isFailedClose()) { - if (state == null) { - // Region is not in transition. - // We can unassign it only if it's not SPLIT/MERGED. - state = regionStates.getRegionState(encodedName); - if (state != null && state.isUnassignable()) { - LOG.info("Attempting to unassign " + state + ", ignored"); - // Offline region will be reassigned below - return; - } - if (state == null || state.getServerName() == null) { - // We don't know where the region is, offline it. - // No need to send CLOSE RPC - LOG.warn("Attempting to unassign a region not in RegionStates " - + region.getRegionNameAsString() + ", offlined"); - regionOffline(region); - return; - } - } - state = regionStates.updateRegionState( - region, State.PENDING_CLOSE); - } else if (state.isFailedOpen()) { - // The region is not open yet - regionOffline(region); - return; - } else { - LOG.debug("Attempting to unassign " + - region.getRegionNameAsString() + " but it is " + - "already in transition (" + state.getState()); - return; - } - - unassign(region, state.getServerName(), dest); - } finally { - lock.unlock(); - - // Region is expected to be reassigned afterwards - if (!replicasToClose.contains(region) - && regionStates.isRegionInState(region, State.OFFLINE)) { - assign(region); - } - } - } - - /** - * Used by unit tests. Return the number of regions opened so far in the life - * of the master. Increases by one every time the master opens a region - * @return the counter value of the number of regions opened so far - */ - public int getNumRegionsOpened() { - return numRegionsOpened.get(); - } - - /** - * Waits until the specified region has completed assignment. - *
<p>
- * If the region is already assigned, returns immediately. Otherwise, method - * blocks until the region is assigned. - * @param regionInfo region to wait on assignment for - * @return true if the region is assigned false otherwise. - * @throws InterruptedException - */ - public boolean waitForAssignment(HRegionInfo regionInfo) - throws InterruptedException { - ArrayList regionSet = new ArrayList(1); - regionSet.add(regionInfo); - return waitForAssignment(regionSet, true, Long.MAX_VALUE); - } - - /** - * Waits until the specified region has completed assignment, or the deadline is reached. - */ - protected boolean waitForAssignment(final Collection regionSet, - final boolean waitTillAllAssigned, final int reassigningRegions, - final long minEndTime) throws InterruptedException { - long deadline = minEndTime + bulkPerRegionOpenTimeGuesstimate * (reassigningRegions + 1); - if (deadline < 0) { // Overflow - deadline = Long.MAX_VALUE; // wait forever - } - return waitForAssignment(regionSet, waitTillAllAssigned, deadline); - } - - /** - * Waits until the specified region has completed assignment, or the deadline is reached. - * @param regionSet set of region to wait on. the set is modified and the assigned regions removed - * @param waitTillAllAssigned true if we should wait all the regions to be assigned - * @param deadline the timestamp after which the wait is aborted - * @return true if all the regions are assigned false otherwise. - * @throws InterruptedException - */ - protected boolean waitForAssignment(final Collection regionSet, - final boolean waitTillAllAssigned, final long deadline) throws InterruptedException { - // We're not synchronizing on regionsInTransition now because we don't use any iterator. - while (!regionSet.isEmpty() && !server.isStopped() && deadline > System.currentTimeMillis()) { - int failedOpenCount = 0; - Iterator regionInfoIterator = regionSet.iterator(); - while (regionInfoIterator.hasNext()) { - HRegionInfo hri = regionInfoIterator.next(); - if (regionStates.isRegionOnline(hri) || regionStates.isRegionInState(hri, - State.SPLITTING, State.SPLIT, State.MERGING, State.MERGED)) { - regionInfoIterator.remove(); - } else if (regionStates.isRegionInState(hri, State.FAILED_OPEN)) { - failedOpenCount++; - } - } - if (!waitTillAllAssigned) { - // No need to wait, let assignment going on asynchronously - break; - } - if (!regionSet.isEmpty()) { - if (failedOpenCount == regionSet.size()) { - // all the regions we are waiting had an error on open. - break; - } - regionStates.waitForUpdate(100); - } - } - return regionSet.isEmpty(); - } - - /** - * Assigns the hbase:meta region or a replica. - *
<p>
- * Assumes that hbase:meta is currently closed and is not being actively served by - * any RegionServer. - * @param hri TODO - */ - public void assignMeta(HRegionInfo hri) throws KeeperException { - regionStates.updateRegionState(hri, State.OFFLINE); - assign(hri); - } - - /** - * Assigns specified regions retaining assignments, if any. - *
<p>
- * This is a synchronous call and will return once every region has been - * assigned. If anything fails, an exception is thrown - * @throws InterruptedException - * @throws IOException - */ - public void assign(Map regions) - throws IOException, InterruptedException { - if (regions == null || regions.isEmpty()) { - return; - } - List servers = serverManager.createDestinationServersList(); - if (servers == null || servers.isEmpty()) { - throw new IOException("Found no destination server to assign region(s)"); - } - - // Reuse existing assignment info - Map> bulkPlan = - balancer.retainAssignment(regions, servers); - if (bulkPlan == null) { - throw new IOException("Unable to determine a plan to assign region(s)"); - } - - processBogusAssignments(bulkPlan); - - assign(regions.size(), servers.size(), - "retainAssignment=true", bulkPlan); - } - - /** - * Assigns specified regions round robin, if any. - *
<p>
- * This is a synchronous call and will return once every region has been - * assigned. If anything fails, an exception is thrown - * @throws InterruptedException - * @throws IOException - */ - public void assign(List regions) - throws IOException, InterruptedException { - if (regions == null || regions.isEmpty()) { - return; - } - - List servers = serverManager.createDestinationServersList(); - if (servers == null || servers.isEmpty()) { - throw new IOException("Found no destination server to assign region(s)"); - } - - // Generate a round-robin bulk assignment plan - Map> bulkPlan = balancer.roundRobinAssignment(regions, servers); - if (bulkPlan == null) { - throw new IOException("Unable to determine a plan to assign region(s)"); - } - - processBogusAssignments(bulkPlan); - - assign(regions.size(), servers.size(), "round-robin=true", bulkPlan); - } - - private void assign(int regions, int totalServers, - String message, Map> bulkPlan) - throws InterruptedException, IOException { - - int servers = bulkPlan.size(); - if (servers == 1 || (regions < bulkAssignThresholdRegions - && servers < bulkAssignThresholdServers)) { - - // Not use bulk assignment. This could be more efficient in small - // cluster, especially mini cluster for testing, so that tests won't time out - if (LOG.isTraceEnabled()) { - LOG.trace("Not using bulk assignment since we are assigning only " + regions + - " region(s) to " + servers + " server(s)"); - } - - // invoke assignment (async) - ArrayList userRegionSet = new ArrayList(regions); - for (Map.Entry> plan: bulkPlan.entrySet()) { - if (!assign(plan.getKey(), plan.getValue()) && !server.isStopped()) { - for (HRegionInfo region: plan.getValue()) { - if (!regionStates.isRegionOnline(region)) { - invokeAssign(region); - if (!region.getTable().isSystemTable()) { - userRegionSet.add(region); - } - } - } - } - } - - // wait for assignment completion - if (!waitForAssignment(userRegionSet, true, userRegionSet.size(), - System.currentTimeMillis())) { - LOG.debug("some user regions are still in transition: " + userRegionSet); - } - } else { - LOG.info("Bulk assigning " + regions + " region(s) across " - + totalServers + " server(s), " + message); - - // Use fixed count thread pool assigning. - BulkAssigner ba = new GeneralBulkAssigner( - this.server, bulkPlan, this, bulkAssignWaitTillAllAssigned); - ba.bulkAssign(); - LOG.info("Bulk assigning done"); - } - } - - /** - * Assigns all user regions, if any exist. Used during cluster startup. - *
<p>
- * This is a synchronous call and will return once every region has been - * assigned. If anything fails, an exception is thrown and the cluster - * should be shutdown. - * @throws InterruptedException - * @throws IOException - */ - private void assignAllUserRegions(Map allRegions) - throws IOException, InterruptedException { - if (allRegions == null || allRegions.isEmpty()) return; - - // Determine what type of assignment to do on startup - boolean retainAssignment = server.getConfiguration(). - getBoolean("hbase.master.startup.retainassign", true); - - Set regionsFromMetaScan = allRegions.keySet(); - if (retainAssignment) { - assign(allRegions); - } else { - List regions = new ArrayList(regionsFromMetaScan); - assign(regions); - } - - for (HRegionInfo hri : regionsFromMetaScan) { - TableName tableName = hri.getTable(); - if (!tableStateManager.isTableState(tableName, - TableState.State.ENABLED)) { - setEnabledTable(tableName); - } - } - // assign all the replicas that were not recorded in the meta - assign(replicaRegionsNotRecordedInMeta(regionsFromMetaScan, (MasterServices)server)); - } - - /** - * Get number of replicas of a table - */ - private static int getNumReplicas(MasterServices master, TableName table) { - int numReplica = 1; - try { - HTableDescriptor htd = master.getTableDescriptors().get(table); - if (htd == null) { - LOG.warn("master can not get TableDescriptor from table '" + table); - } else { - numReplica = htd.getRegionReplication(); - } - } catch (IOException e){ - LOG.warn("Couldn't get the replication attribute of the table " + table + " due to " - + e.getMessage()); - } - return numReplica; - } - - /** - * Get a list of replica regions that are: - * not recorded in meta yet. We might not have recorded the locations - * for the replicas since the replicas may not have been online yet, master restarted - * in the middle of assigning, ZK erased, etc. - * @param regionsRecordedInMeta the list of regions we know are recorded in meta - * either as a default, or, as the location of a replica - * @param master - * @return list of replica regions - * @throws IOException - */ - public static List replicaRegionsNotRecordedInMeta( - Set regionsRecordedInMeta, MasterServices master)throws IOException { - List regionsNotRecordedInMeta = new ArrayList(); - for (HRegionInfo hri : regionsRecordedInMeta) { - TableName table = hri.getTable(); - if(master.getTableDescriptors().get(table) == null) - continue; - int desiredRegionReplication = getNumReplicas(master, table); - for (int i = 0; i < desiredRegionReplication; i++) { - HRegionInfo replica = RegionReplicaUtil.getRegionInfoForReplica(hri, i); - if (regionsRecordedInMeta.contains(replica)) continue; - regionsNotRecordedInMeta.add(replica); - } - } - return regionsNotRecordedInMeta; - } - - /** - * Rebuild the list of user regions and assignment information. - * Updates regionstates with findings as we go through list of regions. 
- * @return set of servers not online that hosted some regions according to a scan of hbase:meta - * @throws IOException - */ - Set rebuildUserRegions() throws - IOException, KeeperException { - Set disabledOrEnablingTables = tableStateManager.getTablesInStates( - TableState.State.DISABLED, TableState.State.ENABLING); - - Set disabledOrDisablingOrEnabling = tableStateManager.getTablesInStates( - TableState.State.DISABLED, - TableState.State.DISABLING, - TableState.State.ENABLING); - - // Region assignment from META - List results = MetaTableAccessor.fullScanRegions(server.getConnection()); - // Get any new but slow to checkin region server that joined the cluster - Set onlineServers = serverManager.getOnlineServers().keySet(); - // Set of offline servers to be returned - Set offlineServers = new HashSet(); - // Iterate regions in META - for (Result result : results) { - if (result == null && LOG.isDebugEnabled()){ - LOG.debug("null result from meta - ignoring but this is strange."); - continue; - } - // keep a track of replicas to close. These were the replicas of the originally - // unmerged regions. The master might have closed them before but it mightn't - // maybe because it crashed. - PairOfSameType p = MetaTableAccessor.getMergeRegions(result); - if (p.getFirst() != null && p.getSecond() != null) { - int numReplicas = getNumReplicas(server, p.getFirst().getTable()); - for (HRegionInfo merge : p) { - for (int i = 1; i < numReplicas; i++) { - replicasToClose.add(RegionReplicaUtil.getRegionInfoForReplica(merge, i)); - } - } - } - RegionLocations rl = MetaTableAccessor.getRegionLocations(result); - if (rl == null) { - continue; - } - HRegionLocation[] locations = rl.getRegionLocations(); - if (locations == null) { - continue; - } - for (HRegionLocation hrl : locations) { - if (hrl == null) continue; - HRegionInfo regionInfo = hrl.getRegionInfo(); - if (regionInfo == null) continue; - int replicaId = regionInfo.getReplicaId(); - State state = RegionStateStore.getRegionState(result, replicaId); - // keep a track of replicas to close. These were the replicas of the split parents - // from the previous life of the master. 
The master should have closed them before - // but it couldn't maybe because it crashed - if (replicaId == 0 && state.equals(State.SPLIT)) { - for (HRegionLocation h : locations) { - replicasToClose.add(h.getRegionInfo()); - } - } - ServerName lastHost = hrl.getServerName(); - ServerName regionLocation = RegionStateStore.getRegionServer(result, replicaId); - regionStates.createRegionState(regionInfo, state, regionLocation, lastHost); - if (!regionStates.isRegionInState(regionInfo, State.OPEN)) { - // Region is not open (either offline or in transition), skip - continue; - } - TableName tableName = regionInfo.getTable(); - if (!onlineServers.contains(regionLocation)) { - // Region is located on a server that isn't online - offlineServers.add(regionLocation); - } else if (!disabledOrEnablingTables.contains(tableName)) { - // Region is being served and on an active server - // add only if region not in disabled or enabling table - regionStates.regionOnline(regionInfo, regionLocation); - balancer.regionOnline(regionInfo, regionLocation); - } - // need to enable the table if not disabled or disabling or enabling - // this will be used in rolling restarts - if (!disabledOrDisablingOrEnabling.contains(tableName) - && !getTableStateManager().isTableState(tableName, - TableState.State.ENABLED)) { - setEnabledTable(tableName); - } - } - } - return offlineServers; - } - - /** - * Processes list of regions in transition at startup - */ - void processRegionsInTransition(Collection regionsInTransition) { - // We need to send RPC call again for PENDING_OPEN/PENDING_CLOSE regions - // in case the RPC call is not sent out yet before the master was shut down - // since we update the state before we send the RPC call. We can't update - // the state after the RPC call. Otherwise, we don't know what's happened - // to the region if the master dies right after the RPC call is out. - for (RegionState regionState: regionsInTransition) { - LOG.info("Processing " + regionState); - ServerName serverName = regionState.getServerName(); - // Server could be null in case of FAILED_OPEN when master cannot find a region plan. In that - // case, try assigning it here. - if (serverName != null && !serverManager.getOnlineServers().containsKey(serverName)) { - LOG.info("Server " + serverName + " isn't online. 
SSH will handle this"); - continue; // SSH will handle it - } - HRegionInfo regionInfo = regionState.getRegion(); - RegionState.State state = regionState.getState(); - switch (state) { - case CLOSED: - invokeAssign(regionState.getRegion()); - break; - case PENDING_OPEN: - retrySendRegionOpen(regionState); - break; - case PENDING_CLOSE: - retrySendRegionClose(regionState); - break; - case FAILED_CLOSE: - case FAILED_OPEN: - invokeUnAssign(regionInfo); - break; - default: - // No process for other states - break; - } - } - } - - /** - * At master failover, for pending_open region, make sure - * sendRegionOpen RPC call is sent to the target regionserver - */ - private void retrySendRegionOpen(final RegionState regionState) { - this.executorService.submit( - new EventHandler(server, EventType.M_MASTER_RECOVERY) { - @Override - public void process() throws IOException { - HRegionInfo hri = regionState.getRegion(); - ServerName serverName = regionState.getServerName(); - ReentrantLock lock = locker.acquireLock(hri.getEncodedName()); - try { - for (int i = 1; i <= maximumAttempts; i++) { - if (!serverManager.isServerOnline(serverName) - || server.isStopped() || server.isAborted()) { - return; // No need any more - } - try { - if (!regionState.equals(regionStates.getRegionState(hri))) { - return; // Region is not in the expected state any more - } - List favoredNodes = ServerName.EMPTY_SERVER_LIST; - if (shouldAssignFavoredNodes(hri)) { - FavoredNodesManager fnm = ((MasterServices)server).getFavoredNodesManager(); - favoredNodes = fnm.getFavoredNodesWithDNPort(hri); - } - serverManager.sendRegionOpen(serverName, hri, favoredNodes); - return; // we're done - } catch (Throwable t) { - if (t instanceof RemoteException) { - t = ((RemoteException) t).unwrapRemoteException(); - } - if (t instanceof FailedServerException && i < maximumAttempts) { - // In case the server is in the failed server list, no point to - // retry too soon. 
Retry after the failed_server_expiry time - try { - Configuration conf = this.server.getConfiguration(); - long sleepTime = 1 + conf.getInt(RpcClient.FAILED_SERVER_EXPIRY_KEY, - RpcClient.FAILED_SERVER_EXPIRY_DEFAULT); - if (LOG.isDebugEnabled()) { - LOG.debug(serverName + " is on failed server list; waiting " - + sleepTime + "ms", t); - } - Thread.sleep(sleepTime); - continue; - } catch (InterruptedException ie) { - LOG.warn("Failed to assign " - + hri.getRegionNameAsString() + " since interrupted", ie); - regionStates.updateRegionState(hri, State.FAILED_OPEN); - Thread.currentThread().interrupt(); - return; - } - } - if (serverManager.isServerOnline(serverName) - && t instanceof java.net.SocketTimeoutException) { - i--; // reset the try count - } else { - LOG.info("Got exception in retrying sendRegionOpen for " - + regionState + "; try=" + i + " of " + maximumAttempts, t); - } - Threads.sleep(100); - } - } - // Run out of attempts - regionStates.updateRegionState(hri, State.FAILED_OPEN); - } finally { - lock.unlock(); - } - } - }); - } - - /** - * At master failover, for pending_close region, make sure - * sendRegionClose RPC call is sent to the target regionserver - */ - private void retrySendRegionClose(final RegionState regionState) { - this.executorService.submit( - new EventHandler(server, EventType.M_MASTER_RECOVERY) { - @Override - public void process() throws IOException { - HRegionInfo hri = regionState.getRegion(); - ServerName serverName = regionState.getServerName(); - ReentrantLock lock = locker.acquireLock(hri.getEncodedName()); - try { - for (int i = 1; i <= maximumAttempts; i++) { - if (!serverManager.isServerOnline(serverName) - || server.isStopped() || server.isAborted()) { - return; // No need any more - } - try { - if (!regionState.equals(regionStates.getRegionState(hri))) { - return; // Region is not in the expected state any more - } - serverManager.sendRegionClose(serverName, hri, null); - return; // Done. - } catch (Throwable t) { - if (t instanceof RemoteException) { - t = ((RemoteException) t).unwrapRemoteException(); - } - if (t instanceof FailedServerException && i < maximumAttempts) { - // In case the server is in the failed server list, no point to - // retry too soon. Retry after the failed_server_expiry time - try { - Configuration conf = this.server.getConfiguration(); - long sleepTime = 1 + conf.getInt(RpcClient.FAILED_SERVER_EXPIRY_KEY, - RpcClient.FAILED_SERVER_EXPIRY_DEFAULT); - if (LOG.isDebugEnabled()) { - LOG.debug(serverName + " is on failed server list; waiting " - + sleepTime + "ms", t); - } - Thread.sleep(sleepTime); - continue; - } catch (InterruptedException ie) { - LOG.warn("Failed to unassign " - + hri.getRegionNameAsString() + " since interrupted", ie); - regionStates.updateRegionState(hri, RegionState.State.FAILED_CLOSE); - Thread.currentThread().interrupt(); - return; - } - } - if (serverManager.isServerOnline(serverName) - && t instanceof java.net.SocketTimeoutException) { - i--; // reset the try count - } else { - LOG.info("Got exception in retrying sendRegionClose for " - + regionState + "; try=" + i + " of " + maximumAttempts, t); - } - Threads.sleep(100); - } - } - // Run out of attempts - regionStates.updateRegionState(hri, State.FAILED_CLOSE); - } finally { - lock.unlock(); - } - } - }); - } - - /** - * Set Regions in transitions metrics. - * This takes an iterator on the RegionInTransition map (CLSM), and is not synchronized. 
- * This iterator is not fail fast, which may lead to stale read; but that's better than - * creating a copy of the map for metrics computation, as this method will be invoked - * on a frequent interval. - */ - public void updateRegionsInTransitionMetrics() { - long currentTime = System.currentTimeMillis(); - int totalRITs = 0; - int totalRITsOverThreshold = 0; - long oldestRITTime = 0; - int ritThreshold = this.server.getConfiguration(). - getInt(HConstants.METRICS_RIT_STUCK_WARNING_THRESHOLD, 60000); - for (RegionState state: regionStates.getRegionsInTransition()) { - totalRITs++; - long ritTime = currentTime - state.getStamp(); - if (ritTime > ritThreshold) { // more than the threshold - totalRITsOverThreshold++; - } - if (oldestRITTime < ritTime) { - oldestRITTime = ritTime; - } - } - if (this.metricsAssignmentManager != null) { - this.metricsAssignmentManager.updateRITOldestAge(oldestRITTime); - this.metricsAssignmentManager.updateRITCount(totalRITs); - this.metricsAssignmentManager.updateRITCountOverThreshold(totalRITsOverThreshold); - } - } - - /** - * @param region Region whose plan we are to clear. - */ - private void clearRegionPlan(final HRegionInfo region) { - synchronized (this.regionPlans) { - this.regionPlans.remove(region.getEncodedName()); - } - } - - /** - * Wait on region to clear regions-in-transition. - * @param hri Region to wait on. - * @throws IOException - */ - public void waitOnRegionToClearRegionsInTransition(final HRegionInfo hri) - throws IOException, InterruptedException { - waitOnRegionToClearRegionsInTransition(hri, -1L); - } - - /** - * Wait on region to clear regions-in-transition or time out - * @param hri - * @param timeOut Milliseconds to wait for current region to be out of transition state. - * @return True when a region clears regions-in-transition before timeout otherwise false - * @throws InterruptedException - */ - public boolean waitOnRegionToClearRegionsInTransition(final HRegionInfo hri, long timeOut) - throws InterruptedException { - if (!regionStates.isRegionInTransition(hri)) { - return true; - } - long end = (timeOut <= 0) ? Long.MAX_VALUE : EnvironmentEdgeManager.currentTime() - + timeOut; - // There is already a timeout monitor on regions in transition so I - // should not have to have one here too? 
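The metrics pass above reduces the regions-in-transition view to three numbers: total count, count older than the stuck-warning threshold, and the oldest age. A small sketch of that reduction over plain timestamps; the class and method names are illustrative, not from the patch:

import java.util.Collection;

// Illustrative sketch: derive RIT totals from the transition start times.
final class RitMetrics {
  final int total;
  final int overThreshold;
  final long oldestAgeMillis;

  RitMetrics(int total, int overThreshold, long oldestAgeMillis) {
    this.total = total;
    this.overThreshold = overThreshold;
    this.oldestAgeMillis = oldestAgeMillis;
  }

  static RitMetrics compute(Collection<Long> transitionStamps, long nowMillis, long thresholdMillis) {
    int total = 0;
    int over = 0;
    long oldest = 0;
    for (long stamp : transitionStamps) {
      total++;
      long age = nowMillis - stamp;
      if (age > thresholdMillis) {
        over++;
      }
      if (age > oldest) {
        oldest = age;
      }
    }
    return new RitMetrics(total, over, oldest);
  }
}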
- LOG.info("Waiting for " + hri.getEncodedName() + - " to leave regions-in-transition, timeOut=" + timeOut + " ms."); - while (!this.server.isStopped() && regionStates.isRegionInTransition(hri)) { - regionStates.waitForUpdate(100); - if (EnvironmentEdgeManager.currentTime() > end) { - LOG.info("Timed out on waiting for " + hri.getEncodedName() + " to be assigned."); - return false; - } - } - if (this.server.isStopped()) { - LOG.info("Giving up wait on regions in transition because stoppable.isStopped is set"); - return false; - } - return true; - } - - void invokeAssign(HRegionInfo regionInfo) { - threadPoolExecutorService.submit(new AssignCallable(this, regionInfo)); - } - - void invokeAssignLater(HRegionInfo regionInfo, long sleepMillis) { - scheduledThreadPoolExecutor.schedule(new DelayedAssignCallable( - new AssignCallable(this, regionInfo)), sleepMillis, TimeUnit.MILLISECONDS); - } - - void invokeUnAssign(HRegionInfo regionInfo) { - threadPoolExecutorService.submit(new UnAssignCallable(this, regionInfo)); - } - - public boolean isCarryingMeta(ServerName serverName) { - return isCarryingRegion(serverName, HRegionInfo.FIRST_META_REGIONINFO); - } - - public boolean isCarryingMetaReplica(ServerName serverName, int replicaId) { - return isCarryingRegion(serverName, - RegionReplicaUtil.getRegionInfoForReplica(HRegionInfo.FIRST_META_REGIONINFO, replicaId)); - } - - public boolean isCarryingMetaReplica(ServerName serverName, HRegionInfo metaHri) { - return isCarryingRegion(serverName, metaHri); - } - - /** - * Check if the shutdown server carries the specific region. - * @return whether the serverName currently hosts the region - */ - private boolean isCarryingRegion(ServerName serverName, HRegionInfo hri) { - RegionState regionState = regionStates.getRegionTransitionState(hri); - ServerName transitionAddr = regionState != null? regionState.getServerName(): null; - if (transitionAddr != null) { - boolean matchTransitionAddr = transitionAddr.equals(serverName); - LOG.debug("Checking region=" + hri.getRegionNameAsString() - + ", transitioning on server=" + matchTransitionAddr - + " server being checked: " + serverName - + ", matches=" + matchTransitionAddr); - return matchTransitionAddr; - } - - ServerName assignedAddr = regionStates.getRegionServerOfRegion(hri); - boolean matchAssignedAddr = serverName.equals(assignedAddr); - LOG.debug("based on AM, current region=" + hri.getRegionNameAsString() - + " is on server=" + assignedAddr + ", server being checked: " - + serverName); - return matchAssignedAddr; - } - - /** - * Clean out crashed server removing any assignments. - * @param sn Server that went down. - * @return list of regions in transition on this server - */ - public List cleanOutCrashedServerReferences(final ServerName sn) { - // Clean out any existing assignment plans for this server - synchronized (this.regionPlans) { - for (Iterator > i = this.regionPlans.entrySet().iterator(); - i.hasNext();) { - Map.Entry e = i.next(); - ServerName otherSn = e.getValue().getDestination(); - // The name will be null if the region is planned for a random assign. 
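The crashed-server cleanup here walks the plan map with an explicit iterator so entries whose destination is the dead server can be dropped in place without a ConcurrentModificationException. A stripped-down sketch of that shape, with plain strings standing in for encoded region names and server names:

import java.util.Iterator;
import java.util.Map;

// Illustrative sketch: remove every plan whose destination is the crashed server.
final class PlanCleanup {
  static void removePlansFor(Map<String, String> plansByRegion, String crashedServer) {
    for (Iterator<Map.Entry<String, String>> it = plansByRegion.entrySet().iterator(); it.hasNext();) {
      Map.Entry<String, String> e = it.next();
      // The destination may be null when the region is headed for a random assign.
      if (crashedServer.equals(e.getValue())) {
        it.remove(); // iterator removal avoids ConcurrentModificationException
      }
    }
  }
}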
- if (otherSn != null && otherSn.equals(sn)) { - // Use iterator's remove else we'll get CME - i.remove(); - } - } - } - List rits = regionStates.serverOffline(sn); - for (Iterator it = rits.iterator(); it.hasNext(); ) { - HRegionInfo hri = it.next(); - String encodedName = hri.getEncodedName(); - - // We need a lock on the region as we could update it - Lock lock = locker.acquireLock(encodedName); - try { - RegionState regionState = regionStates.getRegionTransitionState(encodedName); - if (regionState == null - || (regionState.getServerName() != null && !regionState.isOnServer(sn)) - || !RegionStates.isOneOfStates(regionState, State.PENDING_OPEN, - State.OPENING, State.FAILED_OPEN, State.FAILED_CLOSE, State.OFFLINE)) { - LOG.info("Skip " + regionState + " since it is not opening/failed_close" - + " on the dead server any more: " + sn); - it.remove(); - } else { - if (tableStateManager.isTableState(hri.getTable(), - TableState.State.DISABLED, TableState.State.DISABLING)) { - regionStates.regionOffline(hri); - it.remove(); - continue; - } - // Mark the region offline and assign it again by SSH - regionStates.updateRegionState(hri, State.OFFLINE); - } - } finally { - lock.unlock(); - } - } - return rits; - } - - /** - * @param plan Plan to execute. - */ - public void balance(final RegionPlan plan) { - - HRegionInfo hri = plan.getRegionInfo(); - TableName tableName = hri.getTable(); - if (tableStateManager.isTableState(tableName, - TableState.State.DISABLED, TableState.State.DISABLING)) { - LOG.info("Ignored moving region of disabling/disabled table " - + tableName); - return; - } - - // Move the region only if it's assigned - String encodedName = hri.getEncodedName(); - ReentrantLock lock = locker.acquireLock(encodedName); - try { - if (!regionStates.isRegionOnline(hri)) { - RegionState state = regionStates.getRegionState(encodedName); - LOG.info("Ignored moving region not assigned: " + hri + ", " - + (state == null ? "not in region states" : state)); - return; - } - synchronized (this.regionPlans) { - this.regionPlans.put(plan.getRegionName(), plan); - } - unassign(hri, plan.getDestination()); - } finally { - lock.unlock(); - } - } - - public void stop() { - // Shutdown the threadpool executor service - threadPoolExecutorService.shutdownNow(); - regionStateStore.stop(); - } - - protected void setEnabledTable(TableName tableName) { - try { - this.tableStateManager.setTableState(tableName, - TableState.State.ENABLED); - } catch (IOException e) { - // here we can abort as it is the start up flow - String errorMsg = "Unable to ensure that the table " + tableName - + " will be" + " enabled because of a ZooKeeper issue"; - LOG.error(errorMsg); - this.server.abort(errorMsg, e); - } - } - - @edu.umd.cs.findbugs.annotations.SuppressWarnings( - value="AT_OPERATION_SEQUENCE_ON_CONCURRENT_ABSTRACTION", - justification="Worth fixing but not the end of the world.") - private String onRegionFailedOpen(final RegionState current, - final HRegionInfo hri, final ServerName serverName) { - // The region must be opening on this server. - // If current state is failed_open on the same server, - // it could be a reportRegionTransition RPC retry. - if (current == null || !current.isOpeningOrFailedOpenOnServer(serverName)) { - return hri.getShortNameToLog() + " is not opening on " + serverName; - } - - // Just return in case of retrying - if (current.isFailedOpen()) { - return null; - } - - String encodedName = hri.getEncodedName(); - // FindBugs: AT_OPERATION_SEQUENCE_ON_CONCURRENT_ABSTRACTION Worth fixing!!! 
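The failedOpenTracker bookkeeping in onRegionFailedOpen keeps a per-region AtomicInteger so repeated open failures can be counted cheaply (the enclosing region lock already serializes the updates), giving up and clearing the entry once the attempt limit is reached. A compact sketch of that bookkeeping; the class name and the ConcurrentHashMap choice are assumptions made for a self-contained example:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;

// Illustrative sketch: count failed opens per region; once the limit is hit the
// entry is dropped so a later assign starts from a clean count.
final class FailedOpenTracker {
  private final Map<String, AtomicInteger> failures = new ConcurrentHashMap<>();

  boolean recordFailureAndCheckGiveUp(String encodedRegionName, int maxAttempts) {
    AtomicInteger count = failures.computeIfAbsent(encodedRegionName, k -> new AtomicInteger());
    if (count.incrementAndGet() >= maxAttempts) {
      failures.remove(encodedRegionName);
      return true;  // caller marks the region FAILED_OPEN
    }
    return false;   // caller schedules another assign, typically with backoff
  }
}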
- AtomicInteger failedOpenCount = failedOpenTracker.get(encodedName); - if (failedOpenCount == null) { - failedOpenCount = new AtomicInteger(); - // No need to use putIfAbsent, or extra synchronization since - // this whole handleRegion block is locked on the encoded region - // name, and failedOpenTracker is updated only in this block - failedOpenTracker.put(encodedName, failedOpenCount); - } - if (failedOpenCount.incrementAndGet() >= maximumAttempts && !hri.isMetaRegion()) { - regionStates.updateRegionState(hri, State.FAILED_OPEN); - // remove the tracking info to save memory, also reset - // the count for next open initiative - failedOpenTracker.remove(encodedName); - } else { - if (hri.isMetaRegion() && failedOpenCount.get() >= maximumAttempts) { - // Log a warning message if a meta region failedOpenCount exceeds maximumAttempts - // so that we are aware of potential problem if it persists for a long time. - LOG.warn("Failed to open the hbase:meta region " + - hri.getRegionNameAsString() + " after" + - failedOpenCount.get() + " retries. Continue retrying."); - } - - // Handle this the same as if it were opened and then closed. - RegionState regionState = regionStates.updateRegionState(hri, State.CLOSED); - if (regionState != null) { - // When there are more than one region server a new RS is selected as the - // destination and the same is updated in the region plan. (HBASE-5546) - if (getTableStateManager().isTableState(hri.getTable(), - TableState.State.DISABLED, TableState.State.DISABLING) || - replicasToClose.contains(hri)) { - offlineDisabledRegion(hri); - return null; - } - regionStates.updateRegionState(hri, RegionState.State.CLOSED); - // This below has to do w/ online enable/disable of a table - removeClosedRegion(hri); - try { - getRegionPlan(hri, true); - } catch (HBaseIOException e) { - LOG.warn("Failed to get region plan", e); - } - // Have the current thread sleep a bit before resubmitting the RPC request - long sleepTime = backoffPolicy.getBackoffTime(retryConfig, - failedOpenTracker.get(encodedName).get()); - invokeAssignLater(hri, sleepTime); - } - } - // Null means no error - return null; - } - - private String onRegionOpen(final RegionState current, final HRegionInfo hri, - final ServerName serverName, final RegionStateTransition transition) { - // The region must be opening on this server. - // If current state is already opened on the same server, - // it could be a reportRegionTransition RPC retry. - if (current == null || !current.isOpeningOrOpenedOnServer(serverName)) { - return hri.getShortNameToLog() + " is not opening on " + serverName; - } - - // Just return in case of retrying - if (current.isOpened()) { - return null; - } - - long openSeqNum = transition.hasOpenSeqNum() - ? transition.getOpenSeqNum() : HConstants.NO_SEQNUM; - if (openSeqNum < 0) { - return "Newly opened region has invalid open seq num " + openSeqNum; - } - regionOnline(hri, serverName, openSeqNum); - - // reset the count, if any - failedOpenTracker.remove(hri.getEncodedName()); - if (getTableStateManager().isTableState(hri.getTable(), - TableState.State.DISABLED, TableState.State.DISABLING)) { - invokeUnAssign(hri); - } - return null; - } - - private String onRegionClosed(final RegionState current, - final HRegionInfo hri, final ServerName serverName) { - // Region will be usually assigned right after closed. When a RPC retry comes - // in, the region may already have moved away from closed state. 
However, on the - // region server side, we don't care much about the response for this transition. - // We only make sure master has got and processed this report, either - // successfully or not. So this is fine, not a problem at all. - if (current == null || !current.isClosingOrClosedOnServer(serverName)) { - return hri.getShortNameToLog() + " is not closing on " + serverName; - } - - // Just return in case of retrying - if (current.isClosed()) { - return null; - } - - if (getTableStateManager().isTableState(hri.getTable(), TableState.State.DISABLED, - TableState.State.DISABLING) || replicasToClose.contains(hri)) { - offlineDisabledRegion(hri); - return null; - } - - regionStates.updateRegionState(hri, RegionState.State.CLOSED); - sendRegionClosedNotification(hri); - // This below has to do w/ online enable/disable of a table - removeClosedRegion(hri); - invokeAssign(hri); - return null; - } - - private String onRegionReadyToSplit(final RegionState current, final HRegionInfo hri, - final ServerName serverName, final RegionStateTransition transition) { - // The region must be opened on this server. - // If current state is already splitting on the same server, - // it could be a reportRegionTransition RPC retry. - if (current == null || !current.isSplittingOrOpenedOnServer(serverName)) { - return hri.getShortNameToLog() + " is not opening on " + serverName; - } - - if (!((HMaster)server).getSplitOrMergeTracker().isSplitOrMergeEnabled( - MasterSwitchType.SPLIT)) { - return "split switch is off!"; - } - - // Just return in case of retrying - if (current.isSplitting()) { - return null; - } - - final HRegionInfo a = HRegionInfo.convert(transition.getRegionInfo(1)); - final HRegionInfo b = HRegionInfo.convert(transition.getRegionInfo(2)); - RegionState rs_a = regionStates.getRegionState(a); - RegionState rs_b = regionStates.getRegionState(b); - if (rs_a != null || rs_b != null) { - return "Some daughter is already existing. " - + "a=" + rs_a + ", b=" + rs_b; - } - - // Server holding is not updated at this stage. - // It is done after PONR. - regionStates.updateRegionState(hri, State.SPLITTING); - regionStates.createRegionState( - a, State.SPLITTING_NEW, serverName, null); - regionStates.createRegionState( - b, State.SPLITTING_NEW, serverName, null); - return null; - } - - private String onRegionSplitPONR(final RegionState current, final HRegionInfo hri, - final ServerName serverName, final RegionStateTransition transition) { - // The region must be splitting on this server, and the daughters must be in - // splitting_new state. To check RPC retry, we use server holding info. 
- if (current == null || !current.isSplittingOnServer(serverName)) { - return hri.getShortNameToLog() + " is not splitting on " + serverName; - } - - final HRegionInfo a = HRegionInfo.convert(transition.getRegionInfo(1)); - final HRegionInfo b = HRegionInfo.convert(transition.getRegionInfo(2)); - RegionState rs_a = regionStates.getRegionState(a); - RegionState rs_b = regionStates.getRegionState(b); - - // Master could have restarted and lost the new region - // states, if so, they must be lost together - if (rs_a == null && rs_b == null) { - rs_a = regionStates.createRegionState( - a, State.SPLITTING_NEW, serverName, null); - rs_b = regionStates.createRegionState( - b, State.SPLITTING_NEW, serverName, null); - } - - if (rs_a == null || !rs_a.isSplittingNewOnServer(serverName) - || rs_b == null || !rs_b.isSplittingNewOnServer(serverName)) { - return "Some daughter is not known to be splitting on " + serverName - + ", a=" + rs_a + ", b=" + rs_b; - } - - // Just return in case of retrying - if (!regionStates.isRegionOnServer(hri, serverName)) { - return null; - } - - try { - regionStates.splitRegion(hri, a, b, serverName); - processFavoredNodesForDaughters(hri, a ,b); - } catch (IOException ioe) { - LOG.info("Failed to record split region " + hri.getShortNameToLog()); - return "Failed to record the splitting in meta"; - } - return null; - } - - public void assignDaughterRegions( - final HRegionInfo parentHRI, - final HRegionInfo daughterAHRI, - final HRegionInfo daughterBHRI) throws InterruptedException, IOException { - //Offline the parent region - regionOffline(parentHRI, State.SPLIT); - - //Set daughter regions to offline - regionStates.prepareAssignDaughters(daughterAHRI, daughterBHRI); - - // Assign daughter regions - invokeAssign(daughterAHRI); - invokeAssign(daughterBHRI); - - Callable splitReplicasCallable = new Callable() { - @Override - public Object call() { - doSplittingOfReplicas(parentHRI, daughterAHRI, daughterBHRI); - return null; - } - }; - threadPoolExecutorService.submit(splitReplicasCallable); - - // wait for assignment completion - ArrayList regionAssignSet = new ArrayList(2); - regionAssignSet.add(daughterAHRI); - regionAssignSet.add(daughterBHRI); - while (!waitForAssignment(regionAssignSet, true, regionAssignSet.size(), - Long.MAX_VALUE)) { - LOG.debug("some user regions are still in transition: " + regionAssignSet); - } - } - - private String onRegionSplit(final RegionState current, final HRegionInfo hri, - final ServerName serverName, final RegionStateTransition transition) { - // The region must be splitting on this server, and the daughters must be in - // splitting_new state. - // If current state is already split on the same server, - // it could be a reportRegionTransition RPC retry. 
- if (current == null || !current.isSplittingOrSplitOnServer(serverName)) { - return hri.getShortNameToLog() + " is not splitting on " + serverName; - } - - // Just return in case of retrying - if (current.isSplit()) { - return null; - } - - final HRegionInfo a = HRegionInfo.convert(transition.getRegionInfo(1)); - final HRegionInfo b = HRegionInfo.convert(transition.getRegionInfo(2)); - RegionState rs_a = regionStates.getRegionState(a); - RegionState rs_b = regionStates.getRegionState(b); - if (rs_a == null || !rs_a.isSplittingNewOnServer(serverName) - || rs_b == null || !rs_b.isSplittingNewOnServer(serverName)) { - return "Some daughter is not known to be splitting on " + serverName - + ", a=" + rs_a + ", b=" + rs_b; - } - - if (TEST_SKIP_SPLIT_HANDLING) { - return "Skipping split message, TEST_SKIP_SPLIT_HANDLING is set"; - } - regionOffline(hri, State.SPLIT); - regionOnline(a, serverName, 1); - regionOnline(b, serverName, 1); - - // User could disable the table before master knows the new region. - if (getTableStateManager().isTableState(hri.getTable(), - TableState.State.DISABLED, TableState.State.DISABLING)) { - invokeUnAssign(a); - invokeUnAssign(b); - } else { - Callable splitReplicasCallable = new Callable() { - @Override - public Object call() { - doSplittingOfReplicas(hri, a, b); - return null; - } - }; - threadPoolExecutorService.submit(splitReplicasCallable); - } - return null; - } - - private String onRegionSplitReverted(final RegionState current, final HRegionInfo hri, - final ServerName serverName, final RegionStateTransition transition) { - // The region must be splitting on this server, and the daughters must be in - // splitting_new state. - // If the region is in open state, it could be an RPC retry. - if (current == null || !current.isSplittingOrOpenedOnServer(serverName)) { - return hri.getShortNameToLog() + " is not splitting on " + serverName; - } - - // Just return in case of retrying - if (current.isOpened()) { - return null; - } - - final HRegionInfo a = HRegionInfo.convert(transition.getRegionInfo(1)); - final HRegionInfo b = HRegionInfo.convert(transition.getRegionInfo(2)); - RegionState rs_a = regionStates.getRegionState(a); - RegionState rs_b = regionStates.getRegionState(b); - if (rs_a == null || !rs_a.isSplittingNewOnServer(serverName) - || rs_b == null || !rs_b.isSplittingNewOnServer(serverName)) { - return "Some daughter is not known to be splitting on " + serverName - + ", a=" + rs_a + ", b=" + rs_b; - } - - regionOnline(hri, serverName); - regionOffline(a); - regionOffline(b); - if (getTableStateManager().isTableState(hri.getTable(), - TableState.State.DISABLED, TableState.State.DISABLING)) { - invokeUnAssign(hri); - } - return null; - } - - private String onRegionReadyToMerge(final RegionState current, final HRegionInfo hri, - final ServerName serverName, final RegionStateTransition transition) { - // The region must be new, and the daughters must be open on this server. - // If the region is in merge_new state, it could be an RPC retry. 
- if (current != null && !current.isMergingNewOnServer(serverName)) { - return "Merging daughter region already exists, p=" + current; - } - - if (!((HMaster)server).getSplitOrMergeTracker().isSplitOrMergeEnabled( - MasterSwitchType.MERGE)) { - return "merge switch is off!"; - } - // Just return in case of retrying - if (current != null) { - return null; - } - - final HRegionInfo a = HRegionInfo.convert(transition.getRegionInfo(1)); - final HRegionInfo b = HRegionInfo.convert(transition.getRegionInfo(2)); - Set encodedNames = new HashSet(2); - encodedNames.add(a.getEncodedName()); - encodedNames.add(b.getEncodedName()); - Map locks = locker.acquireLocks(encodedNames); - try { - RegionState rs_a = regionStates.getRegionState(a); - RegionState rs_b = regionStates.getRegionState(b); - if (rs_a == null || !rs_a.isOpenedOnServer(serverName) - || rs_b == null || !rs_b.isOpenedOnServer(serverName)) { - return "Some daughter is not in a state to merge on " + serverName - + ", a=" + rs_a + ", b=" + rs_b; - } - - regionStates.updateRegionState(a, State.MERGING); - regionStates.updateRegionState(b, State.MERGING); - regionStates.createRegionState( - hri, State.MERGING_NEW, serverName, null); - return null; - } finally { - for (Lock lock: locks.values()) { - lock.unlock(); - } - } - } - - private String onRegionMergePONR(final RegionState current, final HRegionInfo hri, - final ServerName serverName, final RegionStateTransition transition) { - // The region must be in merging_new state, and the daughters must be - // merging. To check RPC retry, we use server holding info. - if (current != null && !current.isMergingNewOnServer(serverName)) { - return hri.getShortNameToLog() + " is not merging on " + serverName; - } - - final HRegionInfo a = HRegionInfo.convert(transition.getRegionInfo(1)); - final HRegionInfo b = HRegionInfo.convert(transition.getRegionInfo(2)); - RegionState rs_a = regionStates.getRegionState(a); - RegionState rs_b = regionStates.getRegionState(b); - if (rs_a == null || !rs_a.isMergingOnServer(serverName) - || rs_b == null || !rs_b.isMergingOnServer(serverName)) { - return "Some daughter is not known to be merging on " + serverName - + ", a=" + rs_a + ", b=" + rs_b; - } - - // Master could have restarted and lost the new region state - if (current == null) { - regionStates.createRegionState( - hri, State.MERGING_NEW, serverName, null); - } - - // Just return in case of retrying - if (regionStates.isRegionOnServer(hri, serverName)) { - return null; - } - - try { - regionStates.mergeRegions(hri, a, b, serverName); - } catch (IOException ioe) { - LOG.info("Failed to record merged region " + hri.getShortNameToLog()); - return "Failed to record the merging in meta"; - } - return null; - } - - public void assignMergedRegion( - final HRegionInfo mergedRegion, - final HRegionInfo daughterAHRI, - final HRegionInfo daughterBHRI) throws InterruptedException, IOException { - //Offline the daughter regions - regionOffline(daughterAHRI, State.MERGED); - regionOffline(daughterBHRI, State.MERGED); - - //Set merged region to offline - regionStates.prepareAssignMergedRegion(mergedRegion); - - // Assign merged region - invokeAssign(mergedRegion); - - Callable mergeReplicasCallable = new Callable() { - @Override - public Object call() { - doMergingOfReplicas(mergedRegion, daughterAHRI, daughterBHRI); - return null; - } - }; - threadPoolExecutorService.submit(mergeReplicasCallable); - - // wait for assignment completion - ArrayList regionAssignSet = new ArrayList(1); - regionAssignSet.add(mergedRegion); 
- while (!waitForAssignment(regionAssignSet, true, regionAssignSet.size(), Long.MAX_VALUE)) { - LOG.debug("The merged region " + mergedRegion + " is still in transition. "); - } - - regionStateListener.onRegionMerged(mergedRegion); - } - - private String onRegionMerged(final RegionState current, final HRegionInfo hri, - final ServerName serverName, final RegionStateTransition transition) { - // The region must be in merging_new state, and the daughters must be - // merging on this server. - // If current state is already opened on the same server, - // it could be a reportRegionTransition RPC retry. - if (current == null || !current.isMergingNewOrOpenedOnServer(serverName)) { - return hri.getShortNameToLog() + " is not merging on " + serverName; - } - - // Just return in case of retrying - if (current.isOpened()) { - return null; - } - - final HRegionInfo a = HRegionInfo.convert(transition.getRegionInfo(1)); - final HRegionInfo b = HRegionInfo.convert(transition.getRegionInfo(2)); - RegionState rs_a = regionStates.getRegionState(a); - RegionState rs_b = regionStates.getRegionState(b); - if (rs_a == null || !rs_a.isMergingOnServer(serverName) - || rs_b == null || !rs_b.isMergingOnServer(serverName)) { - return "Some daughter is not known to be merging on " + serverName - + ", a=" + rs_a + ", b=" + rs_b; - } - - regionOffline(a, State.MERGED); - regionOffline(b, State.MERGED); - regionOnline(hri, serverName, 1); - - try { - processFavoredNodesForMerge(hri, a, b); - } catch (IOException e) { - LOG.error("Error while processing favored nodes after merge.", e); - return StringUtils.stringifyException(e); - } - - // User could disable the table before master knows the new region. - if (getTableStateManager().isTableState(hri.getTable(), - TableState.State.DISABLED, TableState.State.DISABLING)) { - invokeUnAssign(hri); - } else { - Callable mergeReplicasCallable = new Callable() { - @Override - public Object call() { - doMergingOfReplicas(hri, a, b); - return null; - } - }; - threadPoolExecutorService.submit(mergeReplicasCallable); - } - return null; - } - - private String onRegionMergeReverted(final RegionState current, final HRegionInfo hri, - final ServerName serverName, final RegionStateTransition transition) { - // The region must be in merging_new state, and the daughters must be - // merging on this server. - // If the region is in offline state, it could be an RPC retry. - if (current == null || !current.isMergingNewOrOfflineOnServer(serverName)) { - return hri.getShortNameToLog() + " is not merging on " + serverName; - } - - // Just return in case of retrying - if (current.isOffline()) { - return null; - } - - final HRegionInfo a = HRegionInfo.convert(transition.getRegionInfo(1)); - final HRegionInfo b = HRegionInfo.convert(transition.getRegionInfo(2)); - RegionState rs_a = regionStates.getRegionState(a); - RegionState rs_b = regionStates.getRegionState(b); - if (rs_a == null || !rs_a.isMergingOnServer(serverName) - || rs_b == null || !rs_b.isMergingOnServer(serverName)) { - return "Some daughter is not known to be merging on " + serverName - + ", a=" + rs_a + ", b=" + rs_b; - } - - // Always bring the children back online. Even if they are not offline - // there's no harm in making them online again. - regionOnline(a, serverName); - regionOnline(b, serverName); - - // Only offline the merging region if it is known to exist. 
- RegionState rs_p = regionStates.getRegionState(hri); - if (rs_p != null) { - regionOffline(hri); - } - - if (getTableStateManager().isTableState(hri.getTable(), - TableState.State.DISABLED, TableState.State.DISABLING)) { - invokeUnAssign(a); - invokeUnAssign(b); - } - return null; - } - - private void doMergingOfReplicas(HRegionInfo mergedHri, final HRegionInfo hri_a, - final HRegionInfo hri_b) { - // Close replicas for the original unmerged regions. create/assign new replicas - // for the merged parent. - List unmergedRegions = new ArrayList(); - unmergedRegions.add(hri_a); - unmergedRegions.add(hri_b); - Map> map = regionStates.getRegionAssignments(unmergedRegions); - Collection> c = map.values(); - for (List l : c) { - for (HRegionInfo h : l) { - if (!RegionReplicaUtil.isDefaultReplica(h)) { - LOG.debug("Unassigning un-merged replica " + h); - unassign(h); - } - } - } - int numReplicas = getNumReplicas(server, mergedHri.getTable()); - List regions = new ArrayList(); - for (int i = 1; i < numReplicas; i++) { - regions.add(RegionReplicaUtil.getRegionInfoForReplica(mergedHri, i)); - } - try { - assign(regions); - } catch (IOException ioe) { - LOG.warn("Couldn't assign all replica(s) of region " + mergedHri + " because of " + - ioe.getMessage()); - } catch (InterruptedException ie) { - LOG.warn("Couldn't assign all replica(s) of region " + mergedHri+ " because of " + - ie.getMessage()); - } - } - - private void doSplittingOfReplicas(final HRegionInfo parentHri, final HRegionInfo hri_a, - final HRegionInfo hri_b) { - // create new regions for the replica, and assign them to match with the - // current replica assignments. If replica1 of parent is assigned to RS1, - // the replica1s of daughters will be on the same machine - int numReplicas = getNumReplicas(server, parentHri.getTable()); - // unassign the old replicas - List parentRegion = new ArrayList(); - parentRegion.add(parentHri); - Map> currentAssign = - regionStates.getRegionAssignments(parentRegion); - Collection> c = currentAssign.values(); - for (List l : c) { - for (HRegionInfo h : l) { - if (!RegionReplicaUtil.isDefaultReplica(h)) { - LOG.debug("Unassigning parent's replica " + h); - unassign(h); - } - } - } - // assign daughter replicas - Map map = new HashMap(); - for (int i = 1; i < numReplicas; i++) { - prepareDaughterReplicaForAssignment(hri_a, parentHri, i, map); - prepareDaughterReplicaForAssignment(hri_b, parentHri, i, map); - } - try { - assign(map); - } catch (IOException e) { - LOG.warn("Caught exception " + e + " while trying to assign replica(s) of daughter(s)"); - } catch (InterruptedException e) { - LOG.warn("Caught exception " + e + " while trying to assign replica(s) of daughter(s)"); - } - } - - private void prepareDaughterReplicaForAssignment(HRegionInfo daughterHri, HRegionInfo parentHri, - int replicaId, Map map) { - HRegionInfo parentReplica = RegionReplicaUtil.getRegionInfoForReplica(parentHri, replicaId); - HRegionInfo daughterReplica = RegionReplicaUtil.getRegionInfoForReplica(daughterHri, - replicaId); - LOG.debug("Created replica region for daughter " + daughterReplica); - ServerName sn; - if ((sn = regionStates.getRegionServerOfRegion(parentReplica)) != null) { - map.put(daughterReplica, sn); - } else { - List servers = serverManager.getOnlineServersList(); - sn = servers.get((new Random(System.currentTimeMillis())).nextInt(servers.size())); - map.put(daughterReplica, sn); - } - } - - public Set getReplicasToClose() { - return replicasToClose; - } - - public Map getFailedOpenTracker() {return 
failedOpenTracker;} - - /** - * A region is offline. The new state should be the specified one, - * if not null. If the specified state is null, the new state is Offline. - * The specified state can be Split/Merged/Offline/null only. - */ - private void regionOffline(final HRegionInfo regionInfo, final State state) { - regionStates.regionOffline(regionInfo, state); - removeClosedRegion(regionInfo); - // remove the region plan as well just in case. - clearRegionPlan(regionInfo); - balancer.regionOffline(regionInfo); - - // Tell our listeners that a region was closed - sendRegionClosedNotification(regionInfo); - // also note that all the replicas of the primary should be closed - if (state != null && state.equals(State.SPLIT)) { - Collection c = new ArrayList(1); - c.add(regionInfo); - Map> map = regionStates.getRegionAssignments(c); - Collection> allReplicas = map.values(); - for (List list : allReplicas) { - replicasToClose.addAll(list); - } - } - else if (state != null && state.equals(State.MERGED)) { - Collection c = new ArrayList(1); - c.add(regionInfo); - Map> map = regionStates.getRegionAssignments(c); - Collection> allReplicas = map.values(); - for (List list : allReplicas) { - replicasToClose.addAll(list); - } - } - } - - private void sendRegionOpenedNotification(final HRegionInfo regionInfo, - final ServerName serverName) { - if (!this.listeners.isEmpty()) { - for (AssignmentListener listener : this.listeners) { - listener.regionOpened(regionInfo, serverName); - } - } - } - - private void sendRegionClosedNotification(final HRegionInfo regionInfo) { - if (!this.listeners.isEmpty()) { - for (AssignmentListener listener : this.listeners) { - listener.regionClosed(regionInfo); - } - } - } - - /** - * Try to update some region states. If the state machine prevents - * such update, an error message is returned to explain the reason. - * - * It's expected that in each transition there should have just one - * region for opening/closing, 3 regions for splitting/merging. - * These regions should be on the server that requested the change. - * - * Region state machine. Only these transitions - * are expected to be triggered by a region server. - * - * On the state transition: - * (1) Open/Close should be initiated by master - * (a) Master sets the region to pending_open/pending_close - * in memory and hbase:meta after sending the request - * to the region server - * (b) Region server reports back to the master - * after open/close is done (either success/failure) - * (c) If region server has problem to report the status - * to master, it must be because the master is down or some - * temporary network issue. Otherwise, the region server should - * abort since it must be a bug. If the master is not accessible, - * the region server should keep trying until the server is - * stopped or till the status is reported to the (new) master - * (d) If region server dies in the middle of opening/closing - * a region, SSH picks it up and finishes it - * (e) If master dies in the middle, the new master recovers - * the state during initialization from hbase:meta. Region server - * can report any transition that has not been reported to - * the previous active master yet - * (2) Split/merge is initiated by region servers - * (a) To split a region, a region server sends a request - * to master to try to set a region to splitting, together with - * two daughters (to be created) to splitting new. 
If approved - * by the master, the splitting can then move ahead - * (b) To merge two regions, a region server sends a request to - * master to try to set the new merged region (to be created) to - * merging_new, together with two regions (to be merged) to merging. - * If it is ok with the master, the merge can then move ahead - * (c) Once the splitting/merging is done, the region server - * reports the status back to the master either success/failure. - * (d) Other scenarios should be handled similarly as for - * region open/close - */ - public String onRegionTransition(final ServerName serverName, - final RegionStateTransition transition) { - TransitionCode code = transition.getTransitionCode(); - HRegionInfo hri = HRegionInfo.convert(transition.getRegionInfo(0)); - Lock lock = locker.acquireLock(hri.getEncodedName()); - try { - RegionState current = regionStates.getRegionState(hri); - if (LOG.isDebugEnabled()) { - LOG.debug("Got transition " + code + " for " - + (current != null ? current.toString() : hri.getShortNameToLog()) - + " from " + serverName); - } - String errorMsg = null; - switch (code) { - case OPENED: - errorMsg = onRegionOpen(current, hri, serverName, transition); - break; - case FAILED_OPEN: - errorMsg = onRegionFailedOpen(current, hri, serverName); - break; - case CLOSED: - errorMsg = onRegionClosed(current, hri, serverName); - break; - case READY_TO_SPLIT: - try { - regionStateListener.onRegionSplit(hri); - errorMsg = onRegionReadyToSplit(current, hri, serverName, transition); - } catch (IOException exp) { - if (exp instanceof QuotaExceededException) { - server.getRegionNormalizer().planSkipped(hri, PlanType.SPLIT); - } - errorMsg = StringUtils.stringifyException(exp); - } - break; - case SPLIT_PONR: - errorMsg = onRegionSplitPONR(current, hri, serverName, transition); - break; - case SPLIT: - errorMsg = onRegionSplit(current, hri, serverName, transition); - break; - case SPLIT_REVERTED: - errorMsg = onRegionSplitReverted(current, hri, serverName, transition); - if (org.apache.commons.lang.StringUtils.isEmpty(errorMsg)) { - try { - regionStateListener.onRegionSplitReverted(hri); - } catch (IOException exp) { - LOG.warn(StringUtils.stringifyException(exp)); - } - } - break; - case READY_TO_MERGE: - errorMsg = onRegionReadyToMerge(current, hri, serverName, transition); - break; - case MERGE_PONR: - errorMsg = onRegionMergePONR(current, hri, serverName, transition); - break; - case MERGED: - try { - errorMsg = onRegionMerged(current, hri, serverName, transition); - regionStateListener.onRegionMerged(hri); - } catch (IOException exp) { - errorMsg = StringUtils.stringifyException(exp); - } - break; - case MERGE_REVERTED: - errorMsg = onRegionMergeReverted(current, hri, serverName, transition); - break; - - default: - errorMsg = "Unexpected transition code " + code; - } - if (errorMsg != null) { - LOG.info("Could not transition region from " + current + " on " - + code + " by " + serverName + ": " + errorMsg); - } - return errorMsg; - } finally { - lock.unlock(); - } - } - - private void processBogusAssignments(Map> bulkPlan) { - if (bulkPlan.containsKey(LoadBalancer.BOGUS_SERVER_NAME)) { - // Found no plan for some regions, put those regions in RIT - for (HRegionInfo hri : bulkPlan.get(LoadBalancer.BOGUS_SERVER_NAME)) { - regionStates.updateRegionState(hri, State.FAILED_OPEN); - } - bulkPlan.remove(LoadBalancer.BOGUS_SERVER_NAME); - } - } - - /** - * @return Instance of load balancer - */ - public LoadBalancer getBalancer() { - return this.balancer; - } - - public Map> - 
getSnapShotOfAssignment(Collection infos) { - return getRegionStates().getRegionAssignments(infos); - } - - void setRegionStateListener(RegionStateListener listener) { - this.regionStateListener = listener; - } - - private class DelayedAssignCallable implements Runnable { - Callable callable; - public DelayedAssignCallable(Callable callable) { - this.callable = callable; - } - - @Override - public void run() { - threadPoolExecutorService.submit(callable); - } - } -} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/BulkAssigner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/BulkAssigner.java deleted file mode 100644 index 929cd4e..0000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/BulkAssigner.java +++ /dev/null @@ -1,122 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.master; - -import java.io.IOException; -import java.lang.Thread.UncaughtExceptionHandler; -import java.util.concurrent.Executors; - -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.Server; - -import com.google.common.util.concurrent.ThreadFactoryBuilder; - -/** - * Base class used bulk assigning and unassigning regions. - * Encapsulates a fixed size thread pool of executors to run assignment/unassignment. - * Implement {@link #populatePool(java.util.concurrent.ExecutorService)} and - * {@link #waitUntilDone(long)}. The default implementation of - * the {@link #getUncaughtExceptionHandler()} is to abort the hosting - * Server. - */ -@InterfaceAudience.Private -public abstract class BulkAssigner { - protected final Server server; - - /** - * @param server An instance of Server - */ - public BulkAssigner(final Server server) { - this.server = server; - } - - /** - * @return What to use for a thread prefix when executor runs. - */ - protected String getThreadNamePrefix() { - return this.server.getServerName() + "-" + this.getClass().getName(); - } - - protected UncaughtExceptionHandler getUncaughtExceptionHandler() { - return new UncaughtExceptionHandler() { - @Override - public void uncaughtException(Thread t, Throwable e) { - // Abort if exception of any kind. - server.abort("Uncaught exception in " + t.getName(), e); - } - }; - } - - protected int getThreadCount() { - return this.server.getConfiguration(). - getInt("hbase.bulk.assignment.threadpool.size", 20); - } - - protected long getTimeoutOnRIT() { - return this.server.getConfiguration(). 
- getLong("hbase.bulk.assignment.waiton.empty.rit", 5 * 60 * 1000); - } - - protected abstract void populatePool( - final java.util.concurrent.ExecutorService pool) throws IOException; - - public boolean bulkAssign() throws InterruptedException, IOException { - return bulkAssign(true); - } - - /** - * Run the bulk assign. - * - * @param sync - * Whether to assign synchronously. - * @throws InterruptedException - * @return True if done. - * @throws IOException - */ - public boolean bulkAssign(boolean sync) throws InterruptedException, - IOException { - boolean result = false; - ThreadFactoryBuilder builder = new ThreadFactoryBuilder(); - builder.setDaemon(true); - builder.setNameFormat(getThreadNamePrefix() + "-%1$d"); - builder.setUncaughtExceptionHandler(getUncaughtExceptionHandler()); - int threadCount = getThreadCount(); - java.util.concurrent.ExecutorService pool = - Executors.newFixedThreadPool(threadCount, builder.build()); - try { - populatePool(pool); - // How long to wait on empty regions-in-transition. If we timeout, the - // RIT monitor should do fixup. - if (sync) result = waitUntilDone(getTimeoutOnRIT()); - } finally { - // We're done with the pool. It'll exit when its done all in queue. - pool.shutdown(); - } - return result; - } - - /** - * Wait until bulk assign is done. - * @param timeout How long to wait. - * @throws InterruptedException - * @return True if the condition we were waiting on happened. - */ - protected abstract boolean waitUntilDone(final long timeout) - throws InterruptedException; -} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/BulkReOpen.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/BulkReOpen.java deleted file mode 100644 index 606dce4..0000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/BulkReOpen.java +++ /dev/null @@ -1,136 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.master; - -import java.io.IOException; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.concurrent.ExecutorService; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.Server; -import org.apache.hadoop.hbase.ServerName; - -/** - * Performs bulk reopen of the list of regions provided to it. 
- */ -@InterfaceAudience.Private -public class BulkReOpen extends BulkAssigner { - private final Map> rsToRegions; - private final AssignmentManager assignmentManager; - private static final Log LOG = LogFactory.getLog(BulkReOpen.class); - - public BulkReOpen(final Server server, - final Map> serverToRegions, - final AssignmentManager am) { - super(server); - this.assignmentManager = am; - this.rsToRegions = serverToRegions; - } - - /** - * Unassign all regions, so that they go through the regular region - * assignment flow (in assignment manager) and are re-opened. - */ - @Override - protected void populatePool(ExecutorService pool) { - LOG.debug("Creating threads for each region server "); - for (Map.Entry> e : rsToRegions - .entrySet()) { - final List hris = e.getValue(); - // add plans for the regions that need to be reopened - Map plans = new HashMap(); - for (HRegionInfo hri : hris) { - RegionPlan reOpenPlan = assignmentManager.getRegionReopenPlan(hri); - plans.put(hri.getEncodedName(), reOpenPlan); - } - assignmentManager.addPlans(plans); - pool.execute(new Runnable() { - public void run() { - try { - unassign(hris); - } catch (Throwable t) { - LOG.warn("Failed bulking re-open " + hris.size() - + " region(s)", t); - } - } - }); - } - } - - /** - * Reopen the regions asynchronously, so always returns true immediately. - * @return true - */ - @Override - protected boolean waitUntilDone(long timeout) { - return true; - } - - /** - * Configuration knobs "hbase.bulk.reopen.threadpool.size" number of regions - * that can be reopened concurrently. The maximum number of threads the master - * creates is never more than the number of region servers. - * If configuration is not defined it defaults to 20 - */ - protected int getThreadCount() { - int defaultThreadCount = super.getThreadCount(); - return this.server.getConfiguration().getInt( - "hbase.bulk.reopen.threadpool.size", defaultThreadCount); - } - - public boolean bulkReOpen() throws InterruptedException, IOException { - return bulkAssign(); - } - - /** - * Unassign the list of regions. 
Configuration knobs: - * hbase.bulk.waitbetween.reopen indicates the number of milliseconds to - * wait before unassigning another region from this region server - * - * @param regions - * @throws InterruptedException - */ - private void unassign( - List regions) throws InterruptedException { - int waitTime = this.server.getConfiguration().getInt( - "hbase.bulk.waitbetween.reopen", 0); - RegionStates regionStates = assignmentManager.getRegionStates(); - for (HRegionInfo region : regions) { - if (server.isStopped()) { - return; - } - if (regionStates.isRegionInTransition(region)) { - continue; - } - assignmentManager.unassign(region); - while (regionStates.isRegionInTransition(region) - && !server.isStopped()) { - regionStates.waitForUpdate(100); - } - if (waitTime > 0 && !server.isStopped()) { - Thread.sleep(waitTime); - } - } - } -} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java index ef042af..39d7d7a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java @@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.backup.HFileArchiver; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.master.assignment.AssignmentManager; import org.apache.hadoop.hbase.favored.FavoredNodesManager; import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; import org.apache.hadoop.hbase.util.Bytes; @@ -112,7 +113,7 @@ public class CatalogJanitor extends ScheduledChore { && !this.services.isInMaintenanceMode() && am != null && am.isFailoverCleanupDone() - && am.getRegionStates().getRegionsInTransition().isEmpty()) { + && !am.hasRegionsInTransition()) { scan(); } else { LOG.warn("CatalogJanitor disabled! Not running scan."); @@ -471,4 +472,4 @@ public class CatalogJanitor extends ScheduledChore { return cleanMergeRegion(region, mergeRegions.getFirst(), mergeRegions.getSecond()); } -} \ No newline at end of file +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/GeneralBulkAssigner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/GeneralBulkAssigner.java deleted file mode 100644 index 43ea523..0000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/GeneralBulkAssigner.java +++ /dev/null @@ -1,214 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hbase.master; - -import java.lang.Thread.UncaughtExceptionHandler; -import java.util.ArrayList; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.TimeUnit; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.Server; -import org.apache.hadoop.hbase.ServerName; - -/** - * Run bulk assign. Does one RCP per regionserver passing a - * batch of regions using {@link GeneralBulkAssigner.SingleServerBulkAssigner}. - */ -@InterfaceAudience.Private -public class GeneralBulkAssigner extends BulkAssigner { - private static final Log LOG = LogFactory.getLog(GeneralBulkAssigner.class); - - private Map> failedPlans - = new ConcurrentHashMap>(); - private ExecutorService pool; - - final Map> bulkPlan; - final AssignmentManager assignmentManager; - final boolean waitTillAllAssigned; - - public GeneralBulkAssigner(final Server server, - final Map> bulkPlan, - final AssignmentManager am, final boolean waitTillAllAssigned) { - super(server); - this.bulkPlan = bulkPlan; - this.assignmentManager = am; - this.waitTillAllAssigned = waitTillAllAssigned; - } - - @Override - protected String getThreadNamePrefix() { - return this.server.getServerName() + "-GeneralBulkAssigner"; - } - - @Override - protected void populatePool(ExecutorService pool) { - this.pool = pool; // shut it down later in case some assigner hangs - for (Map.Entry> e: this.bulkPlan.entrySet()) { - pool.execute(new SingleServerBulkAssigner(e.getKey(), e.getValue(), - this.assignmentManager, this.failedPlans)); - } - } - - /** - * - * @param timeout How long to wait. - * @return true if done. 
- */ - @Override - protected boolean waitUntilDone(final long timeout) - throws InterruptedException { - Set regionSet = new HashSet(); - for (List regionList : bulkPlan.values()) { - regionSet.addAll(regionList); - } - - pool.shutdown(); // no more task allowed - int serverCount = bulkPlan.size(); - int regionCount = regionSet.size(); - long startTime = System.currentTimeMillis(); - long rpcWaitTime = startTime + timeout; - while (!server.isStopped() && !pool.isTerminated() - && rpcWaitTime > System.currentTimeMillis()) { - if (failedPlans.isEmpty()) { - pool.awaitTermination(100, TimeUnit.MILLISECONDS); - } else { - reassignFailedPlans(); - } - } - if (!pool.isTerminated()) { - LOG.warn("bulk assigner is still running after " - + (System.currentTimeMillis() - startTime) + "ms, shut it down now"); - // some assigner hangs, can't wait any more, shutdown the pool now - List notStarted = pool.shutdownNow(); - if (notStarted != null && !notStarted.isEmpty()) { - server.abort("some single server assigner hasn't started yet" - + " when the bulk assigner timed out", null); - return false; - } - } - - int reassigningRegions = 0; - if (!failedPlans.isEmpty() && !server.isStopped()) { - reassigningRegions = reassignFailedPlans(); - } - assignmentManager.waitForAssignment(regionSet, waitTillAllAssigned, - reassigningRegions, Math.max(System.currentTimeMillis(), rpcWaitTime)); - - if (LOG.isDebugEnabled()) { - long elapsedTime = System.currentTimeMillis() - startTime; - String status = "successfully"; - if (!regionSet.isEmpty()) { - status = "with " + regionSet.size() + " regions still in transition"; - } - LOG.debug("bulk assigning total " + regionCount + " regions to " - + serverCount + " servers, took " + elapsedTime + "ms, " + status); - } - return regionSet.isEmpty(); - } - - @Override - protected long getTimeoutOnRIT() { - // Guess timeout. Multiply the max number of regions on a server - // by how long we think one region takes opening. - Configuration conf = server.getConfiguration(); - long perRegionOpenTimeGuesstimate = - conf.getLong("hbase.bulk.assignment.perregion.open.time", 1000); - int maxRegionsPerServer = 1; - for (List regionList : bulkPlan.values()) { - int size = regionList.size(); - if (size > maxRegionsPerServer) { - maxRegionsPerServer = size; - } - } - long timeout = perRegionOpenTimeGuesstimate * maxRegionsPerServer - + conf.getLong("hbase.regionserver.rpc.startup.waittime", 60000) - + conf.getLong("hbase.bulk.assignment.perregionserver.rpc.waittime", - 30000) * bulkPlan.size(); - LOG.debug("Timeout-on-RIT=" + timeout); - return timeout; - } - - @Override - protected UncaughtExceptionHandler getUncaughtExceptionHandler() { - return new UncaughtExceptionHandler() { - @Override - public void uncaughtException(Thread t, Throwable e) { - LOG.warn("Assigning regions in " + t.getName(), e); - } - }; - } - - private int reassignFailedPlans() { - List reassigningRegions = new ArrayList(); - for (Map.Entry> e : failedPlans.entrySet()) { - LOG.info("Failed assigning " + e.getValue().size() - + " regions to server " + e.getKey() + ", reassigning them"); - reassigningRegions.addAll(failedPlans.remove(e.getKey())); - } - RegionStates regionStates = assignmentManager.getRegionStates(); - for (HRegionInfo region : reassigningRegions) { - if (!regionStates.isRegionOnline(region)) { - assignmentManager.invokeAssign(region); - } - } - return reassigningRegions.size(); - } - - /** - * Manage bulk assigning to a server. 
- */ - static class SingleServerBulkAssigner implements Runnable { - private final ServerName regionserver; - private final List regions; - private final AssignmentManager assignmentManager; - private final Map> failedPlans; - - SingleServerBulkAssigner(final ServerName regionserver, - final List regions, final AssignmentManager am, - final Map> failedPlans) { - this.regionserver = regionserver; - this.regions = regions; - this.assignmentManager = am; - this.failedPlans = failedPlans; - } - - @Override - public void run() { - try { - if (!assignmentManager.assign(regionserver, regions)) { - failedPlans.put(regionserver, regions); - } - } catch (Throwable t) { - LOG.warn("Failed bulking assigning " + regions.size() - + " region(s) to " + regionserver.getServerName() - + ", and continue to bulk assign others", t); - failedPlans.put(regionserver, regions); - } - } - } -} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index c4a4af9..04a210a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -65,8 +65,6 @@ import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.PleaseHoldException; import org.apache.hadoop.hbase.ProcedureInfo; -import org.apache.hadoop.hbase.RegionStateListener; -import org.apache.hadoop.hbase.ScheduledChore; import org.apache.hadoop.hbase.ServerLoad; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableDescriptors; @@ -74,6 +72,9 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotDisabledException; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.UnknownRegionException; +import org.apache.hadoop.hbase.master.assignment.AssignmentManager; +import org.apache.hadoop.hbase.master.assignment.RegionStates; +import org.apache.hadoop.hbase.master.assignment.RegionStates.RegionStateNode; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.MasterSwitchType; import org.apache.hadoop.hbase.client.Result; @@ -334,7 +335,6 @@ public class HMaster extends HRegionServer implements MasterServices { private RegionNormalizerChore normalizerChore; private ClusterStatusChore clusterStatusChore; private ClusterStatusPublisher clusterStatusPublisherChore = null; - private PeriodicDoMetrics periodicDoMetricsChore = null; CatalogJanitor catalogJanitorChore; private ReplicationMetaCleaner replicationMetaCleaner; @@ -433,19 +433,6 @@ public class HMaster extends HRegionServer implements MasterServices { } } - private static class PeriodicDoMetrics extends ScheduledChore { - private final HMaster server; - public PeriodicDoMetrics(int doMetricsInterval, final HMaster server) { - super(server.getServerName() + "-DoMetricsChore", server, doMetricsInterval); - this.server = server; - } - - @Override - protected void chore() { - server.doMetrics(); - } - } - /** * Initializes the HMaster. The steps are as follows: *

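The chore removed in the hunk above (and the doMetrics() helper removed just below) were instances of the master's ScheduledChore mechanism. As a reminder of how that mechanism works, a minimal chore is defined and scheduled roughly as follows; the class name and body here are hypothetical:

import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

// Hypothetical chore: chore() runs every periodMillis until the Stoppable
// passed to the constructor is stopped.
class ExampleChore extends ScheduledChore {
  ExampleChore(final Stoppable stopper, final int periodMillis) {
    super("ExampleChore", stopper, periodMillis);
  }

  @Override
  protected void chore() {
    // Periodic work goes here; catch exceptions so one failure does not
    // abort the hosting server.
  }
}

// Scheduled the same way as the other master chores:
//   getChoreService().scheduleChore(new ExampleChore(this, msgInterval));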
@@ -648,20 +635,6 @@ public class HMaster extends HRegionServer implements MasterServices { return MasterDumpServlet.class; } - /** - * Emit the HMaster metrics, such as region in transition metrics. - * Surrounding in a try block just to be sure metrics doesn't abort HMaster. - */ - private void doMetrics() { - try { - if (assignmentManager != null) { - assignmentManager.updateRegionsInTransitionMetrics(); - } - } catch (Throwable e) { - LOG.error("Couldn't update metrics: " + e.getMessage()); - } - } - MetricsMaster getMasterMetrics() { return metricsMaster; } @@ -684,8 +657,9 @@ public class HMaster extends HRegionServer implements MasterServices { this.splitOrMergeTracker = new SplitOrMergeTracker(zooKeeper, conf, this); this.splitOrMergeTracker.start(); - this.assignmentManager = new AssignmentManager(this, serverManager, - this.balancer, this.service, this.metricsMaster, tableStateManager); + // Create Assignment Manager + this.assignmentManager = new AssignmentManager(this); + this.assignmentManager.start(); this.replicationManager = new ReplicationManager(conf, zooKeeper, this); @@ -869,10 +843,6 @@ public class HMaster extends HRegionServer implements MasterServices { this.catalogJanitorChore = new CatalogJanitor(this); getChoreService().scheduleChore(catalogJanitorChore); - // Do Metrics periodically - periodicDoMetricsChore = new PeriodicDoMetrics(msgInterval, this); - getChoreService().scheduleChore(periodicDoMetricsChore); - status.setStatus("Starting cluster schema service"); initClusterSchemaService(); @@ -885,7 +855,8 @@ public class HMaster extends HRegionServer implements MasterServices { } status.markComplete("Initialization successful"); - LOG.info("Master has completed initialization"); + LOG.info(String.format("Master has completed initialization %.3fsec", + (System.currentTimeMillis() - masterActiveTime) / 1000.0f)); configurationManager.registerObserver(this.balancer); // Set master as 'initialized'. @@ -963,8 +934,8 @@ public class HMaster extends HRegionServer implements MasterServices { // Check zk for region servers that are up but didn't register for (ServerName sn: this.regionServerTracker.getOnlineServers()) { // The isServerOnline check is opportunistic, correctness is handled inside - if (!this.serverManager.isServerOnline(sn) - && serverManager.checkAndRecordNewServer(sn, ServerLoad.EMPTY_SERVERLOAD)) { + if (!this.serverManager.isServerOnline(sn) && + serverManager.checkAndRecordNewServer(sn, ServerLoad.EMPTY_SERVERLOAD)) { LOG.info("Registered server found up in zk but who has not yet reported in: " + sn); } } @@ -978,14 +949,13 @@ public class HMaster extends HRegionServer implements MasterServices { void initQuotaManager() throws IOException { MasterQuotaManager quotaManager = new MasterQuotaManager(this); - this.assignmentManager.setRegionStateListener((RegionStateListener)quotaManager); + this.assignmentManager.setRegionStateListener(quotaManager); quotaManager.start(); this.quotaManager = quotaManager; } boolean isCatalogJanitorEnabled() { - return catalogJanitorChore != null ? - catalogJanitorChore.getEnabled() : false; + return catalogJanitorChore != null ? 
catalogJanitorChore.getEnabled() : false; } boolean isCleanerChoreEnabled() { @@ -1093,7 +1063,6 @@ public class HMaster extends HRegionServer implements MasterServices { @Override protected void sendShutdownInterrupt() { super.sendShutdownInterrupt(); - stopProcedureExecutor(); } @Override @@ -1118,15 +1087,20 @@ public class HMaster extends HRegionServer implements MasterServices { if (LOG.isDebugEnabled()) { LOG.debug("Stopping service threads"); } + // Clean up and close up shop if (this.logCleaner != null) this.logCleaner.cancel(true); if (this.hfileCleaner != null) this.hfileCleaner.cancel(true); if (this.replicationZKNodeCleanerChore != null) this.replicationZKNodeCleanerChore.cancel(true); if (this.replicationMetaCleaner != null) this.replicationMetaCleaner.cancel(true); if (this.quotaManager != null) this.quotaManager.stop(); + if (this.activeMasterManager != null) this.activeMasterManager.stop(); if (this.serverManager != null) this.serverManager.stop(); if (this.assignmentManager != null) this.assignmentManager.stop(); + + stopProcedureExecutor(); + if (this.walManager != null) this.walManager.stop(); if (this.fileSystemManager != null) this.fileSystemManager.stop(); if (this.mpmHost != null) this.mpmHost.stop("server shutting down."); @@ -1152,16 +1126,20 @@ public class HMaster extends HRegionServer implements MasterServices { MasterProcedureConstants.DEFAULT_EXECUTOR_ABORT_ON_CORRUPTION); procedureStore.start(numThreads); procedureExecutor.start(numThreads, abortOnCorruption); + procEnv.getRemoteDispatcher().start(); } private void stopProcedureExecutor() { if (procedureExecutor != null) { configurationManager.deregisterObserver(procedureExecutor.getEnvironment()); + procedureExecutor.getEnvironment().getRemoteDispatcher().stop(); procedureExecutor.stop(); + procedureExecutor = null; } if (procedureStore != null) { procedureStore.stop(isAborted()); + procedureStore = null; } } @@ -1190,10 +1168,6 @@ public class HMaster extends HRegionServer implements MasterServices { if (this.mobCompactThread != null) { this.mobCompactThread.close(); } - - if (this.periodicDoMetricsChore != null) { - periodicDoMetricsChore.cancel(); - } } /** @@ -1251,7 +1225,7 @@ public class HMaster extends HRegionServer implements MasterServices { // Sleep to next balance plan start time // But if there are zero regions in transition, it can skip sleep to speed up. while (!interrupted && System.currentTimeMillis() < nextBalanceStartTime - && this.assignmentManager.getRegionStates().getRegionsInTransitionCount() != 0) { + && this.assignmentManager.getRegionStates().hasRegionsInTransition()) { try { Thread.sleep(100); } catch (InterruptedException ie) { @@ -1262,7 +1236,7 @@ public class HMaster extends HRegionServer implements MasterServices { // Throttling by max number regions in transition while (!interrupted && maxRegionsInTransition > 0 - && this.assignmentManager.getRegionStates().getRegionsInTransitionCount() + && this.assignmentManager.getRegionStates().getRegionsInTransition().size() >= maxRegionsInTransition && System.currentTimeMillis() <= cutoffTime) { try { // sleep if the number of regions in transition exceeds the limit @@ -1295,13 +1269,12 @@ public class HMaster extends HRegionServer implements MasterServices { synchronized (this.balancer) { // If balance not true, don't run balancer. if (!this.loadBalancerTracker.isBalancerOn()) return false; - // Only allow one balance run at at time. 
- if (this.assignmentManager.getRegionStates().isRegionsInTransition()) { - Set regionsInTransition = - this.assignmentManager.getRegionStates().getRegionsInTransition(); + // Only allow one balance run at at time. + if (this.assignmentManager.hasRegionsInTransition()) { + List regionsInTransition = assignmentManager.getRegionsInTransition(); // if hbase:meta region is in transition, result of assignment cannot be recorded // ignore the force flag in that case - boolean metaInTransition = assignmentManager.getRegionStates().isMetaRegionInTransition(); + boolean metaInTransition = assignmentManager.isMetaRegionInTransition(); String prefix = force && !metaInTransition ? "R" : "Not r"; LOG.debug(prefix + "unning balancer because " + regionsInTransition.size() + " region(s) in transition: " + org.apache.commons.lang.StringUtils. @@ -1334,7 +1307,7 @@ public class HMaster extends HRegionServer implements MasterServices { //Give the balancer the current cluster state. this.balancer.setClusterStatus(getClusterStatus()); this.balancer.setClusterLoad( - this.assignmentManager.getRegionStates().getAssignmentsByTable(true)); + this.assignmentManager.getRegionStates().getAssignmentsByTable()); for (Entry>> e : assignmentsByTable.entrySet()) { List partialPlans = this.balancer.balanceCluster(e.getKey(), e.getValue()); @@ -1353,7 +1326,7 @@ public class HMaster extends HRegionServer implements MasterServices { for (RegionPlan plan: plans) { LOG.info("balance " + plan); //TODO: bulk assign - this.assignmentManager.balance(plan); + this.assignmentManager.moveAsync(plan); rpCount++; balanceThrottling(balanceStartTime + rpCount * balanceInterval, maxRegionsInTransition, @@ -1517,20 +1490,16 @@ public class HMaster extends HRegionServer implements MasterServices { } @Override - public long splitRegion( - final HRegionInfo regionInfo, - final byte[] splitRow, - final long nonceGroup, - final long nonce) throws IOException { + public long splitRegion(final HRegionInfo regionInfo, final byte[] splitRow, + final long nonceGroup, final long nonce) + throws IOException { checkInitialized(); - return MasterProcedureUtil.submitProcedure( new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { @Override protected void run() throws IOException { getMaster().getMasterCoprocessorHost().preSplitRegion(regionInfo.getTable(), splitRow); - - LOG.info(getClientIdAuditPrefix() + " Split region " + regionInfo); + LOG.info(getClientIdAuditPrefix() + " split " + regionInfo.getRegionNameAsString()); // Execute the operation asynchronously submitProcedure(new SplitTableRegionProcedure(procedureExecutor.getEnvironment(), @@ -1608,7 +1577,7 @@ public class HMaster extends HRegionServer implements MasterServices { serverManager.sendRegionWarmup(rp.getDestination(), hri); LOG.info(getClientIdAuditPrefix() + " move " + rp + ", running balancer"); - this.assignmentManager.balance(rp); + this.assignmentManager.moveAsync(rp); if (this.cpHost != null) { this.cpHost.postMove(hri, rp.getSource(), rp.getDestination()); } @@ -2380,8 +2349,9 @@ public class HMaster extends HRegionServer implements MasterServices { String clusterId = fileSystemManager != null ? fileSystemManager.getClusterId().toString() : null; - Set regionsInTransition = assignmentManager != null ? - assignmentManager.getRegionStates().getRegionsInTransition() : null; + List regionsInTransition = assignmentManager != null ? + assignmentManager.getRegionStates().getRegionsStateInTransition() : null; + String[] coprocessors = cpHost != null ? 
getMasterCoprocessors() : null; boolean balancerOn = loadBalancerTracker != null ? loadBalancerTracker.isBalancerOn() : false; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java index 277dcc8..0e86925 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java @@ -44,7 +44,9 @@ import edu.umd.cs.findbugs.annotations.Nullable; *

On cluster startup, bulk assignment can be used to determine * locations for all Regions in a cluster. * - *

This classes produces plans for the {@link AssignmentManager} to execute. + *

This class produces plans for the + * {@link org.apache.hadoop.hbase.master.assignment.AssignmentManager} + * to execute. */ @InterfaceAudience.Private public interface LoadBalancer extends Configurable, Stoppable, ConfigurationObserver { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterDumpServlet.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterDumpServlet.java index a921ab5..a48444c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterDumpServlet.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterDumpServlet.java @@ -24,7 +24,6 @@ import java.io.PrintStream; import java.io.PrintWriter; import java.util.Date; import java.util.Map; -import java.util.Set; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; @@ -33,6 +32,8 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ServerLoad; import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.master.assignment.AssignmentManager; +import org.apache.hadoop.hbase.master.assignment.RegionStates.RegionStateNode; import org.apache.hadoop.hbase.monitoring.LogMonitoring; import org.apache.hadoop.hbase.monitoring.StateDumpServlet; import org.apache.hadoop.hbase.monitoring.TaskMonitor; @@ -117,9 +118,8 @@ public class MasterDumpServlet extends StateDumpServlet { return; } - Set regionsInTransition = am.getRegionStates().getRegionsInTransition(); - for (RegionState rs : regionsInTransition) { - String rid = rs.getRegion().getRegionNameAsString(); + for (RegionStateNode rs : am.getRegionsInTransition()) { + String rid = rs.getRegionInfo().getEncodedName(); out.println("Region " + rid + ": " + rs.toDescriptiveString()); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMetaBootstrap.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMetaBootstrap.java index 5e1917b..ce8a1ef 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMetaBootstrap.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMetaBootstrap.java @@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.RegionReplicaUtil; import org.apache.hadoop.hbase.client.TableState; +import org.apache.hadoop.hbase.master.assignment.AssignmentManager; import org.apache.hadoop.hbase.monitoring.MonitoredTask; import org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode; import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; @@ -151,7 +152,9 @@ public class MasterMetaBootstrap { // Work on meta region int assigned = 0; - long timeout = master.getConfiguration().getLong("hbase.catalog.verification.timeout", 1000); + // TODO: Unimplemented + // long timeout = + // master.getConfiguration().getLong("hbase.catalog.verification.timeout", 1000); if (replicaId == HRegionInfo.DEFAULT_REPLICA_ID) { status.setStatus("Assigning hbase:meta region"); } else { @@ -160,37 +163,10 @@ public class MasterMetaBootstrap { // Get current meta state from zk. 
RegionState metaState = MetaTableLocator.getMetaRegionState(master.getZooKeeper(), replicaId); - HRegionInfo hri = RegionReplicaUtil.getRegionInfoForReplica(HRegionInfo.FIRST_META_REGIONINFO, - replicaId); - RegionStates regionStates = assignmentManager.getRegionStates(); - regionStates.createRegionState(hri, metaState.getState(), - metaState.getServerName(), null); - - if (!metaState.isOpened() || !master.getMetaTableLocator().verifyMetaRegionLocation( - master.getClusterConnection(), master.getZooKeeper(), timeout, replicaId)) { - ServerName currentMetaServer = metaState.getServerName(); - if (master.getServerManager().isServerOnline(currentMetaServer)) { - if (replicaId == HRegionInfo.DEFAULT_REPLICA_ID) { - LOG.info("Meta was in transition on " + currentMetaServer); - } else { - LOG.info("Meta with replicaId " + replicaId + " was in transition on " + - currentMetaServer); - } - assignmentManager.processRegionsInTransition(Collections.singletonList(metaState)); - } else { - if (currentMetaServer != null) { - if (replicaId == HRegionInfo.DEFAULT_REPLICA_ID) { - splitMetaLogBeforeAssignment(currentMetaServer); - regionStates.logSplit(HRegionInfo.FIRST_META_REGIONINFO); - previouslyFailedMetaRSs.add(currentMetaServer); - } - } - LOG.info("Re-assigning hbase:meta with replicaId, " + replicaId + - " it was on " + currentMetaServer); - assignmentManager.assignMeta(hri); - } - assigned++; - } + LOG.debug("meta state from zookeeper: " + metaState); + HRegionInfo hri = RegionReplicaUtil.getRegionInfoForReplica( + HRegionInfo.FIRST_META_REGIONINFO, replicaId); + assignmentManager.assignMeta(hri, metaState.getServerName()); if (replicaId == HRegionInfo.DEFAULT_REPLICA_ID) { // TODO: should we prevent from using state manager before meta was initialized? @@ -206,7 +182,6 @@ public class MasterMetaBootstrap { master.getMasterWalManager().splitMetaLog(previouslyFailedMetaRSs); } - assignmentManager.setEnabledTable(TableName.META_TABLE_NAME); master.getTableStateManager().start(); // Make sure a hbase:meta location is set. We need to enable SSH here since @@ -214,7 +189,7 @@ public class MasterMetaBootstrap { // by SSH so that system tables can be assigned. // No need to wait for meta is assigned = 0 when meta is just verified. 
if (replicaId == HRegionInfo.DEFAULT_REPLICA_ID) enableCrashedServerProcessing(assigned != 0); - LOG.info("hbase:meta with replicaId " + replicaId + " assigned=" + assigned + ", location=" + LOG.info("hbase:meta with replicaId " + replicaId + ", location=" + master.getMetaTableLocator().getMetaRegionLocation(master.getZooKeeper(), replicaId)); status.setStatus("META assigned."); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java index 3beda05..fb5bf16 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java @@ -32,9 +32,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.NamespaceDescriptor; -import org.apache.hadoop.hbase.PleaseHoldException; import org.apache.hadoop.hbase.ProcedureInfo; import org.apache.hadoop.hbase.ServerLoad; import org.apache.hadoop.hbase.ServerName; @@ -43,6 +41,7 @@ import org.apache.hadoop.hbase.UnknownRegionException; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.MasterSwitchType; import org.apache.hadoop.hbase.client.TableState; +import org.apache.hadoop.hbase.client.VersionInfoUtil; import org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper; import org.apache.hadoop.hbase.errorhandling.ForeignException; import org.apache.hadoop.hbase.exceptions.UnknownProtocolException; @@ -51,6 +50,7 @@ import org.apache.hadoop.hbase.ipc.PriorityFunction; import org.apache.hadoop.hbase.ipc.QosPriority; import org.apache.hadoop.hbase.ipc.RpcServer.BlockingServiceAndInterface; import org.apache.hadoop.hbase.ipc.ServerRpcController; +import org.apache.hadoop.hbase.master.assignment.RegionStates; import org.apache.hadoop.hbase.master.locking.LockProcedure; import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil; import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil.NonceProcedureRunnable; @@ -80,7 +80,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.Reg import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameStringPair; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription; -import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription; import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockHeartbeatRequest; @@ -98,7 +97,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProto import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStatusService; -import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition; 
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest; @@ -270,7 +268,11 @@ public class MasterRpcServices extends RSRpcServices ClusterStatusProtos.ServerLoad sl = request.getLoad(); ServerName serverName = ProtobufUtil.toServerName(request.getServer()); ServerLoad oldLoad = master.getServerManager().getLoad(serverName); - master.getServerManager().regionServerReport(serverName, new ServerLoad(sl)); + ServerLoad newLoad = new ServerLoad(sl); + master.getServerManager().regionServerReport(serverName, newLoad); + int version = VersionInfoUtil.getCurrentClientVersionNumber(); + master.getAssignmentManager().reportOnlineRegions(serverName, + version, newLoad.getRegionsLoad().keySet()); if (sl != null && master.metricsMaster != null) { // Up our metrics. master.metricsMaster.incrementRequests(sl.getTotalNumberOfRequests() @@ -343,25 +345,25 @@ public class MasterRpcServices extends RSRpcServices public AssignRegionResponse assignRegion(RpcController controller, AssignRegionRequest req) throws ServiceException { try { - final byte [] regionName = req.getRegion().getValue().toByteArray(); - RegionSpecifierType type = req.getRegion().getType(); - AssignRegionResponse arr = AssignRegionResponse.newBuilder().build(); - master.checkInitialized(); + + final RegionSpecifierType type = req.getRegion().getType(); if (type != RegionSpecifierType.REGION_NAME) { LOG.warn("assignRegion specifier type: expected: " + RegionSpecifierType.REGION_NAME + " actual: " + type); } - RegionStates regionStates = master.getAssignmentManager().getRegionStates(); - HRegionInfo regionInfo = regionStates.getRegionInfo(regionName); - if (regionInfo == null) throw new UnknownRegionException(Bytes.toString(regionName)); + + final byte[] regionName = req.getRegion().getValue().toByteArray(); + final HRegionInfo regionInfo = master.getAssignmentManager().getRegionInfo(regionName); + if (regionInfo == null) throw new UnknownRegionException(Bytes.toStringBinary(regionName)); + + final AssignRegionResponse arr = AssignRegionResponse.newBuilder().build(); if (master.cpHost != null) { if (master.cpHost.preAssign(regionInfo)) { return arr; } } - LOG.info(master.getClientIdAuditPrefix() - + " assign " + regionInfo.getRegionNameAsString()); + LOG.info(master.getClientIdAuditPrefix() + " assign " + regionInfo.getRegionNameAsString()); master.getAssignmentManager().assign(regionInfo, true); if (master.cpHost != null) { master.cpHost.postAssign(regionInfo); @@ -372,6 +374,7 @@ public class MasterRpcServices extends RSRpcServices } } + @Override public BalanceResponse balance(RpcController controller, BalanceRequest request) throws ServiceException { @@ -1162,24 +1165,24 @@ public class MasterRpcServices extends RSRpcServices @Override public OfflineRegionResponse offlineRegion(RpcController controller, OfflineRegionRequest request) throws ServiceException { - final byte [] regionName = request.getRegion().getValue().toByteArray(); - RegionSpecifierType type = request.getRegion().getType(); - if (type != RegionSpecifierType.REGION_NAME) { - LOG.warn("moveRegion specifier type: expected: " + RegionSpecifierType.REGION_NAME - + " actual: " + type); - } - try { master.checkInitialized(); - Pair pair = - MetaTableAccessor.getRegion(master.getConnection(), regionName); 
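The same replacement pattern runs through these MasterRpcServices hunks (assignRegion above; offlineRegion, which continues below; unassignRegion further down): the per-RPC hbase:meta lookup through MetaTableAccessor is swapped for an in-memory lookup on the new assignment manager. A hedged sketch of that lookup; only getRegionInfo(byte[]) is from the patch, the wrapper class and method name are illustrative:

import java.io.IOException;

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.UnknownRegionException;
import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
import org.apache.hadoop.hbase.util.Bytes;

// Hypothetical helper mirroring how the region RPCs now resolve a region name.
final class RegionLookupSketch {
  static HRegionInfo resolveRegion(AssignmentManager am, byte[] regionName)
      throws IOException {
    // Served from AMv2's in-memory RegionStates; no round trip to hbase:meta.
    HRegionInfo hri = am.getRegionInfo(regionName);
    if (hri == null) {
      throw new UnknownRegionException(Bytes.toStringBinary(regionName));
    }
    return hri;
  }
}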
- if (pair == null) throw new UnknownRegionException(Bytes.toStringBinary(regionName)); - HRegionInfo hri = pair.getFirst(); + + final RegionSpecifierType type = request.getRegion().getType(); + if (type != RegionSpecifierType.REGION_NAME) { + LOG.warn("moveRegion specifier type: expected: " + RegionSpecifierType.REGION_NAME + + " actual: " + type); + } + + final byte[] regionName = request.getRegion().getValue().toByteArray(); + final HRegionInfo hri = master.getAssignmentManager().getRegionInfo(regionName); + if (hri == null) throw new UnknownRegionException(Bytes.toStringBinary(regionName)); + if (master.cpHost != null) { master.cpHost.preRegionOffline(hri); } LOG.info(master.getClientIdAuditPrefix() + " offline " + hri.getRegionNameAsString()); - master.getAssignmentManager().regionOffline(hri); + master.getAssignmentManager().offlineRegion(hri); if (master.cpHost != null) { master.cpHost.postRegionOffline(hri); } @@ -1320,28 +1323,20 @@ public class MasterRpcServices extends RSRpcServices public UnassignRegionResponse unassignRegion(RpcController controller, UnassignRegionRequest req) throws ServiceException { try { - final byte [] regionName = req.getRegion().getValue().toByteArray(); - RegionSpecifierType type = req.getRegion().getType(); - final boolean force = req.getForce(); - UnassignRegionResponse urr = UnassignRegionResponse.newBuilder().build(); - master.checkInitialized(); + + final RegionSpecifierType type = req.getRegion().getType(); if (type != RegionSpecifierType.REGION_NAME) { LOG.warn("unassignRegion specifier type: expected: " + RegionSpecifierType.REGION_NAME + " actual: " + type); } - Pair pair = - MetaTableAccessor.getRegion(master.getConnection(), regionName); - if (Bytes.equals(HRegionInfo.FIRST_META_REGIONINFO.getRegionName(),regionName)) { - pair = new Pair(HRegionInfo.FIRST_META_REGIONINFO, - master.getMetaTableLocator().getMetaRegionLocation(master.getZooKeeper())); - } - if (pair == null) { - throw new UnknownRegionException(Bytes.toString(regionName)); - } - if (pair == null) throw new UnknownRegionException(Bytes.toString(regionName)); - HRegionInfo hri = pair.getFirst(); + final byte[] regionName = req.getRegion().getValue().toByteArray(); + final HRegionInfo hri = master.getAssignmentManager().getRegionInfo(regionName); + if (hri == null) throw new UnknownRegionException(Bytes.toStringBinary(regionName)); + + final boolean force = req.getForce(); + final UnassignRegionResponse urr = UnassignRegionResponse.newBuilder().build(); if (master.cpHost != null) { if (master.cpHost.preUnassign(hri, force)) { return urr; @@ -1353,7 +1348,6 @@ public class MasterRpcServices extends RSRpcServices if (master.cpHost != null) { master.cpHost.postUnassign(hri, force); } - return urr; } catch (IOException ioe) { throw new ServiceException(ioe); @@ -1365,26 +1359,7 @@ public class MasterRpcServices extends RSRpcServices ReportRegionStateTransitionRequest req) throws ServiceException { try { master.checkServiceStarted(); - RegionStateTransition rt = req.getTransition(0); - RegionStates regionStates = master.getAssignmentManager().getRegionStates(); - for (RegionInfo ri : rt.getRegionInfoList()) { - TableName tableName = ProtobufUtil.toTableName(ri.getTableName()); - if (!(TableName.META_TABLE_NAME.equals(tableName) - && regionStates.getRegionState(HRegionInfo.FIRST_META_REGIONINFO) != null) - && !master.getAssignmentManager().isFailoverCleanupDone()) { - // Meta region is assigned before master finishes the - // failover cleanup. 
So no need this check for it - throw new PleaseHoldException("Master is rebuilding user regions"); - } - } - ServerName sn = ProtobufUtil.toServerName(req.getServer()); - String error = master.getAssignmentManager().onRegionTransition(sn, rt); - ReportRegionStateTransitionResponse.Builder rrtr = - ReportRegionStateTransitionResponse.newBuilder(); - if (error != null) { - rrtr.setErrorMessage(error); - } - return rrtr.build(); + return master.getAssignmentManager().reportRegionStateTransition(req); } catch (IOException ioe) { throw new ServiceException(ioe); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java index 66758f8..f215983 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java @@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.TableNotDisabledException; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.executor.ExecutorService; +import org.apache.hadoop.hbase.master.assignment.AssignmentManager; import org.apache.hadoop.hbase.master.locking.LockManager; import org.apache.hadoop.hbase.favored.FavoredNodesManager; import org.apache.hadoop.hbase.master.normalizer.RegionNormalizer; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java index 1f9729c..44250e0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java @@ -23,6 +23,7 @@ import com.google.common.annotations.VisibleForTesting; import java.io.IOException; import java.io.InterruptedIOException; import java.util.ArrayList; +import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Set; @@ -218,9 +219,7 @@ public class MasterWalManager { } public void splitLog(final ServerName serverName) throws IOException { - Set serverNames = new HashSet(); - serverNames.add(serverName); - splitLog(serverNames); + splitLog(Collections.singleton(serverName)); } /** @@ -228,9 +227,7 @@ public class MasterWalManager { * @param serverName logs belonging to this server will be split */ public void splitMetaLog(final ServerName serverName) throws IOException { - Set serverNames = new HashSet(); - serverNames.add(serverName); - splitMetaLog(serverNames); + splitMetaLog(Collections.singleton(serverName)); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStateStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStateStore.java deleted file mode 100644 index 3a2a6d7..0000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStateStore.java +++ /dev/null @@ -1,268 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.master; - -import com.google.common.base.Preconditions; - -import java.io.IOException; -import java.util.Arrays; -import java.util.Collections; -import java.util.List; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.HRegionLocation; -import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.MetaTableAccessor; -import org.apache.hadoop.hbase.RegionLocations; -import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.client.Put; -import org.apache.hadoop.hbase.client.Result; -import org.apache.hadoop.hbase.master.RegionState.State; -import org.apache.hadoop.hbase.regionserver.Region; -import org.apache.hadoop.hbase.regionserver.RegionServerServices; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -import org.apache.hadoop.hbase.util.MultiHConnection; -import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; -import org.apache.zookeeper.KeeperException; - -/** - * A helper to persist region state in meta. We may change this class - * to StateStore later if we also use it to store other states in meta - */ -@InterfaceAudience.Private -public class RegionStateStore { - private static final Log LOG = LogFactory.getLog(RegionStateStore.class); - - /** The delimiter for meta columns for replicaIds > 0 */ - protected static final char META_REPLICA_ID_DELIMITER = '_'; - - private volatile Region metaRegion; - private volatile boolean initialized; - private MultiHConnection multiHConnection; - private final MasterServices server; - - /** - * Returns the {@link ServerName} from catalog table {@link Result} - * where the region is transitioning. It should be the same as - * {@link MetaTableAccessor#getServerName(Result,int)} if the server is at OPEN state. - * @param r Result to pull the transitioning server name from - * @return A ServerName instance or {@link MetaTableAccessor#getServerName(Result,int)} - * if necessary fields not found or empty. - */ - static ServerName getRegionServer(final Result r, int replicaId) { - Cell cell = r.getColumnLatestCell(HConstants.CATALOG_FAMILY, getServerNameColumn(replicaId)); - if (cell == null || cell.getValueLength() == 0) { - RegionLocations locations = MetaTableAccessor.getRegionLocations(r); - if (locations != null) { - HRegionLocation location = locations.getRegionLocation(replicaId); - if (location != null) { - return location.getServerName(); - } - } - return null; - } - return ServerName.parseServerName(Bytes.toString(cell.getValueArray(), - cell.getValueOffset(), cell.getValueLength())); - } - - private static byte[] getServerNameColumn(int replicaId) { - return replicaId == 0 - ? 
HConstants.SERVERNAME_QUALIFIER - : Bytes.toBytes(HConstants.SERVERNAME_QUALIFIER_STR + META_REPLICA_ID_DELIMITER - + String.format(HRegionInfo.REPLICA_ID_FORMAT, replicaId)); - } - - /** - * Pull the region state from a catalog table {@link Result}. - * @param r Result to pull the region state from - * @return the region state, or OPEN if there's no value written. - */ - static State getRegionState(final Result r, int replicaId) { - Cell cell = r.getColumnLatestCell(HConstants.CATALOG_FAMILY, getStateColumn(replicaId)); - if (cell == null || cell.getValueLength() == 0) return State.OPEN; - return State.valueOf(Bytes.toString(cell.getValueArray(), - cell.getValueOffset(), cell.getValueLength())); - } - - private static byte[] getStateColumn(int replicaId) { - return replicaId == 0 - ? HConstants.STATE_QUALIFIER - : Bytes.toBytes(HConstants.STATE_QUALIFIER_STR + META_REPLICA_ID_DELIMITER - + String.format(HRegionInfo.REPLICA_ID_FORMAT, replicaId)); - } - - /** - * Check if we should persist a state change in meta. Generally it's - * better to persist all state changes. However, we should not do that - * if the region is not in meta at all. Based on the state and the - * previous state, we can identify if a user region has an entry - * in meta. For example, merged regions are deleted from meta; - * New merging parents, or splitting daughters are - * not created in meta yet. - */ - private boolean shouldPersistStateChange( - HRegionInfo hri, RegionState state, RegionState oldState) { - return !hri.isMetaRegion() && !RegionStates.isOneOfStates( - state, State.MERGING_NEW, State.SPLITTING_NEW, State.MERGED) - && !(RegionStates.isOneOfStates(state, State.OFFLINE) - && RegionStates.isOneOfStates(oldState, State.MERGING_NEW, - State.SPLITTING_NEW, State.MERGED)); - } - - RegionStateStore(final MasterServices server) { - this.server = server; - initialized = false; - } - - void start() throws IOException { - if (server instanceof RegionServerServices) { - metaRegion = ((RegionServerServices)server).getFromOnlineRegions( - HRegionInfo.FIRST_META_REGIONINFO.getEncodedName()); - } - // When meta is not colocated on master - if (metaRegion == null) { - Configuration conf = server.getConfiguration(); - // Config to determine the no of HConnections to META. - // A single Connection should be sufficient in most cases. Only if - // you are doing lot of writes (>1M) to META, - // increasing this value might improve the write throughput. - multiHConnection = - new MultiHConnection(conf, conf.getInt("hbase.regionstatestore.meta.connection", 1)); - } - initialized = true; - } - - void stop() { - initialized = false; - if (multiHConnection != null) { - multiHConnection.close(); - } - } - - void updateRegionState(long openSeqNum, - RegionState newState, RegionState oldState) { - try { - HRegionInfo hri = newState.getRegion(); - - // Update meta before checking for initialization. Meta state stored in zk. - if (hri.isMetaRegion()) { - // persist meta state in MetaTableLocator (which in turn is zk storage currently) - try { - MetaTableLocator.setMetaLocation(server.getZooKeeper(), - newState.getServerName(), hri.getReplicaId(), newState.getState()); - return; // Done - } catch (KeeperException e) { - throw new IOException("Failed to update meta ZNode", e); - } - } - - if (!initialized - || !shouldPersistStateChange(hri, newState, oldState)) { - return; - } - - ServerName oldServer = oldState != null ? 
oldState.getServerName() : null; - ServerName serverName = newState.getServerName(); - State state = newState.getState(); - - int replicaId = hri.getReplicaId(); - Put metaPut = new Put(MetaTableAccessor.getMetaKeyForRegion(hri)); - StringBuilder info = new StringBuilder("Updating hbase:meta row "); - info.append(hri.getRegionNameAsString()).append(" with state=").append(state); - if (serverName != null && !serverName.equals(oldServer)) { - metaPut.addImmutable(HConstants.CATALOG_FAMILY, getServerNameColumn(replicaId), - Bytes.toBytes(serverName.getServerName())); - info.append(", sn=").append(serverName); - } - if (openSeqNum >= 0) { - Preconditions.checkArgument(state == State.OPEN - && serverName != null, "Open region should be on a server"); - MetaTableAccessor.addLocation(metaPut, serverName, openSeqNum, -1, replicaId); - info.append(", openSeqNum=").append(openSeqNum); - info.append(", server=").append(serverName); - } - metaPut.addImmutable(HConstants.CATALOG_FAMILY, getStateColumn(replicaId), - Bytes.toBytes(state.name())); - LOG.info(info); - HTableDescriptor descriptor = server.getTableDescriptors().get(hri.getTable()); - boolean serial = false; - if (descriptor != null) { - serial = server.getTableDescriptors().get(hri.getTable()).hasSerialReplicationScope(); - } - boolean shouldPutBarrier = serial && state == State.OPEN; - // Persist the state change to meta - if (metaRegion != null) { - try { - // Assume meta is pinned to master. - // At least, that's what we want. - metaRegion.put(metaPut); - if (shouldPutBarrier) { - Put barrierPut = MetaTableAccessor.makeBarrierPut(hri.getEncodedNameAsBytes(), - openSeqNum, hri.getTable().getName()); - metaRegion.put(barrierPut); - } - return; // Done here - } catch (Throwable t) { - // In unit tests, meta could be moved away by intention - // So, the shortcut is gone. We won't try to establish the - // shortcut any more because we prefer meta to be pinned - // to the master - synchronized (this) { - if (metaRegion != null) { - LOG.info("Meta region shortcut failed", t); - if (multiHConnection == null) { - multiHConnection = new MultiHConnection(server.getConfiguration(), 1); - } - metaRegion = null; - } - } - } - } - // Called when meta is not on master - List list = shouldPutBarrier ? 
- Arrays.asList(metaPut, MetaTableAccessor.makeBarrierPut(hri.getEncodedNameAsBytes(), - openSeqNum, hri.getTable().getName())) : Collections.singletonList(metaPut); - multiHConnection.processBatchCallback(list, TableName.META_TABLE_NAME, null, null); - - } catch (IOException ioe) { - LOG.error("Failed to persist region state " + newState, ioe); - server.abort("Failed to update region location", ioe); - } - } - - void splitRegion(HRegionInfo p, - HRegionInfo a, HRegionInfo b, ServerName sn, int regionReplication) throws IOException { - MetaTableAccessor.splitRegion(server.getConnection(), p, a, b, sn, regionReplication, - server.getTableDescriptors().get(p.getTable()).hasSerialReplicationScope()); - } - - void mergeRegions(HRegionInfo p, - HRegionInfo a, HRegionInfo b, ServerName sn, int regionReplication) throws IOException { - MetaTableAccessor.mergeRegions(server.getConnection(), p, a, b, sn, regionReplication, - EnvironmentEdgeManager.currentTime(), - server.getTableDescriptors().get(p.getTable()).hasSerialReplicationScope()); - } -} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java deleted file mode 100644 index 4125eea..0000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java +++ /dev/null @@ -1,1184 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hbase.master; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Comparator; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.SortedSet; -import java.util.TreeMap; -import java.util.TreeSet; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.MetaTableAccessor; -import org.apache.hadoop.hbase.ServerLoad; -import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.RegionReplicaUtil; -import org.apache.hadoop.hbase.master.RegionState.State; -import org.apache.hadoop.hbase.client.TableState; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.FSUtils; -import org.apache.hadoop.hbase.util.Pair; - -/** - * Region state accountant. It holds the states of all regions in the memory. - * In normal scenario, it should match the meta table and the true region states. - * - * This map is used by AssignmentManager to track region states. - */ -@InterfaceAudience.Private -public class RegionStates { - private static final Log LOG = LogFactory.getLog(RegionStates.class); - - public final static RegionStateStampComparator REGION_STATE_COMPARATOR = - new RegionStateStampComparator(); - - // This comparator sorts the RegionStates by time stamp then Region name. - // Comparing by timestamp alone can lead us to discard different RegionStates that happen - // to share a timestamp. - private static class RegionStateStampComparator implements Comparator { - @Override - public int compare(RegionState l, RegionState r) { - return Long.compare(l.getStamp(), r.getStamp()) == 0 ? - Bytes.compareTo(l.getRegion().getRegionName(), r.getRegion().getRegionName()) : - Long.compare(l.getStamp(), r.getStamp()); - } - } - - /** - * Regions currently in transition. - */ - final HashMap regionsInTransition = - new HashMap(); - - /** - * Region encoded name to state map. - * All the regions should be in this map. - */ - private final Map regionStates = - new HashMap(); - - /** - * Holds mapping of table -> region state - */ - private final Map> regionStatesTableIndex = - new HashMap>(); - - /** - * Server to regions assignment map. - * Contains the set of regions currently assigned to a given server. - */ - private final Map> serverHoldings = - new HashMap>(); - - /** - * Maintains the mapping from the default region to the replica regions. - */ - private final Map> defaultReplicaToOtherReplicas = - new HashMap>(); - - /** - * Region to server assignment map. - * Contains the server a given region is currently assigned to. - */ - private final TreeMap regionAssignments = - new TreeMap(); - - /** - * Encoded region name to server assignment map for re-assignment - * purpose. Contains the server a given region is last known assigned - * to, which has not completed log splitting, so not assignable. 
- * If a region is currently assigned, this server info in this - * map should be the same as that in regionAssignments. - * However the info in regionAssignments is cleared when the region - * is offline while the info in lastAssignments is cleared when - * the region is closed or the server is dead and processed. - */ - private final HashMap lastAssignments = - new HashMap(); - - /** - * Encoded region name to server assignment map for the - * purpose to clean up serverHoldings when a region is online - * on a new server. When the region is offline from the previous - * server, we cleaned up regionAssignments so that it has the - * latest assignment map. But we didn't clean up serverHoldings - * to match the meta. We need this map to find out the old server - * whose serverHoldings needs cleanup, given a moved region. - */ - private final HashMap oldAssignments = - new HashMap(); - - /** - * Map a host port pair string to the latest start code - * of a region server which is known to be dead. It is dead - * to us, but server manager may not know it yet. - */ - private final HashMap deadServers = - new HashMap(); - - /** - * Map a dead servers to the time when log split is done. - * Since log splitting is not ordered, we have to remember - * all processed instances. The map is cleaned up based - * on a configured time. By default, we assume a dead - * server should be done with log splitting in two hours. - */ - private final HashMap processedServers = - new HashMap(); - private long lastProcessedServerCleanTime; - - private final TableStateManager tableStateManager; - private final RegionStateStore regionStateStore; - private final ServerManager serverManager; - private final MasterServices server; - - // The maximum time to keep a log split info in region states map - static final String LOG_SPLIT_TIME = "hbase.master.maximum.logsplit.keeptime"; - static final long DEFAULT_LOG_SPLIT_TIME = 7200000L; // 2 hours - - RegionStates(final MasterServices master, final TableStateManager tableStateManager, - final ServerManager serverManager, final RegionStateStore regionStateStore) { - this.tableStateManager = tableStateManager; - this.regionStateStore = regionStateStore; - this.serverManager = serverManager; - this.server = master; - } - - /** - * @return a copy of the region assignment map - */ - public synchronized Map getRegionAssignments() { - return new TreeMap(regionAssignments); - } - - /** - * Return the replicas (including default) for the regions grouped by ServerName - * @param regions - * @return a pair containing the groupings as a map - */ - synchronized Map> getRegionAssignments( - Collection regions) { - Map> map = new HashMap>(); - for (HRegionInfo region : regions) { - HRegionInfo defaultReplica = RegionReplicaUtil.getRegionInfoForDefaultReplica(region); - Set allReplicas = defaultReplicaToOtherReplicas.get(defaultReplica); - if (allReplicas != null) { - for (HRegionInfo hri : allReplicas) { - ServerName server = regionAssignments.get(hri); - if (server != null) { - List regionsOnServer = map.get(server); - if (regionsOnServer == null) { - regionsOnServer = new ArrayList(1); - map.put(server, regionsOnServer); - } - regionsOnServer.add(hri); - } - } - } - } - return map; - } - - public synchronized ServerName getRegionServerOfRegion(HRegionInfo hri) { - return regionAssignments.get(hri); - } - - /** - * Get regions in transition and their states - */ - public synchronized Set getRegionsInTransition() { - return new HashSet(regionsInTransition.values()); - } - - public 
synchronized SortedSet getRegionsInTransitionOrderedByTimestamp() { - final TreeSet rit = new TreeSet(REGION_STATE_COMPARATOR); - for (RegionState rs: regionsInTransition.values()) { - rit.add(rs); - } - return rit; - } - - /** - * Get the number of regions in transition. - */ - public synchronized int getRegionsInTransitionCount() { - return regionsInTransition.size(); - } - - /** - * @return True if specified region in transition. - */ - public synchronized boolean isRegionInTransition(final HRegionInfo hri) { - return regionsInTransition.containsKey(hri.getEncodedName()); - } - - /** - * @return True if specified region in transition. - */ - public synchronized boolean isRegionInTransition(final String encodedName) { - return regionsInTransition.containsKey(encodedName); - } - - /** - * @return True if any region in transition. - */ - public synchronized boolean isRegionsInTransition() { - return !regionsInTransition.isEmpty(); - } - - /** - * @return True if hbase:meta table region is in transition. - */ - public synchronized boolean isMetaRegionInTransition() { - for (RegionState state : regionsInTransition.values()) { - if (state.getRegion().isMetaRegion()) return true; - } - return false; - } - - /** - * @return True if specified region assigned, and not in transition. - */ - public synchronized boolean isRegionOnline(final HRegionInfo hri) { - return !isRegionInTransition(hri) && regionAssignments.containsKey(hri); - } - - /** - * @return True if specified region offline/closed, but not in transition. - * If the region is not in the map, it is offline to us too. - */ - public synchronized boolean isRegionOffline(final HRegionInfo hri) { - return getRegionState(hri) == null || (!isRegionInTransition(hri) - && isRegionInState(hri, State.OFFLINE, State.CLOSED)); - } - - /** - * @return True if specified region is in one of the specified states. - */ - public boolean isRegionInState( - final HRegionInfo hri, final State... states) { - return isRegionInState(hri.getEncodedName(), states); - } - - /** - * @return True if specified region is in one of the specified states. - */ - public boolean isRegionInState( - final String encodedName, final State... states) { - RegionState regionState = getRegionState(encodedName); - return isOneOfStates(regionState, states); - } - - /** - * Wait for the state map to be updated by assignment manager. - */ - public synchronized void waitForUpdate( - final long timeout) throws InterruptedException { - this.wait(timeout); - } - - /** - * Get region transition state - */ - public RegionState getRegionTransitionState(final HRegionInfo hri) { - return getRegionTransitionState(hri.getEncodedName()); - } - - /** - * Get region transition state - */ - public synchronized RegionState - getRegionTransitionState(final String encodedName) { - return regionsInTransition.get(encodedName); - } - - /** - * Add a list of regions to RegionStates. If a region is split - * and offline, its state will be SPLIT. Otherwise, its state will - * be OFFLINE. Region already in RegionStates will be skipped. - */ - public void createRegionStates( - final List hris) { - for (HRegionInfo hri: hris) { - createRegionState(hri); - } - } - - /** - * Add a region to RegionStates. If the region is split - * and offline, its state will be SPLIT. Otherwise, its state will - * be OFFLINE. If it is already in RegionStates, this call has - * no effect, and the original state is returned. 
- */ - public RegionState createRegionState(final HRegionInfo hri) { - return createRegionState(hri, null, null, null); - } - - /** - * Add a region to RegionStates with the specified state. - * If the region is already in RegionStates, this call has - * no effect, and the original state is returned. - * - * @param hri the region info to create a state for - * @param newState the state to the region in set to - * @param serverName the server the region is transitioning on - * @param lastHost the last server that hosts the region - * @return the current state - */ - public synchronized RegionState createRegionState(final HRegionInfo hri, - State newState, ServerName serverName, ServerName lastHost) { - if (newState == null || (newState == State.OPEN && serverName == null)) { - newState = State.OFFLINE; - } - if (hri.isOffline() && hri.isSplit()) { - newState = State.SPLIT; - serverName = null; - } - String encodedName = hri.getEncodedName(); - RegionState regionState = regionStates.get(encodedName); - if (regionState != null) { - LOG.warn("Tried to create a state for a region already in RegionStates, " - + "used existing: " + regionState + ", ignored new: " + newState); - } else { - regionState = new RegionState(hri, newState, serverName); - putRegionState(regionState); - if (newState == State.OPEN) { - if (!serverName.equals(lastHost)) { - LOG.warn("Open region's last host " + lastHost - + " should be the same as the current one " + serverName - + ", ignored the last and used the current one"); - lastHost = serverName; - } - lastAssignments.put(encodedName, lastHost); - regionAssignments.put(hri, lastHost); - } else if (!isOneOfStates(regionState, State.MERGED, State.SPLIT, State.OFFLINE)) { - regionsInTransition.put(encodedName, regionState); - } - if (lastHost != null && newState != State.SPLIT) { - addToServerHoldings(lastHost, hri); - if (newState != State.OPEN) { - oldAssignments.put(encodedName, lastHost); - } - } - } - return regionState; - } - - private RegionState putRegionState(RegionState regionState) { - HRegionInfo hri = regionState.getRegion(); - String encodedName = hri.getEncodedName(); - TableName table = hri.getTable(); - RegionState oldState = regionStates.put(encodedName, regionState); - Map map = regionStatesTableIndex.get(table); - if (map == null) { - map = new HashMap(); - regionStatesTableIndex.put(table, map); - } - map.put(encodedName, regionState); - return oldState; - } - - /** - * Update a region state. It will be put in transition if not already there. - */ - public RegionState updateRegionState( - final HRegionInfo hri, final State state) { - RegionState regionState = getRegionState(hri.getEncodedName()); - return updateRegionState(hri, state, - regionState == null ? null : regionState.getServerName()); - } - - /** - * Update a region state. It will be put in transition if not already there. - */ - public RegionState updateRegionState( - final HRegionInfo hri, final State state, final ServerName serverName) { - return updateRegionState(hri, state, serverName, HConstants.NO_SEQNUM); - } - - public void regionOnline(final HRegionInfo hri, final ServerName serverName) { - regionOnline(hri, serverName, HConstants.NO_SEQNUM); - } - - /** - * A region is online, won't be in transition any more. - * We can't confirm it is really online on specified region server - * because it hasn't been put in region server's online region list yet. 
- */ - public void regionOnline(final HRegionInfo hri, final ServerName serverName, long openSeqNum) { - String encodedName = hri.getEncodedName(); - if (!serverManager.isServerOnline(serverName)) { - // This is possible if the region server dies before master gets a - // chance to handle ZK event in time. At this time, if the dead server - // is already processed by SSH, we should ignore this event. - // If not processed yet, ignore and let SSH deal with it. - LOG.warn("Ignored, " + encodedName + " was opened on a dead server: " + serverName); - return; - } - updateRegionState(hri, State.OPEN, serverName, openSeqNum); - - synchronized (this) { - RegionState regionState = regionsInTransition.remove(encodedName); - // When region is online and remove from regionsInTransition, - // update the RIT duration to assignment manager metrics - if (regionState != null && this.server.getAssignmentManager() != null) { - long ritDuration = System.currentTimeMillis() - regionState.getStamp() - + regionState.getRitDuration(); - this.server.getAssignmentManager().getAssignmentManagerMetrics() - .updateRitDuration(ritDuration); - } - ServerName oldServerName = regionAssignments.put(hri, serverName); - if (!serverName.equals(oldServerName)) { - if (LOG.isDebugEnabled()) { - LOG.debug("Onlined " + hri.getShortNameToLog() + " on " + serverName); - } - addToServerHoldings(serverName, hri); - addToReplicaMapping(hri); - if (oldServerName == null) { - oldServerName = oldAssignments.remove(encodedName); - } - if (oldServerName != null - && !oldServerName.equals(serverName) - && serverHoldings.containsKey(oldServerName)) { - LOG.info("Offlined " + hri.getShortNameToLog() + " from " + oldServerName); - removeFromServerHoldings(oldServerName, hri); - } - } - } - } - - private void addToServerHoldings(ServerName serverName, HRegionInfo hri) { - Set regions = serverHoldings.get(serverName); - if (regions == null) { - regions = new HashSet(); - serverHoldings.put(serverName, regions); - } - regions.add(hri); - } - - private void addToReplicaMapping(HRegionInfo hri) { - HRegionInfo defaultReplica = RegionReplicaUtil.getRegionInfoForDefaultReplica(hri); - Set replicas = - defaultReplicaToOtherReplicas.get(defaultReplica); - if (replicas == null) { - replicas = new HashSet(); - defaultReplicaToOtherReplicas.put(defaultReplica, replicas); - } - replicas.add(hri); - } - - private void removeFromServerHoldings(ServerName serverName, HRegionInfo hri) { - Set oldRegions = serverHoldings.get(serverName); - oldRegions.remove(hri); - if (oldRegions.isEmpty()) { - serverHoldings.remove(serverName); - } - } - - private void removeFromReplicaMapping(HRegionInfo hri) { - HRegionInfo defaultReplica = RegionReplicaUtil.getRegionInfoForDefaultReplica(hri); - Set replicas = defaultReplicaToOtherReplicas.get(defaultReplica); - if (replicas != null) { - replicas.remove(hri); - if (replicas.isEmpty()) { - defaultReplicaToOtherReplicas.remove(defaultReplica); - } - } - } - - /** - * A dead server's wals have been split so that all the regions - * used to be open on it can be safely assigned now. Mark them assignable. 
- */ - public synchronized void logSplit(final ServerName serverName) { - for (Iterator> it - = lastAssignments.entrySet().iterator(); it.hasNext();) { - Map.Entry e = it.next(); - if (e.getValue().equals(serverName)) { - it.remove(); - } - } - long now = System.currentTimeMillis(); - if (LOG.isDebugEnabled()) { - LOG.debug("Adding to log splitting servers " + serverName); - } - processedServers.put(serverName, Long.valueOf(now)); - Configuration conf = server.getConfiguration(); - long obsoleteTime = conf.getLong(LOG_SPLIT_TIME, DEFAULT_LOG_SPLIT_TIME); - // Doesn't have to be very accurate about the clean up time - if (now > lastProcessedServerCleanTime + obsoleteTime) { - lastProcessedServerCleanTime = now; - long cutoff = now - obsoleteTime; - for (Iterator> it - = processedServers.entrySet().iterator(); it.hasNext();) { - Map.Entry e = it.next(); - if (e.getValue().longValue() < cutoff) { - if (LOG.isDebugEnabled()) { - LOG.debug("Removed from log splitting servers " + e.getKey()); - } - it.remove(); - } - } - } - } - - /** - * Log split is done for a given region, so it is assignable now. - */ - public void logSplit(final HRegionInfo region) { - clearLastAssignment(region); - } - - public synchronized void clearLastAssignment(final HRegionInfo region) { - lastAssignments.remove(region.getEncodedName()); - } - - /** - * A region is offline, won't be in transition any more. - */ - public void regionOffline(final HRegionInfo hri) { - regionOffline(hri, null); - } - - /** - * A region is offline, won't be in transition any more. Its state - * should be the specified expected state, which can only be - * Split/Merged/Offline/null(=Offline)/SplittingNew/MergingNew. - */ - public void regionOffline( - final HRegionInfo hri, final State expectedState) { - Preconditions.checkArgument(expectedState == null - || RegionState.isUnassignable(expectedState), - "Offlined region should not be " + expectedState); - if (isRegionInState(hri, State.SPLITTING_NEW, State.MERGING_NEW)) { - // Remove it from all region maps - deleteRegion(hri); - return; - } - State newState = - expectedState == null ? State.OFFLINE : expectedState; - updateRegionState(hri, newState); - String encodedName = hri.getEncodedName(); - synchronized (this) { - regionsInTransition.remove(encodedName); - ServerName oldServerName = regionAssignments.remove(hri); - if (oldServerName != null && serverHoldings.containsKey(oldServerName)) { - if (newState == State.MERGED || newState == State.SPLIT - || hri.isMetaRegion() || tableStateManager.isTableState(hri.getTable(), - TableState.State.DISABLED, TableState.State.DISABLING)) { - // Offline the region only if it's merged/split, or the table is disabled/disabling. - // Otherwise, offline it from this server only when it is online on a different server. - LOG.info("Offlined " + hri.getShortNameToLog() + " from " + oldServerName); - removeFromServerHoldings(oldServerName, hri); - removeFromReplicaMapping(hri); - } else { - // Need to remember it so that we can offline it from this - // server when it is online on a different server. - oldAssignments.put(encodedName, oldServerName); - } - } - } - } - - /** - * A server is offline, all regions on it are dead. - */ - public List serverOffline(final ServerName sn) { - // Offline all regions on this server not already in transition. 
- List rits = new ArrayList(); - Set regionsToCleanIfNoMetaEntry = new HashSet(); - // Offline regions outside the loop and synchronized block to avoid - // ConcurrentModificationException and deadlock in case of meta anassigned, - // but RegionState a blocked. - Set regionsToOffline = new HashSet(); - synchronized (this) { - Set assignedRegions = serverHoldings.get(sn); - if (assignedRegions == null) { - assignedRegions = new HashSet(); - } - - for (HRegionInfo region : assignedRegions) { - // Offline open regions, no need to offline if SPLIT/MERGED/OFFLINE - if (isRegionOnline(region)) { - regionsToOffline.add(region); - } else if (isRegionInState(region, State.SPLITTING, State.MERGING)) { - LOG.debug("Offline splitting/merging region " + getRegionState(region)); - regionsToOffline.add(region); - } - } - - for (RegionState state : regionsInTransition.values()) { - HRegionInfo hri = state.getRegion(); - if (assignedRegions.contains(hri)) { - // Region is open on this region server, but in transition. - // This region must be moving away from this server, or splitting/merging. - // SSH will handle it, either skip assigning, or re-assign. - LOG.info("Transitioning " + state + " will be handled by ServerCrashProcedure for " + sn); - } else if (sn.equals(state.getServerName())) { - // Region is in transition on this region server, and this - // region is not open on this server. So the region must be - // moving to this server from another one (i.e. opening or - // pending open on this server, was open on another one. - // Offline state is also kind of pending open if the region is in - // transition. The region could be in failed_close state too if we have - // tried several times to open it while this region server is not reachable) - if (isOneOfStates(state, State.OPENING, State.PENDING_OPEN, - State.FAILED_OPEN, State.FAILED_CLOSE, State.OFFLINE)) { - LOG.info("Found region in " + state + - " to be reassigned by ServerCrashProcedure for " + sn); - rits.add(hri); - } else if (isOneOfStates(state, State.SPLITTING_NEW, State.MERGING_NEW)) { - regionsToCleanIfNoMetaEntry.add(state.getRegion()); - } else { - LOG.warn("THIS SHOULD NOT HAPPEN: unexpected " + state); - } - } - } - this.notifyAll(); - } - - for (HRegionInfo hri : regionsToOffline) { - regionOffline(hri); - } - - cleanIfNoMetaEntry(regionsToCleanIfNoMetaEntry); - return rits; - } - - /** - * This method does an RPC to hbase:meta. Do not call this method with a lock/synchronize held. - * @param hris The hris to check if empty in hbase:meta and if so, clean them up. - */ - private void cleanIfNoMetaEntry(Set hris) { - if (hris.isEmpty()) return; - for (HRegionInfo hri: hris) { - try { - // This is RPC to meta table. It is done while we have a synchronize on - // regionstates. No progress will be made if meta is not available at this time. - // This is a cleanup task. Not critical. - if (MetaTableAccessor.getRegion(server.getConnection(), hri.getEncodedNameAsBytes()) == - null) { - regionOffline(hri); - FSUtils.deleteRegionDir(server.getConfiguration(), hri); - } - } catch (IOException e) { - LOG.warn("Got exception while deleting " + hri + " directories from file system.", e); - } - } - } - - /** - * Gets the online regions of the specified table. - * This method looks at the in-memory state. It does not go to hbase:meta. - * Only returns online regions. If a region on this table has been - * closed during a disable, etc., it will be included in the returned list. 
- * So, the returned list may not necessarily be ALL regions in this table, its - * all the ONLINE regions in the table. - * @param tableName - * @return Online regions from tableName - */ - public synchronized List getRegionsOfTable(TableName tableName) { - List tableRegions = new ArrayList(); - // boundary needs to have table's name but regionID 0 so that it is sorted - // before all table's regions. - HRegionInfo boundary = new HRegionInfo(tableName, null, null, false, 0L); - for (HRegionInfo hri: regionAssignments.tailMap(boundary).keySet()) { - if(!hri.getTable().equals(tableName)) break; - tableRegions.add(hri); - } - return tableRegions; - } - - /** - * Gets current state of all regions of the table. - * This method looks at the in-memory state. It does not go to hbase:meta. - * Method guaranteed to return keys for all states - * in {@link org.apache.hadoop.hbase.master.RegionState.State} - * - * @param tableName - * @return Online regions from tableName - */ - public synchronized Map> - getRegionByStateOfTable(TableName tableName) { - Map> tableRegions = - new HashMap>(); - for (State state : State.values()) { - tableRegions.put(state, new ArrayList()); - } - Map indexMap = regionStatesTableIndex.get(tableName); - if (indexMap == null) - return tableRegions; - for (RegionState regionState : indexMap.values()) { - tableRegions.get(regionState.getState()).add(regionState.getRegion()); - } - return tableRegions; - } - - /** - * Wait on region to clear regions-in-transition. - *

- * If the region isn't in transition, returns immediately. Otherwise, method - * blocks until the region is out of transition. - */ - public synchronized void waitOnRegionToClearRegionsInTransition( - final HRegionInfo hri) throws InterruptedException { - if (!isRegionInTransition(hri)) return; - - while(!server.isStopped() && isRegionInTransition(hri)) { - RegionState rs = getRegionState(hri); - LOG.info("Waiting on " + rs + " to clear regions-in-transition"); - waitForUpdate(100); - } - - if (server.isStopped()) { - LOG.info("Giving up wait on region in " + - "transition because stoppable.isStopped is set"); - } - } - - /** - * A table is deleted. Remove its regions from all internal maps. - * We loop through all regions assuming we don't delete tables too much. - */ - public void tableDeleted(final TableName tableName) { - Set regionsToDelete = new HashSet(); - synchronized (this) { - for (RegionState state: regionStates.values()) { - HRegionInfo region = state.getRegion(); - if (region.getTable().equals(tableName)) { - regionsToDelete.add(region); - } - } - } - for (HRegionInfo region: regionsToDelete) { - deleteRegion(region); - } - } - - /** - * Get a copy of all regions assigned to a server - */ - public synchronized Set getServerRegions(ServerName serverName) { - Set regions = serverHoldings.get(serverName); - if (regions == null) return null; - return new HashSet(regions); - } - - /** - * Remove a region from all state maps. - */ - @VisibleForTesting - public synchronized void deleteRegion(final HRegionInfo hri) { - String encodedName = hri.getEncodedName(); - regionsInTransition.remove(encodedName); - regionStates.remove(encodedName); - TableName table = hri.getTable(); - Map indexMap = regionStatesTableIndex.get(table); - indexMap.remove(encodedName); - if (indexMap.isEmpty()) - regionStatesTableIndex.remove(table); - lastAssignments.remove(encodedName); - ServerName sn = regionAssignments.remove(hri); - if (sn != null) { - Set regions = serverHoldings.get(sn); - regions.remove(hri); - } - } - - /** - * Checking if a region was assigned to a server which is not online now. - * If so, we should hold re-assign this region till SSH has split its wals. - * Once logs are split, the last assignment of this region will be reset, - * which means a null last assignment server is ok for re-assigning. - * - * A region server could be dead but we don't know it yet. We may - * think it's online falsely. Therefore if a server is online, we still - * need to confirm it reachable and having the expected start code. - */ - synchronized boolean wasRegionOnDeadServer(final String encodedName) { - ServerName server = lastAssignments.get(encodedName); - return isServerDeadAndNotProcessed(server); - } - - synchronized boolean isServerDeadAndNotProcessed(ServerName server) { - if (server == null) return false; - if (serverManager.isServerOnline(server)) { - String hostAndPort = server.getHostAndPort(); - long startCode = server.getStartcode(); - Long deadCode = deadServers.get(hostAndPort); - if (deadCode == null || startCode > deadCode.longValue()) { - if (serverManager.isServerReachable(server)) { - return false; - } - // The size of deadServers won't grow unbounded. - deadServers.put(hostAndPort, Long.valueOf(startCode)); - } - // Watch out! If the server is not dead, the region could - // remain unassigned. That's why ServerManager#isServerReachable - // should use some retry. - // - // We cache this info since it is very unlikely for that - // instance to come back up later on. 
We don't want to expire - // the server since we prefer to let it die naturally. - LOG.warn("Couldn't reach online server " + server); - } - // Now, we know it's dead. Check if it's processed - return !processedServers.containsKey(server); - } - - /** - * Get the last region server a region was on for purpose of re-assignment, - * i.e. should the re-assignment be held back till log split is done? - */ - synchronized ServerName getLastRegionServerOfRegion(final String encodedName) { - return lastAssignments.get(encodedName); - } - - synchronized void setLastRegionServerOfRegions( - final ServerName serverName, final List regionInfos) { - for (HRegionInfo hri: regionInfos) { - setLastRegionServerOfRegion(serverName, hri.getEncodedName()); - } - } - - synchronized void setLastRegionServerOfRegion( - final ServerName serverName, final String encodedName) { - lastAssignments.put(encodedName, serverName); - } - - synchronized boolean isRegionOnServer( - final HRegionInfo hri, final ServerName serverName) { - Set regions = serverHoldings.get(serverName); - return regions == null ? false : regions.contains(hri); - } - - public void prepareAssignDaughters(HRegionInfo a, HRegionInfo b) { - synchronized (this) { - if (isRegionInState(a, State.SPLITTING_NEW)) { - updateRegionState(a, State.OFFLINE, null); - } - if (isRegionInState(b, State.SPLITTING_NEW)) { - updateRegionState(b, State.OFFLINE, null); - } - } - } - - public void prepareAssignMergedRegion(HRegionInfo mergedRegion) { - synchronized (this) { - if (isRegionInState(mergedRegion, State.MERGING_NEW)) { - updateRegionState(mergedRegion, State.OFFLINE, null); - } - } - } - - void splitRegion(HRegionInfo p, - HRegionInfo a, HRegionInfo b, ServerName sn) throws IOException { - - regionStateStore.splitRegion(p, a, b, sn, getRegionReplication(p)); - synchronized (this) { - // After PONR, split is considered to be done. - // Update server holdings to be aligned with the meta. - Set regions = serverHoldings.get(sn); - if (regions == null) { - throw new IllegalStateException(sn + " should host some regions"); - } - regions.remove(p); - regions.add(a); - regions.add(b); - } - } - - void mergeRegions(HRegionInfo p, - HRegionInfo a, HRegionInfo b, ServerName sn) throws IOException { - regionStateStore.mergeRegions(p, a, b, sn, getRegionReplication(a)); - synchronized (this) { - // After PONR, merge is considered to be done. - // Update server holdings to be aligned with the meta. - Set regions = serverHoldings.get(sn); - if (regions == null) { - throw new IllegalStateException(sn + " should host some regions"); - } - regions.remove(a); - regions.remove(b); - regions.add(p); - } - } - - private int getRegionReplication(HRegionInfo r) throws IOException { - if (tableStateManager != null) { - HTableDescriptor htd = server.getTableDescriptors().get(r.getTable()); - if (htd != null) { - return htd.getRegionReplication(); - } - } - return 1; - } - - /** - * At cluster clean re/start, mark all user regions closed except those of tables - * that are excluded, such as disabled/disabling/enabling tables. All user regions - * and their previous locations are returned. 
- */ - synchronized Map closeAllUserRegions(Set excludedTables) { - boolean noExcludeTables = excludedTables == null || excludedTables.isEmpty(); - Set toBeClosed = new HashSet(regionStates.size()); - for(RegionState state: regionStates.values()) { - HRegionInfo hri = state.getRegion(); - if (state.isSplit() || hri.isSplit()) { - continue; - } - TableName tableName = hri.getTable(); - if (!TableName.META_TABLE_NAME.equals(tableName) - && (noExcludeTables || !excludedTables.contains(tableName))) { - toBeClosed.add(hri); - } - } - Map allUserRegions = - new HashMap(toBeClosed.size()); - for (HRegionInfo hri: toBeClosed) { - RegionState regionState = updateRegionState(hri, State.CLOSED); - allUserRegions.put(hri, regionState.getServerName()); - } - return allUserRegions; - } - - /** - * Compute the average load across all region servers. - * Currently, this uses a very naive computation - just uses the number of - * regions being served, ignoring stats about number of requests. - * @return the average load - */ - protected synchronized double getAverageLoad() { - int numServers = 0, totalLoad = 0; - for (Map.Entry> e: serverHoldings.entrySet()) { - Set regions = e.getValue(); - ServerName serverName = e.getKey(); - int regionCount = regions.size(); - if (serverManager.isServerOnline(serverName)) { - totalLoad += regionCount; - numServers++; - } - } - if (numServers > 1) { - // The master region server holds only a couple regions. - // Don't consider this server in calculating the average load - // if there are other region servers to avoid possible confusion. - Set hris = serverHoldings.get(server.getServerName()); - if (hris != null) { - totalLoad -= hris.size(); - numServers--; - } - } - return numServers == 0 ? 0.0 : - (double)totalLoad / (double)numServers; - } - - protected Map>> getAssignmentsByTable() { - return getAssignmentsByTable(false); - } - - /** - * This is an EXPENSIVE clone. Cloning though is the safest thing to do. - * Can't let out original since it can change and at least the load balancer - * wants to iterate this exported list. We need to synchronize on regions - * since all access to this.servers is under a lock on this.regions. - * @param forceByCluster a flag to force to aggregate the server-load to the cluster level - * @return A clone of current assignments by table. - */ - protected Map>> getAssignmentsByTable( - boolean forceByCluster) { - Map>> result; - synchronized (this) { - result = getTableRSRegionMap(server.getConfiguration().getBoolean( - HConstants.HBASE_MASTER_LOADBALANCE_BYTABLE,false) && !forceByCluster); - } - Map - onlineSvrs = serverManager.getOnlineServers(); - // Take care of servers w/o assignments, and remove servers in draining mode - List drainingServers = this.serverManager.getDrainingServersList(); - for (Map> map: result.values()) { - for (ServerName svr: onlineSvrs.keySet()) { - if (!map.containsKey(svr)) { - map.put(svr, new ArrayList()); - } - } - map.keySet().removeAll(drainingServers); - } - return result; - } - - private Map>> getTableRSRegionMap(Boolean bytable){ - Map>> result = - new HashMap>>(); - for (Map.Entry> e: serverHoldings.entrySet()) { - for (HRegionInfo hri: e.getValue()) { - if (hri.isMetaRegion()) continue; - TableName tablename = bytable ? 
hri.getTable() : TableName.valueOf(HConstants.ENSEMBLE_TABLE_NAME); - Map> svrToRegions = result.get(tablename); - if (svrToRegions == null) { - svrToRegions = new HashMap>(serverHoldings.size()); - result.put(tablename, svrToRegions); - } - List regions = svrToRegions.get(e.getKey()); - if (regions == null) { - regions = new ArrayList(); - svrToRegions.put(e.getKey(), regions); - } - regions.add(hri); - } - } - return result; - } - - public RegionState getRegionState(final HRegionInfo hri) { - return getRegionState(hri.getEncodedName()); - } - - /** - * Returns a clone of region assignments per server - * @return a Map of ServerName to a List of HRegionInfo's - */ - protected synchronized Map> getRegionAssignmentsByServer() { - Map> regionsByServer = - new HashMap>(serverHoldings.size()); - for (Map.Entry> e: serverHoldings.entrySet()) { - regionsByServer.put(e.getKey(), new ArrayList(e.getValue())); - } - return regionsByServer; - } - - public synchronized RegionState getRegionState(final String encodedName) { - return regionStates.get(encodedName); - } - - /** - * Get the HRegionInfo from cache, if not there, from the hbase:meta table. - * Be careful. Does RPC. Do not hold a lock or synchronize when you call this method. - * @param regionName - * @return HRegionInfo for the region - */ - @SuppressWarnings("deprecation") - protected HRegionInfo getRegionInfo(final byte [] regionName) { - String encodedName = HRegionInfo.encodeRegionName(regionName); - RegionState regionState = getRegionState(encodedName); - if (regionState != null) { - return regionState.getRegion(); - } - - try { - Pair p = - MetaTableAccessor.getRegion(server.getConnection(), regionName); - HRegionInfo hri = p == null ? null : p.getFirst(); - if (hri != null) { - createRegionState(hri); - } - return hri; - } catch (IOException e) { - server.abort("Aborting because error occurred while reading " - + Bytes.toStringBinary(regionName) + " from hbase:meta", e); - return null; - } - } - - static boolean isOneOfStates(RegionState regionState, State... states) { - State s = regionState != null ? regionState.getState() : null; - for (State state: states) { - if (s == state) return true; - } - return false; - } - - /** - * Update a region state. It will be put in transition if not already there. - */ - private RegionState updateRegionState(final HRegionInfo hri, - final RegionState.State state, final ServerName serverName, long openSeqNum) { - if (state == RegionState.State.FAILED_CLOSE || state == RegionState.State.FAILED_OPEN) { - LOG.warn("Failed to open/close " + hri.getShortNameToLog() - + " on " + serverName + ", set to " + state); - } - - String encodedName = hri.getEncodedName(); - RegionState regionState = new RegionState( - hri, state, System.currentTimeMillis(), serverName); - RegionState oldState = getRegionState(encodedName); - if (!regionState.equals(oldState)) { - LOG.info("Transition " + oldState + " to " + regionState); - // Persist region state before updating in-memory info, if needed - regionStateStore.updateRegionState(openSeqNum, regionState, oldState); - } - - synchronized (this) { - RegionState oldRegionState = regionsInTransition.put(encodedName, regionState); - // When region transform old region state to new region state, - // accumulate the RIT duration to new region state. - if (oldRegionState != null) { - regionState.updateRitDuration(oldRegionState.getStamp()); - } - putRegionState(regionState); - - // For these states, region should be properly closed. - // There should be no log splitting issue. 
- if ((state == State.CLOSED || state == State.MERGED - || state == State.SPLIT) && lastAssignments.containsKey(encodedName)) { - ServerName last = lastAssignments.get(encodedName); - if (last.equals(serverName)) { - lastAssignments.remove(encodedName); - } else { - LOG.warn(encodedName + " moved to " + state + " on " - + serverName + ", expected " + last); - } - } - - // Once a region is opened, record its last assignment right away. - if (serverName != null && state == State.OPEN) { - ServerName last = lastAssignments.get(encodedName); - if (!serverName.equals(last)) { - lastAssignments.put(encodedName, serverName); - if (last != null && isServerDeadAndNotProcessed(last)) { - LOG.warn(encodedName + " moved to " + serverName - + ", while it's previous host " + last - + " is dead but not processed yet"); - } - } - } - - // notify the change - this.notifyAll(); - } - return regionState; - } -} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java index 5540c70..25d846a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java @@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.master; import static org.apache.hadoop.hbase.util.CollectionUtils.computeIfAbsent; -import com.google.common.annotations.VisibleForTesting; import java.io.IOException; import java.net.InetAddress; import java.util.ArrayList; @@ -37,6 +36,7 @@ import java.util.concurrent.ConcurrentNavigableMap; import java.util.concurrent.ConcurrentSkipListMap; import java.util.concurrent.CopyOnWriteArrayList; import java.util.function.Predicate; + import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -55,10 +55,7 @@ import org.apache.hadoop.hbase.client.RetriesExhaustedException; import org.apache.hadoop.hbase.ipc.HBaseRpcController; import org.apache.hadoop.hbase.ipc.RpcControllerFactory; import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer; -import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; -import org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure; import org.apache.hadoop.hbase.monitoring.MonitoredTask; -import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.regionserver.RegionOpeningState; import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException; @@ -83,6 +80,8 @@ import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.apache.zookeeper.KeeperException; +import com.google.common.annotations.VisibleForTesting; + /** * The ServerManager class manages info about region servers. *

@@ -315,7 +314,8 @@ public class ServerManager { } } - void regionServerReport(ServerName sn, + @VisibleForTesting + public void regionServerReport(ServerName sn, ServerLoad sl) throws YouAreDeadException { checkIsDead(sn, "REPORT"); if (null == this.onlineServers.replace(sn, sl)) { @@ -610,12 +610,7 @@ public class ServerManager { return; } - boolean carryingMeta = master.getAssignmentManager().isCarryingMeta(serverName); - ProcedureExecutor procExec = this.master.getMasterProcedureExecutor(); - procExec.submitProcedure(new ServerCrashProcedure( - procExec.getEnvironment(), serverName, true, carryingMeta)); - LOG.debug("Added=" + serverName + - " to dead servers, submitted shutdown handler to be executed meta=" + carryingMeta); + master.getAssignmentManager().submitServerCrash(serverName, true); // Tell our listeners that a server was removed if (!this.listeners.isEmpty()) { @@ -656,9 +651,7 @@ public class ServerManager { } this.deadservers.add(serverName); - ProcedureExecutor procExec = this.master.getMasterProcedureExecutor(); - procExec.submitProcedure(new ServerCrashProcedure( - procExec.getEnvironment(), serverName, shouldSplitWal, false)); + master.getAssignmentManager().submitServerCrash(serverName, shouldSplitWal); } /** @@ -930,7 +923,7 @@ public class ServerManager { * @throws IOException * @throws RetriesExhaustedException wrapping a ConnectException if failed */ - private AdminService.BlockingInterface getRsAdmin(final ServerName sn) + public AdminService.BlockingInterface getRsAdmin(final ServerName sn) throws IOException { AdminService.BlockingInterface admin = this.rsAdmins.get(sn); if (admin == null) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java index 2f06972..471eab6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java @@ -313,8 +313,9 @@ public class TableNamespaceManager { } private boolean isTableAssigned() { - return !masterServices.getAssignmentManager() - .getRegionStates().getRegionsOfTable(TableName.NAMESPACE_TABLE_NAME).isEmpty(); + // TODO: we have a better way now (wait on event) + return masterServices.getAssignmentManager() + .getRegionStates().hasTableRegionStates(TableName.NAMESPACE_TABLE_NAME); } public void validateTableAndRegionCount(NamespaceDescriptor desc) throws IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/UnAssignCallable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/UnAssignCallable.java deleted file mode 100644 index ccff6f0..0000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/UnAssignCallable.java +++ /dev/null @@ -1,47 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.master; - -import java.util.concurrent.Callable; - -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.HRegionInfo; - -/** - * A callable object that invokes the corresponding action that needs to be - * taken for unassignment of a region in transition. Implementing as future - * callable we are able to act on the timeout asynchronously. - */ -@InterfaceAudience.Private -public class UnAssignCallable implements Callable { - private AssignmentManager assignmentManager; - - private HRegionInfo hri; - - public UnAssignCallable(AssignmentManager assignmentManager, HRegionInfo hri) { - this.assignmentManager = assignmentManager; - this.hri = hri; - } - - @Override - public Object call() throws Exception { - assignmentManager.unassign(hri); - return null; - } -} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignProcedure.java new file mode 100644 index 0000000..6ccfddb --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignProcedure.java @@ -0,0 +1,269 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.master.assignment; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.RetriesExhaustedException; +import org.apache.hadoop.hbase.exceptions.UnexpectedStateException; +import org.apache.hadoop.hbase.master.assignment.RegionStates.RegionStateNode; +import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; +import org.apache.hadoop.hbase.master.procedure.RSProcedureDispatcher.RegionOpenOperation; +import org.apache.hadoop.hbase.master.RegionState.State; +import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException; +import org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher.RemoteOperation; +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.AssignRegionStateData; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionTransitionState; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode; + +/** + * Procedure that describe the assignment of a single region. + * There can only be one RegionTransitionProcedure per region running at a time + * since each procedure takes a lock on the region. + * + *

The Assign starts by pushing the "assign" operation to the AssignmentManager + * and then will go into a "waiting" state. + * The AM will batch the "assign" requests and ask the Balancer where to put + * the region (the various policies will be respected: retain, round-robin, random). + * Once the AM and the balancer have found a place for the region, the procedure + * will be resumed and an "open region" request will be placed in the Remote Dispatcher + * queue, and the procedure once again will go into a "waiting" state. + * The Remote Dispatcher will batch the various requests for that server and + * send them to the RS for execution. + * The RS will complete the open operation by calling master.reportRegionStateTransition(). + * The AM will intercept the transition report and notify the procedure. + * The procedure will finish the assignment by publishing the new state to meta, + * or it will retry the assignment. + *
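+ * <p>A minimal sketch, assuming a MasterServices handle named "master" and a region "hri"
+ * (both hypothetical here), of how an assign might be driven end-to-end; it mirrors what
+ * AssignmentManager#assign() does through ProcedureSyncWait:
+ * <pre>
+ *   // build the per-region procedure; "false" means do not force a new plan
+ *   AssignProcedure proc = new AssignProcedure(hri, false);
+ *   // submit to the master procedure executor and block until it completes
+ *   ProcedureSyncWait.submitAndWaitProcedure(master.getMasterProcedureExecutor(), proc);
+ * </pre>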

This procedure does not rollback when beyond the first + * REGION_TRANSITION_QUEUE step; it will press on trying to assign in the face of + * failure. + */ +@InterfaceAudience.Private +public class AssignProcedure extends RegionTransitionProcedure { + private static final Log LOG = LogFactory.getLog(AssignProcedure.class); + + private ServerName targetServer = null; + private boolean forceNewPlan = false; + + public AssignProcedure() { + // Required by the Procedure framework to create the procedure on replay + super(); + } + + public AssignProcedure(final HRegionInfo regionInfo) { + this(regionInfo, false); + } + + public AssignProcedure(final HRegionInfo regionInfo, final boolean forceNewPlan) { + super(regionInfo); + this.forceNewPlan = forceNewPlan; + this.targetServer = null; + } + + public AssignProcedure(final HRegionInfo regionInfo, final ServerName targetServer) { + super(regionInfo); + this.forceNewPlan = false; + this.targetServer = targetServer; + } + + @Override + public TableOperationType getTableOperationType() { + return TableOperationType.ASSIGN; + } + + @Override + protected boolean isRollbackSupported(final RegionTransitionState state) { + switch (state) { + case REGION_TRANSITION_QUEUE: + return true; + default: + return false; + } + } + + @Override + public void serializeStateData(final OutputStream stream) throws IOException { + final AssignRegionStateData.Builder state = AssignRegionStateData.newBuilder() + .setTransitionState(getTransitionState()) + .setRegionInfo(HRegionInfo.convert(getRegionInfo())); + if (forceNewPlan) { + state.setForceNewPlan(true); + } + if (targetServer != null) { + state.setTargetServer(ProtobufUtil.toServerName(targetServer)); + } + state.build().writeDelimitedTo(stream); + } + + @Override + public void deserializeStateData(final InputStream stream) throws IOException { + final AssignRegionStateData state = AssignRegionStateData.parseDelimitedFrom(stream); + setTransitionState(state.getTransitionState()); + setRegionInfo(HRegionInfo.convert(state.getRegionInfo())); + forceNewPlan = state.getForceNewPlan(); + if (state.hasTargetServer()) { + targetServer = ProtobufUtil.toServerName(state.getTargetServer()); + } + } + + @Override + protected boolean startTransition(final MasterProcedureEnv env, final RegionStateNode regionNode) + throws IOException { + LOG.info("Start " + regionNode); + // If the region is already open we can't do much... + if (regionNode.isInState(State.OPEN) && isServerOnline(env, regionNode)) { + LOG.info("Already assigned: " + regionNode); + return false; + } + + // If we haven't started the operation yet, we can abort + if (aborted.get() && regionNode.isInState(State.CLOSED, State.OFFLINE)) { + if (incrementAndCheckMaxAttempts(env, regionNode)) { + regionNode.setState(State.FAILED_OPEN); + setFailure(getClass().getSimpleName(), + new RetriesExhaustedException("Max attempts exceeded")); + } else { + setAbortFailure(getClass().getSimpleName(), "Abort requested"); + } + return false; + } + + // send assign (add into assign-pool). 
region is now in OFFLINE state + regionNode.setState(State.OFFLINE); + if (forceNewPlan) { + regionNode.setRegionLocation(null); + } else if (targetServer != null) { + regionNode.setRegionLocation(targetServer); + } + + env.getAssignmentManager().queueAssign(regionNode); + return true; + } + + @Override + protected boolean updateTransition(final MasterProcedureEnv env, final RegionStateNode regionNode) + throws IOException, ProcedureSuspendedException { + // TODO: crash if targetServer is specified and not online + // which is also the case when the balancer provided us with a different location. + LOG.info("Update " + regionNode); + if (regionNode.getRegionLocation() == null) { + setTransitionState(RegionTransitionState.REGION_TRANSITION_QUEUE); + return true; + } + + if (!isServerOnline(env, regionNode)) { + // TODO: is this correct? should we wait the chore/ssh? + LOG.info("Server not online: " + regionNode); + setTransitionState(RegionTransitionState.REGION_TRANSITION_QUEUE); + return true; + } + + // Wait until server reported. If we have resumed the region may already be assigned. + if (env.getAssignmentManager().waitServerReportEvent(regionNode.getRegionLocation(), this)) { + throw new ProcedureSuspendedException(); + } + + if (regionNode.isInState(State.OPEN)) { + LOG.info("Already assigned: " + regionNode); + return false; + } + + // region is now in OPENING state + env.getAssignmentManager().markRegionAsOpening(regionNode); + + // TODO: Requires a migration to be open by the RS? + // regionNode.getFormatVersion() + + // Add the open region operation to the server dispatch queue. + // The pending open will be dispatched to the server together with the other + // pending operation for that server. + addToRemoteDispatcher(env, regionNode.getRegionLocation()); + return true; + } + + @Override + protected void completeTransition(final MasterProcedureEnv env, final RegionStateNode regionNode) + throws IOException { + LOG.info("Completed " + regionNode); + env.getAssignmentManager().markRegionAsOpened(regionNode); + } + + @Override + protected void reportTransition(final MasterProcedureEnv env, final RegionStateNode regionNode, + final TransitionCode code, final long openSeqNum) throws UnexpectedStateException { + switch (code) { + case OPENED: + LOG.debug("Report " + code + " openSeqNum=" + openSeqNum + ", " + regionNode); + if (openSeqNum < 0) { + throw new UnexpectedStateException("Report unexpected " + code + " transition openSeqNum=" + + openSeqNum + ", " + regionNode); + } + regionNode.setOpenSeqNum(openSeqNum); + setTransitionState(RegionTransitionState.REGION_TRANSITION_FINISH); + break; + case FAILED_OPEN: + LOG.warn("Report " + code + " openSeqNum=" + openSeqNum + ", " + regionNode); + handleFailure(env, regionNode); + break; + default: + throw new UnexpectedStateException("Report unexpected " + code + + " transition openSeqNum=" + openSeqNum + ", " + regionNode + + ", expected OPENED or FAILED_OPEN."); + } + } + + private void handleFailure(final MasterProcedureEnv env, final RegionStateNode regionNode) { + if (incrementAndCheckMaxAttempts(env, regionNode)) { + aborted.set(true); + } + regionNode.setState(State.OFFLINE); + regionNode.setRegionLocation(null); + setTransitionState(RegionTransitionState.REGION_TRANSITION_QUEUE); + } + + private boolean incrementAndCheckMaxAttempts(final MasterProcedureEnv env, + final RegionStateNode regionNode) { + int retries = env.getAssignmentManager().incrementAndGetFailedOpen(regionNode.getRegionInfo()); + LOG.info("RETRY " + retries + ": 
" + this); + return retries >= env.getAssignmentManager().getAssignMaxAttempts(); + } + + @Override + public RemoteOperation remoteCallBuild(final MasterProcedureEnv env, final ServerName serverName) { + assert serverName.equals(getRegionState(env).getRegionLocation()); + return new RegionOpenOperation(this, getRegionInfo(), + env.getAssignmentManager().getFavoredNodes(getRegionInfo()), false); + } + + @Override + protected void remoteCallFailed(final MasterProcedureEnv env, final RegionStateNode regionNode, + final IOException exception) { + // TODO: put the server in the bad list? + handleFailure(env, regionNode); + } +} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java new file mode 100644 index 0000000..b5de840 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java @@ -0,0 +1,1520 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.master.assignment; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.locks.Condition; +import java.util.concurrent.locks.ReentrantLock; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseIOException; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HRegionLocation; +import org.apache.hadoop.hbase.MetaTableAccessor; +import org.apache.hadoop.hbase.PleaseHoldException; +import org.apache.hadoop.hbase.RegionLocations; +import org.apache.hadoop.hbase.RegionStateListener; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.TableState; +import org.apache.hadoop.hbase.exceptions.UnexpectedStateException; +import org.apache.hadoop.hbase.favored.FavoredNodeLoadBalancer; +import org.apache.hadoop.hbase.master.AssignmentListener; +import org.apache.hadoop.hbase.master.LoadBalancer; +import org.apache.hadoop.hbase.master.MasterServices; +import org.apache.hadoop.hbase.master.MetricsAssignmentManager; +import org.apache.hadoop.hbase.master.RegionPlan; +import org.apache.hadoop.hbase.master.RegionState; +import org.apache.hadoop.hbase.master.RegionState.State; +import org.apache.hadoop.hbase.master.ServerListener; +import org.apache.hadoop.hbase.master.TableStateManager; +import org.apache.hadoop.hbase.master.assignment.RegionStates.RegionStateNode; +import org.apache.hadoop.hbase.master.assignment.RegionStates.ServerState; +import org.apache.hadoop.hbase.master.assignment.RegionStates.ServerStateNode; +import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; +import org.apache.hadoop.hbase.master.procedure.MasterProcedureScheduler; +import org.apache.hadoop.hbase.master.procedure.ProcedureSyncWait; +import org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure; +import org.apache.hadoop.hbase.procedure2.Procedure; +import org.apache.hadoop.hbase.procedure2.ProcedureEvent; +import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; +import org.apache.hadoop.hbase.procedure2.ProcedureInMemoryChore; +import org.apache.hadoop.hbase.procedure2.util.StringUtils; +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.hadoop.hbase.util.Pair; +import 
org.apache.hadoop.hbase.util.Threads; + +import com.google.common.annotations.VisibleForTesting; + +/** + * The AssignmentManager is the coordinator for region assign/unassign operations. + *

+ *  - In-memory states of regions and servers are stored in RegionStates.
+ *  - hbase:meta state updates are handled by RegionStateStore.
+ * Regions are created by CreateTableProcedure, Split... + * Regions are deleted by DeleteTableProcedure, Merge... + *
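+ * <p>A short usage sketch, assuming a MasterServices handle named "master", a region "hri",
+ * and ServerName handles "sourceServer"/"destServer" (all hypothetical); the calls are the
+ * synchronous and asynchronous entry points defined further down in this class:
+ * <pre>
+ *   AssignmentManager am = master.getAssignmentManager();
+ *   am.assign(hri);                                              // synchronous assign of one region
+ *   am.moveAsync(new RegionPlan(hri, sourceServer, destServer)); // move via MoveRegionProcedure
+ * </pre>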

Do we report same AM metrics as we used too? We do it all in here now. + */ +@InterfaceAudience.Private +public class AssignmentManager implements ServerListener { + private static final Log LOG = LogFactory.getLog(AssignmentManager.class); + + // TODO: AMv2 + // - handle region migration + // - handle meta assignment first + // - handle sys table assignment first (e.g. acl, namespace) + // - handle table priorities + + public static final String BOOTSTRAP_THREAD_POOL_SIZE_CONF_KEY = + "hbase.assignment.bootstrap.thread.pool.size"; + + private static final int DEFAULT_BOOTSTRAP_THREAD_POOL_SIZE = 16; + + public static final String ASSIGN_DISPATCH_WAIT_MSEC_CONF_KEY = + "hbase.assignment.dispatch.wait.msec"; + private static final int DEFAULT_ASSIGN_DISPATCH_WAIT_MSEC = 150; + + public static final String ASSIGN_DISPATCH_WAITQ_MAX_CONF_KEY = + "hbase.assignment.dispatch.wait.queue.max.size"; + private static final int DEFAULT_ASSIGN_DISPATCH_WAITQ_MAX = 100; + + public static final String RIT_CHORE_INTERVAL_MSEC_CONF_KEY = + "hbase.assignment.rit.chore.interval.msec"; + private static final int DEFAULT_RIT_CHORE_INTERVAL_MSEC = 5 * 1000; + + public static final String ASSIGN_MAX_ATTEMPTS = + "hbase.assignment.maximum.attempts"; + private static final int DEFAULT_ASSIGN_MAX_ATTEMPTS = 10; + + // Used by TestSplitTransactionOnClusterf + public static boolean TEST_SKIP_SPLIT_HANDLING = false; + + private final ProcedureEvent metaInitializedEvent = new ProcedureEvent<>("meta initialized"); + private final ProcedureEvent metaLoadEvent = new ProcedureEvent<>("meta load"); + + /** + * Indicator that AssignmentManager has recovered the region states so + * that ServerCrashProcedure can be fully enabled and re-assign regions + * of dead servers. So that when re-assignment happens, AssignmentManager + * has proper region states. + */ + private final ProcedureEvent failoverCleanupDone = new ProcedureEvent<>("failover cleanup"); + + /** Listeners that are called on assignment events. */ + private final CopyOnWriteArrayList listeners = + new CopyOnWriteArrayList(); + + // TODO: why is this different from the listeners (carried over from the old AM) + private RegionStateListener regionStateListener; + + private final MetricsAssignmentManager metrics; + private final RegionInTransitionChore ritChore; + private final MasterServices master; + + private final AtomicBoolean running = new AtomicBoolean(false); + private final RegionStates regionStateMap = new RegionStates(); + private final RegionStateStore regionStateStore; + + private final boolean shouldAssignRegionsWithFavoredNodes; + private final int assignDispatchWaitQueueMaxSize; + private final int assignDispatchWaitMillis; + private final int assignMaxAttempts; + + private Thread assignThread; + + public AssignmentManager(final MasterServices master) { + this(master, new RegionStateStore(master)); + } + + public AssignmentManager(final MasterServices master, final RegionStateStore stateStore) { + this.master = master; + this.regionStateStore = stateStore; + this.metrics = new MetricsAssignmentManager(); + + final Configuration conf = master.getConfiguration(); + + // Only read favored nodes if using the favored nodes load balancer. 
+ this.shouldAssignRegionsWithFavoredNodes = FavoredNodeLoadBalancer.class.isAssignableFrom( + conf.getClass(HConstants.HBASE_MASTER_LOADBALANCER_CLASS, Object.class)); + + this.assignDispatchWaitMillis = conf.getInt(ASSIGN_DISPATCH_WAIT_MSEC_CONF_KEY, + DEFAULT_ASSIGN_DISPATCH_WAIT_MSEC); + this.assignDispatchWaitQueueMaxSize = conf.getInt(ASSIGN_DISPATCH_WAITQ_MAX_CONF_KEY, + DEFAULT_ASSIGN_DISPATCH_WAITQ_MAX); + + this.assignMaxAttempts = Math.max(1, conf.getInt(ASSIGN_MAX_ATTEMPTS, + DEFAULT_ASSIGN_MAX_ATTEMPTS)); + + int ritChoreInterval = conf.getInt(RIT_CHORE_INTERVAL_MSEC_CONF_KEY, + DEFAULT_RIT_CHORE_INTERVAL_MSEC); + this.ritChore = new RegionInTransitionChore(ritChoreInterval); + } + + public void start() throws IOException { + if (!running.compareAndSet(false, true)) { + return; + } + + LOG.info("Starting assignment manager"); + + // Register Server Listener + master.getServerManager().registerListener(this); + + // Start the RegionStateStore + regionStateStore.start(); + + // Start the Assignment Thread + startAssignmentThread(); + } + + public void stop() { + if (!running.compareAndSet(true, false)) { + return; + } + + LOG.info("Stopping assignment manager"); + + // Remove the RIT chore + master.getMasterProcedureExecutor().removeChore(this.ritChore); + + // Stop the Assignment Thread + stopAssignmentThread(); + + // Stop the RegionStateStore + regionStateMap.clear(); + regionStateStore.stop(); + + // Unregister Server Listener + master.getServerManager().unregisterListener(this); + + // Update meta events (for testing) + getProcedureScheduler().suspendEvent(metaLoadEvent); + setFailoverCleanupDone(false); + for (HRegionInfo hri: getMetaRegionSet()) { + setMetaInitialized(hri, false); + } + } + + public boolean isRunning() { + return running.get(); + } + + public MetricsAssignmentManager getAssignmentManagerMetrics() { + return metrics; + } + + private LoadBalancer getBalancer() { + return master.getLoadBalancer(); + } + + private MasterProcedureEnv getProcedureEnvironment() { + return master.getMasterProcedureExecutor().getEnvironment(); + } + + private MasterProcedureScheduler getProcedureScheduler() { + return getProcedureEnvironment().getProcedureScheduler(); + } + + protected int getAssignMaxAttempts() { + return assignMaxAttempts; + } + + /** + * Add the listener to the notification list. + * @param listener The AssignmentListener to register + */ + public void registerListener(final AssignmentListener listener) { + this.listeners.add(listener); + } + + /** + * Remove the listener from the notification list. + * @param listener The AssignmentListener to unregister + */ + public boolean unregisterListener(final AssignmentListener listener) { + return this.listeners.remove(listener); + } + + public void setRegionStateListener(final RegionStateListener listener) { + this.regionStateListener = listener; + } + + // TODO: Why public? + public RegionStates getRegionStates() { + return regionStateMap; + } + + // TODO: Why public? 
+ public RegionStateStore getRegionStateStore() { + return regionStateStore; + } + + public List getFavoredNodes(final HRegionInfo regionInfo) { + if (shouldAssignRegionsWithFavoredNodes) { + return ((FavoredNodeLoadBalancer)getBalancer()).getFavoredNodes(regionInfo); + } + return ServerName.EMPTY_SERVER_LIST; + } + + // ============================================================================================ + // Table State Manager helpers + // ============================================================================================ + // TODO: Why public? + public TableStateManager getTableStateManager() { + return master.getTableStateManager(); + } + + public boolean isTableEnabled(final TableName tableName) { + return getTableStateManager().isTableState(tableName, TableState.State.ENABLED); + } + + public boolean isTableDisabled(final TableName tableName) { + return getTableStateManager().isTableState(tableName, + TableState.State.DISABLED, TableState.State.DISABLING); + } + + // ============================================================================================ + // META Helpers + // ============================================================================================ + private boolean isMetaRegion(final HRegionInfo regionInfo) { + return regionInfo.isMetaRegion(); + } + + public boolean isMetaRegion(final byte[] regionName) { + return getMetaRegionFromName(regionName) != null; + } + + public HRegionInfo getMetaRegionFromName(final byte[] regionName) { + for (HRegionInfo hri: getMetaRegionSet()) { + if (Bytes.equals(hri.getRegionName(), regionName)) { + return hri; + } + } + return null; + } + + public boolean isCarryingMeta(final ServerName serverName) { + for (HRegionInfo hri: getMetaRegionSet()) { + if (isCarryingMeta(serverName, hri)) { + return true; + } + } + return false; + } + + public boolean isCarryingMeta(final ServerName serverName, final HRegionInfo regionInfo) { + return isCarryingRegion(serverName, regionInfo); + } + + public boolean isCarryingMetaReplica(final ServerName serverName, final HRegionInfo regionInfo) { + return isCarryingRegion(serverName, regionInfo); + } + + private boolean isCarryingRegion(final ServerName serverName, final HRegionInfo regionInfo) { + // TODO: check for state? + final RegionStateNode node = regionStateMap.getRegionNode(regionInfo); + return(node != null && serverName.equals(node.getRegionLocation())); + } + + private HRegionInfo getMetaForRegion(final HRegionInfo regionInfo) { + //if (regionInfo.isMetaRegion()) return regionInfo; + // TODO: handle multiple meta. if the region provided is not meta lookup + // which meta the region belongs to. + return HRegionInfo.FIRST_META_REGIONINFO; + } + + // TODO: handle multiple meta. + private static final Set META_REGION_SET = + Collections.singleton(HRegionInfo.FIRST_META_REGIONINFO); + public Set getMetaRegionSet() { + return META_REGION_SET; + } + + // ============================================================================================ + // META Event(s) helpers + // ============================================================================================ + public boolean isMetaInitialized() { + return metaInitializedEvent.isReady(); + } + + public boolean isMetaRegionInTransition() { + return !isMetaInitialized(); + } + + public boolean waitMetaInitialized(final Procedure proc) { + // TODO: handle multiple meta. should this wait on all meta? + // this is used by the ServerCrashProcedure... 
+ return waitMetaInitialized(proc, HRegionInfo.FIRST_META_REGIONINFO); + } + + public boolean waitMetaInitialized(final Procedure proc, final HRegionInfo regionInfo) { + return getProcedureScheduler().waitEvent( + getMetaInitializedEvent(getMetaForRegion(regionInfo)), proc); + } + + private void setMetaInitialized(final HRegionInfo metaRegionInfo, final boolean isInitialized) { + assert isMetaRegion(metaRegionInfo) : "unexpected non-meta region " + metaRegionInfo; + final ProcedureEvent metaInitEvent = getMetaInitializedEvent(metaRegionInfo); + if (isInitialized) { + getProcedureScheduler().wakeEvent(metaInitEvent); + } else { + getProcedureScheduler().suspendEvent(metaInitEvent); + } + } + + private ProcedureEvent getMetaInitializedEvent(final HRegionInfo metaRegionInfo) { + assert isMetaRegion(metaRegionInfo) : "unexpected non-meta region " + metaRegionInfo; + // TODO: handle multiple meta. + return metaInitializedEvent; + } + + public boolean waitMetaLoaded(final Procedure proc) { + return getProcedureScheduler().waitEvent(metaLoadEvent, proc); + } + + protected void wakeMetaLoadedEvent() { + getProcedureScheduler().wakeEvent(metaLoadEvent); + assert isMetaLoaded() : "expected meta to be loaded"; + } + + public boolean isMetaLoaded() { + return metaLoadEvent.isReady(); + } + + // ============================================================================================ + // TODO: Sync helpers + // ============================================================================================ + public void assignMeta(final HRegionInfo metaRegionInfo) throws IOException { + assignMeta(metaRegionInfo, null); + } + + public void assignMeta(final HRegionInfo metaRegionInfo, final ServerName serverName) + throws IOException { + assert isMetaRegion(metaRegionInfo) : "unexpected non-meta region " + metaRegionInfo; + AssignProcedure proc; + if (serverName != null) { + LOG.debug("Try assigning Meta " + metaRegionInfo + " to " + serverName); + proc = createAssignProcedure(metaRegionInfo, serverName); + } else { + LOG.debug("Assigning " + metaRegionInfo.getRegionNameAsString()); + proc = createAssignProcedure(metaRegionInfo, false); + } + ProcedureSyncWait.submitAndWaitProcedure(master.getMasterProcedureExecutor(), proc); + } + + public void assign(final HRegionInfo regionInfo) throws IOException { + assign(regionInfo, true); + } + + public void assign(final HRegionInfo regionInfo, final boolean forceNewPlan) throws IOException { + AssignProcedure proc = createAssignProcedure(regionInfo, forceNewPlan); + ProcedureSyncWait.submitAndWaitProcedure(master.getMasterProcedureExecutor(), proc); + } + + public void unassign(final HRegionInfo regionInfo) { + // TODO: rename this in reassign + } + + @VisibleForTesting + public void reopen(final HRegionInfo region) { + // TODO: used by TestScannersFromClientSide.java??? 
+ } + + public Future moveAsync(final RegionPlan regionPlan) { + MoveRegionProcedure proc = createMoveRegionProcedure(regionPlan); + return ProcedureSyncWait.submitProcedure(master.getMasterProcedureExecutor(), proc); + } + + public boolean waitForAssignment(final HRegionInfo regionInfo) throws IOException { + return waitForAssignment(regionInfo, Long.MAX_VALUE); + } + + public boolean waitForAssignment(final HRegionInfo regionInfo, final long timeout) throws IOException { + RegionStateNode node = regionStateMap.getRegionNode(regionInfo); + if (node == null) return false; + + RegionTransitionProcedure proc = node.getProcedure(); + if (proc == null) return false; + + ProcedureSyncWait.waitForProcedureToCompleteIOE( + master.getMasterProcedureExecutor(), proc.getProcId(), timeout); + return true; + } + + // ============================================================================================ + // RegionTransition procedures helpers + // ============================================================================================ + public AssignProcedure[] createAssignProcedures(final Collection regionInfo) { + return createAssignProcedures(regionInfo, false); + } + + public AssignProcedure[] createAssignProcedures(final Collection regionInfo, + final boolean forceNewPlan) { + final AssignProcedure[] procs = new AssignProcedure[regionInfo.size()]; + int index = 0; + for (HRegionInfo hri: regionInfo) { + procs[index++] = createAssignProcedure(hri, forceNewPlan); + } + return procs; + } + + public UnassignProcedure[] createUnassignProcedures(final Collection regionInfo) { + final UnassignProcedure[] procs = new UnassignProcedure[regionInfo.size()]; + int index = 0; + for (HRegionInfo hri: regionInfo) { + procs[index++] = createUnassignProcedure(hri, null, false); + } + return procs; + } + + public MoveRegionProcedure[] createReopenProcedures(final Collection regionInfo) { + final MoveRegionProcedure[] procs = new MoveRegionProcedure[regionInfo.size()]; + int index = 0; + for (HRegionInfo hri: regionInfo) { + final ServerName serverName = regionStateMap.getRegionServerOfRegion(hri); + final RegionPlan plan = new RegionPlan(hri, serverName, serverName); + procs[index++] = createMoveRegionProcedure(plan); + } + return procs; + } + + /** + * Called by things like EnableTableProcedure to get a list of AssignProcedure + * to assign the regions of the table. + */ + public AssignProcedure[] createAssignProcedures(final TableName tableName) { + return createAssignProcedures(regionStateMap.getRegionsOfTable(tableName)); + } + + /** + * Called by things like DisableTableProcedure to get a list of UnassignProcedure + * to unassign the regions of the table. + */ + public UnassignProcedure[] createUnassignProcedures(final TableName tableName) { + return createUnassignProcedures(regionStateMap.getRegionsOfTable(tableName)); + } + + /** + * Called by things like ModifyColumnFamilyProcedure to get a list of MoveRegionProcedure + * to reopen the regions of the table. 
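+ * <p>For example, a table-level procedure might fan the per-region work out as child
+ * procedures (a sketch; it assumes the caller is a StateMachineProcedure with a
+ * MasterProcedureEnv "env", so addChildProcedure is available):
+ * <pre>
+ *   // reopen every region of the table by scheduling one MoveRegionProcedure per region
+ *   addChildProcedure(env.getAssignmentManager().createReopenProcedures(tableName));
+ * </pre>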
+ */ + public MoveRegionProcedure[] createReopenProcedures(final TableName tableName) { + return createReopenProcedures(regionStateMap.getRegionsOfTable(tableName)); + } + + public AssignProcedure createAssignProcedure(final HRegionInfo regionInfo, + final boolean forceNewPlan) { + AssignProcedure proc = new AssignProcedure(regionInfo, forceNewPlan); + proc.setOwner(getProcedureEnvironment().getRequestUser().getShortName()); + return proc; + } + + public AssignProcedure createAssignProcedure(final HRegionInfo regionInfo, + final ServerName targetServer) { + AssignProcedure proc = new AssignProcedure(regionInfo, targetServer); + proc.setOwner(getProcedureEnvironment().getRequestUser().getShortName()); + return proc; + } + + public UnassignProcedure createUnassignProcedure(final HRegionInfo regionInfo, + final ServerName destinationServer, final boolean force) { + UnassignProcedure proc = new UnassignProcedure(regionInfo, destinationServer, force); + proc.setOwner(getProcedureEnvironment().getRequestUser().getShortName()); + return proc; + } + + public MoveRegionProcedure createMoveRegionProcedure(final RegionPlan plan) { + MoveRegionProcedure proc = new MoveRegionProcedure(plan); + proc.setOwner(getProcedureEnvironment().getRequestUser().getShortName()); + return proc; + } + + /** + * Delete the region states. This is called by "DeleteTable" + */ + public void deleteTable(final TableName tableName) { + final ArrayList regions = regionStateMap.getTableRegionStateNodes(tableName); + for (int i = 0; i < regions.size(); ++i) { + final HRegionInfo regionInfo = regions.get(i).getRegionInfo(); + // we expect the region to be offline + regionStateMap.removeFromOfflineRegions(regionInfo); + regionStateMap.deleteRegion(regionInfo); + } + } + + // ============================================================================================ + // RS Region Transition Report helpers + // ============================================================================================ + // TODO: Move this code in MasterRpcServices and call on specific event? + public ReportRegionStateTransitionResponse reportRegionStateTransition( + final ReportRegionStateTransitionRequest req) + throws PleaseHoldException { + final ReportRegionStateTransitionResponse.Builder builder = + ReportRegionStateTransitionResponse.newBuilder(); + + final ServerName serverName = ProtobufUtil.toServerName(req.getServer()); + try { + for (RegionStateTransition transition: req.getTransitionList()) { + switch (transition.getTransitionCode()) { + case OPENED: + case FAILED_OPEN: + case CLOSED: + assert transition.getRegionInfoCount() == 1 : transition; + final HRegionInfo hri = HRegionInfo.convert(transition.getRegionInfo(0)); + updateRegionTransition(serverName, transition.getTransitionCode(), hri, + transition.hasOpenSeqNum() ? 
transition.getOpenSeqNum() : HConstants.NO_SEQNUM); + break; + case READY_TO_SPLIT: + case SPLIT_PONR: + case SPLIT: + case SPLIT_REVERTED: + assert transition.getRegionInfoCount() == 3 : transition; + final HRegionInfo parent = HRegionInfo.convert(transition.getRegionInfo(0)); + final HRegionInfo splitA = HRegionInfo.convert(transition.getRegionInfo(1)); + final HRegionInfo splitB = HRegionInfo.convert(transition.getRegionInfo(2)); + updateRegionSplitTransition(serverName, transition.getTransitionCode(), + parent, splitA, splitB); + break; + case READY_TO_MERGE: + case MERGE_PONR: + case MERGED: + case MERGE_REVERTED: + assert transition.getRegionInfoCount() == 3 : transition; + final HRegionInfo merged = HRegionInfo.convert(transition.getRegionInfo(0)); + final HRegionInfo mergeA = HRegionInfo.convert(transition.getRegionInfo(1)); + final HRegionInfo mergeB = HRegionInfo.convert(transition.getRegionInfo(2)); + updateRegionMergeTransition(serverName, transition.getTransitionCode(), + merged, mergeA, mergeB); + break; + } + } + } catch (UnexpectedStateException|UnsupportedOperationException e) { + // TODO: at the moment we have a single error message and the RS will abort + // if the master says that one of the region transition failed. + LOG.warn("failed to transition: " + e.getMessage()); + builder.setErrorMessage("failed to transition: " + e.getMessage()); + } + return builder.build(); + } + + private void updateRegionTransition(final ServerName serverName, final TransitionCode state, + final HRegionInfo regionInfo, final long seqId) + throws PleaseHoldException, UnexpectedStateException { + checkFailoverCleanupCompleted(regionInfo); + + final RegionStateNode regionNode = regionStateMap.getRegionNode(regionInfo); + if (regionNode == null) { + // the table/region is gone. maybe a delete, split, merge + throw new UnexpectedStateException(String.format( + "Server %s was trying to transition region %s to %s. but the region was removed.", + serverName, regionInfo, state)); + } + + LOG.info(String.format("UPDATE REGION TRANSITION serverName=%s region=%s state=%s", + serverName, regionNode, state)); + + final ServerStateNode serverNode = regionStateMap.getOrCreateServer(serverName); + if (!reportTransition(regionNode, serverNode, state, seqId)) { + LOG.warn(String.format( + "no procedure found for region=%s. server=%s was trying to transition to %s", + regionNode, serverName, state)); + } + } + + private boolean reportTransition(final RegionStateNode regionNode, + final ServerStateNode serverNode, final TransitionCode state, final long seqId) + throws UnexpectedStateException { + final ServerName serverName = serverNode.getServerName(); + synchronized (regionNode) { + final RegionTransitionProcedure proc = regionNode.getProcedure(); + if (proc == null) return false; + + //serverNode.getReportEvent().removeProcedure(proc); + proc.reportTransition(master.getMasterProcedureExecutor().getEnvironment(), + serverName, state, seqId); + return true; + } + } + + private void updateRegionSplitTransition(final ServerName serverName, final TransitionCode state, + final HRegionInfo parent, final HRegionInfo hriA, final HRegionInfo hriB) + throws PleaseHoldException, UnexpectedStateException { + checkFailoverCleanupCompleted(parent); + final RegionStateNode regionNode = regionStateMap.getRegionNode(parent); + if (regionNode == null) { + // the table/region is gone. maybe a delete, split, merge + throw new UnexpectedStateException(String.format( + "Server %s was trying to transition region %s to %s. 
but the region was removed.", + serverName, parent, state)); + } + + LOG.info(String.format("UPDATE REGION TRANSITION serverName=%s region=%s state=%s", + serverName, regionNode, state)); + /* + final ServerStateNode serverNode = regionStateMap.getOrCreateServer(serverName); + if (!reportTransition(regionNode, serverNode, state, seqId)) { + LOG.warn(String.format( + "No procedure found for region=%s. server=%s was trying to transition to %s", + regionNode, serverName, state)); + } + + // TODO: Attach split support + throw new UnsupportedOperationException(String.format( + "Split not handled yet: state=%s parent=%s hriA=%s hriB=%s", state, parent, hriA, hriB)); + */ + } + + private void updateRegionMergeTransition(final ServerName serverName, final TransitionCode state, + final HRegionInfo merged, final HRegionInfo hriA, final HRegionInfo hriB) + throws PleaseHoldException, UnexpectedStateException { + checkFailoverCleanupCompleted(merged); + + // TODO: Attach merge support + throw new UnsupportedOperationException(String.format( + "Merge not handled yet: state=%s merged=%s hriA=%s hriB=%s", state, merged, hriA, hriB)); + } + + // ============================================================================================ + // RS Status update (report online regions) helpers + // ============================================================================================ + /** + * the master will call this method when the RS send the regionServerReport(). + * the report will contains the "hbase version" and the "online regions". + * this method will check the the online regions against the in-memory state of the AM, + * if there is a mismatch we will try to fence out the RS with the assumption + * that something went wrong on the RS side. + */ + public void reportOnlineRegions(final ServerName serverName, + final int versionNumber, final Set regionNames) { + if (!isRunning()) return; + final ServerStateNode serverNode = regionStateMap.getOrCreateServer(serverName); + if (LOG.isDebugEnabled()) { + LOG.debug("ReportOnlineRegions " + serverName + " regionCount=" + regionNames.size() + + ", metaLoaded=" + isMetaLoaded()); + } + + // update the server version number. This will be used for live upgrades. + synchronized (serverNode) { + serverNode.setVersionNumber(versionNumber); + if (serverNode.isInState(ServerState.SPLITTING, ServerState.OFFLINE)) { + LOG.warn("Got a report from a server result in state " + serverNode.getState()); + return; + } + } + + if (regionNames.isEmpty()) { + // nothing to do if we don't have regions + LOG.trace("no online region found on " + serverName); + } else if (!isMetaLoaded()) { + // if we are still on startup, discard the report unless is from someone holding meta + checkOnlineRegionsReportForMeta(serverNode, regionNames); + } else { + // The Heartbeat updates us of what regions are only. check and verify the state. 
+ checkOnlineRegionsReport(serverNode, regionNames); + } + + // wake report event + wakeServerReportEvent(serverNode); + } + + public void checkOnlineRegionsReportForMeta(final ServerStateNode serverNode, + final Set regionNames) { + try { + for (byte[] regionName: regionNames) { + final HRegionInfo hri = getMetaRegionFromName(regionName); + if (hri == null) { + if (LOG.isDebugEnabled()) { + LOG.debug("Skip online report for region=" + Bytes.toStringBinary(regionName) + + " while meta is loading"); + } + continue; + } + + final RegionStateNode regionNode = regionStateMap.getOrCreateRegionNode(hri); + LOG.info("META REPORTED: " + regionNode); + if (!reportTransition(regionNode, serverNode, TransitionCode.OPENED, 0)) { + LOG.warn("META REPORTED but no procedure found"); + regionNode.setRegionLocation(serverNode.getServerName()); + } + } + } catch (UnexpectedStateException e) { + final ServerName serverName = serverNode.getServerName(); + LOG.warn("Killing server=" + serverName + ": " + e.getMessage()); + killRegionServer(serverNode); + } + } + + public void checkOnlineRegionsReport(final ServerStateNode serverNode, + final Set regionNames) { + final ServerName serverName = serverNode.getServerName(); + try { + for (byte[] regionName: regionNames) { + if (!isRunning()) return; + + final RegionStateNode regionNode = regionStateMap.getRegionNodeFromName(regionName); + if (regionNode == null) { + throw new UnexpectedStateException( + "Reported online region " + Bytes.toStringBinary(regionName) + " not found"); + } + + synchronized (regionNode) { + if (regionNode.isInState(State.OPENING, State.OPEN)) { + if (!regionNode.getRegionLocation().equals(serverName)) { + throw new UnexpectedStateException( + "Reported OPEN region on server=" + serverName + + " but the state found says server=" + regionNode.getRegionLocation()); + } else if (regionNode.isInState(State.OPENING)) { + try { + if (!reportTransition(regionNode, serverNode, TransitionCode.OPENED, 0)) { + LOG.warn("Reported OPEN region on server=" + serverName + + " but the state found says " + regionNode + " and no procedure is running"); + } + } catch (UnexpectedStateException e) { + LOG.warn("unexpected exception while trying to report " + regionNode + + " as open: " + e.getMessage(), e); + } + } + } else if (!regionNode.isInState(State.CLOSING, State.SPLITTING)) { + // TODO: We end up killing the RS if we get a report while we already + // transitioned to close or split. 
we should have a timeout/timestamp to compare + throw new UnexpectedStateException( + "Reported OPEN region, but the state found says " + regionNode.getState()); + } + } + } + } catch (UnexpectedStateException e) { + LOG.warn("Killing server=" + serverName + ": " + e.getMessage()); + killRegionServer(serverNode); + } + } + + protected boolean waitServerReportEvent(final ServerName serverName, final Procedure proc) { + final ServerStateNode serverNode = regionStateMap.getOrCreateServer(serverName); + if (LOG.isDebugEnabled()) { + LOG.debug("Wait for " + serverName + " report on " + proc); + } + return getProcedureScheduler().waitEvent(serverNode.getReportEvent(), proc); + } + + protected void wakeServerReportEvent(final ServerStateNode serverNode) { + getProcedureScheduler().wakeEvent(serverNode.getReportEvent()); + } + + // ============================================================================================ + // RIT chore + // ============================================================================================ + private static class RegionInTransitionChore extends ProcedureInMemoryChore { + public RegionInTransitionChore(final int timeoutMsec) { + super(timeoutMsec); + } + + @Override + protected void periodicExecute(final MasterProcedureEnv env) { + final AssignmentManager am = env.getAssignmentManager(); + final int ritThreshold = env.getMasterConfiguration(). + getInt(HConstants.METRICS_RIT_STUCK_WARNING_THRESHOLD, 60000); + + final long currentTime = EnvironmentEdgeManager.currentTime(); + + int totalRITs = 0; + int totalRITsOverThreshold = 0; + long oldestRITTime = 0; + for (RegionState state: am.getRegionStates().getRegionsStateInTransition()) { + totalRITs++; + final long ritTime = currentTime - state.getStamp(); + if (ritTime > ritThreshold) { + am.handleRegionOverStuckWarningThreshold(state.getRegion()); + totalRITsOverThreshold++; + } + if (oldestRITTime < ritTime) { + oldestRITTime = ritTime; + } + } + + // update metrics + am.updateRegionsInTransitionMetrics(totalRITs, totalRITsOverThreshold, oldestRITTime); + } + } + + private void updateRegionsInTransitionMetrics(final int totalRITs, + final int totalRITsOverThreshold, final long oldestRITTime) { + metrics.updateRITOldestAge(oldestRITTime); + metrics.updateRITCount(totalRITs); + metrics.updateRITCountOverThreshold(totalRITsOverThreshold); + } + + private void handleRegionOverStuckWarningThreshold(final HRegionInfo regionInfo) { + final RegionStateNode regionNode = regionStateMap.getRegionNode(regionInfo); + LOG.warn("TODO Handle region stuck in transition: " + regionNode); + } + + // ============================================================================================ + // TODO: Master load/bootstrap + // ============================================================================================ + public void joinCluster() throws IOException { + final long startTime = System.currentTimeMillis(); + + LOG.info("Joining the cluster..."); +/* + int poolSize = master.getConfiguration().getInt(BOOTSTRAP_THREAD_POOL_SIZE_CONF_KEY, + DEFAULT_BOOTSTRAP_THREAD_POOL_SIZE); + TaskPool taskPool = TaskPool(poolSize, 60, TimeUnit.SECONDS, "AssignmentManager-JoinCluster"); +*/ + // Scan hbase:meta to build list of existing regions, servers, and assignment + loadMeta(); + + while (master.getServerManager().countOfRegionServers() < 1) { + if (LOG.isTraceEnabled()) LOG.trace("Waiting for RegionServers to heartbeat/join"); + Threads.sleep(250); + } + + // This method will assign all user regions if a clean server startup 
or
+    // it will reconstruct master state and clean up any leftovers from the previous master process.
+    boolean failover = processofflineServersWithOnlineRegions();
+
+    // Start the RIT chore
+    master.getMasterProcedureExecutor().addChore(this.ritChore);
+
+    LOG.info(String.format("Joined cluster of %d RegionServers in %s, failover=%s",
+      master.getServerManager().countOfRegionServers(),
+      StringUtils.humanTimeDiff(System.currentTimeMillis() - startTime), failover));
+  }
+
+  private void loadMeta() throws IOException {
+    // TODO: use a thread pool
+    MetaTableAccessor.fullScanRegions(master.getConnection(), new MetaTableAccessor.Visitor() {
+      final boolean isDebugEnabled = LOG.isDebugEnabled();
+
+      @Override
+      public boolean visit(Result r) throws IOException {
+        if (r != null && !r.isEmpty()) {
+          long st = System.currentTimeMillis();
+          loadMeta(r);
+          long et = System.currentTimeMillis();
+          if (LOG.isTraceEnabled()) {
+            LOG.trace("LOAD META PERF " + StringUtils.humanTimeDiff(et - st));
+          }
+        } else if (isDebugEnabled) {
+          LOG.warn("Null result from hbase:meta - ignoring but this is strange.");
+        }
+        return true;
+      }
+    });
+
+    // Every assignment is blocked until meta is loaded.
+    wakeMetaLoadedEvent();
+  }
+
+  // Read into memory the content of the hbase:meta table.
+  private void loadMeta(final Result result) throws IOException {
+    final RegionLocations rl = MetaTableAccessor.getRegionLocations(result);
+    if (rl == null) return;
+
+    final HRegionLocation[] locations = rl.getRegionLocations();
+    if (locations == null) return;
+
+    for (int i = 0; i < locations.length; ++i) {
+      final HRegionLocation hrl = locations[i];
+      if (hrl == null) continue;
+
+      final HRegionInfo regionInfo = hrl.getRegionInfo();
+      if (regionInfo == null) continue;
+
+      final int replicaId = regionInfo.getReplicaId();
+      final State state = regionStateStore.getRegionState(result, replicaId);
+
+      final ServerName lastHost = hrl.getServerName();
+      final ServerName regionLocation = regionStateStore.getRegionServer(result, replicaId);
+
+      final RegionStateNode regionNode = regionStateMap.getOrCreateRegionNode(regionInfo);
+      synchronized (regionNode) {
+        if (!regionNode.isInTransition()) {
+          regionNode.setState(state);
+          regionNode.setLastHost(lastHost);
+          regionNode.setRegionLocation(regionLocation);
+
+          if (state == State.OPEN) {
+            assert regionLocation != null : "found null region location for " + regionNode;
+            regionStateMap.addRegionToServer(regionLocation, regionNode);
+          } else if (state == State.OFFLINE) {
+            regionStateMap.addToOfflineRegions(regionNode);
+          } else {
+            // These regions should have a procedure in replay
+            regionStateMap.addRegionInTransition(regionNode, null);
+          }
+        }
+      }
+
+      // TODO: For now INFO level. Later, make it DEBUG or TRACE because there could be millions!
+      if (LOG.isInfoEnabled()) {
+        LOG.info(String.format("Loaded hbase:meta row region=%s state=%s lastHost=%s regionLocation=%s",
+          regionInfo, state, lastHost, regionLocation));
+      }
+    }
+  }
+
+  // TODO: the assumption here is that if RSs are crashing while we are executing this,
+  // they will be handled by the SSH that will be put in the ServerManager "queue".
+  // We can integrate this a bit better.
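+  // Walks the region states just loaded from hbase:meta: servers that still appear to host
+  // OPEN regions but are not online are expired, and OFFLINE regions of enabled tables are
+  // queued for assignment. Returns true if there are dead servers to process (i.e. failover).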
+  private boolean processofflineServersWithOnlineRegions() {
+    boolean failover = !master.getServerManager().getDeadServers().isEmpty();
+
+    final Set<ServerName> offlineServersWithOnlineRegions = new HashSet<ServerName>();
+    final ArrayList<HRegionInfo> regionsToAssign = new ArrayList<HRegionInfo>();
+    long st, et;
+
+    st = System.currentTimeMillis();
+    for (RegionStateNode regionNode: regionStateMap.getRegionNodes()) {
+      if (regionNode.getState() == State.OPEN) {
+        final ServerName serverName = regionNode.getRegionLocation();
+        if (!master.getServerManager().isServerOnline(serverName)) {
+          offlineServersWithOnlineRegions.add(serverName);
+        }
+      } else if (regionNode.getState() == State.OFFLINE) {
+        if (isTableEnabled(regionNode.getTable())) {
+          regionsToAssign.add(regionNode.getRegionInfo());
+        }
+      }
+    }
+    if (LOG.isTraceEnabled()) {
+      et = System.currentTimeMillis();
+      LOG.trace("[STEP-1] " + StringUtils.humanTimeDiff(et - st));
+    }
+    // kill servers with online regions
+    st = System.currentTimeMillis();
+    for (ServerName serverName: offlineServersWithOnlineRegions) {
+      if (!master.getServerManager().isServerOnline(serverName)) {
+        LOG.info("KILL RS hosting regions but not online " + serverName +
+          " (master=" + master.getServerName() + ")");
+        killRegionServer(serverName);
+      }
+    }
+    if (LOG.isTraceEnabled()) {
+      et = System.currentTimeMillis();
+      LOG.trace("[STEP-2] " + StringUtils.humanTimeDiff(et - st));
+    }
+    setFailoverCleanupDone(true);
+
+    // assign offline regions
+    st = System.currentTimeMillis();
+    for (HRegionInfo regionInfo: regionsToAssign) {
+      master.getMasterProcedureExecutor().submitProcedure(
+        createAssignProcedure(regionInfo, false));
+    }
+    if (LOG.isTraceEnabled()) {
+      et = System.currentTimeMillis();
+      LOG.trace("[STEP-3] " + StringUtils.humanTimeDiff(et - st));
+    }
+    return failover;
+  }
+
+  /**
+   * Used by ServerCrashProcedure to make sure AssignmentManager has completed
+   * the failover cleanup before re-assigning regions of dead servers, so that
+   * when re-assignment happens, AssignmentManager has the proper region states.
+   */
+  public boolean isFailoverCleanupDone() {
+    return failoverCleanupDone.isReady();
+  }
+
+  /**
+   * Used by ServerCrashProcedure tests to verify the ability to suspend the
+   * execution of the ServerCrashProcedure.
+   */
+  @VisibleForTesting
+  public void setFailoverCleanupDone(final boolean b) {
+    master.getMasterProcedureExecutor().getEnvironment()
+      .setEventReady(failoverCleanupDone, b);
+  }
+
+  public ProcedureEvent getFailoverCleanupEvent() {
+    return failoverCleanupDone;
+  }
+
+  /**
+   * Used to check if the failover cleanup is done;
+   * if not, we throw PleaseHoldException since we are still rebuilding the RegionStates.
+   * @param hri region to check if it has already been rebuilt
+   * @throws PleaseHoldException if the failover cleanup is not completed
+   */
+  private void checkFailoverCleanupCompleted(final HRegionInfo hri) throws PleaseHoldException {
+    // TODO: can we avoid throwing an exception if hri is already loaded?
+    // at the moment we bypass only meta
+    if (!isMetaRegion(hri) && !isFailoverCleanupDone()) {
+      LOG.warn("Master is rebuilding user regions: " + hri);
+      throw new PleaseHoldException("Master is rebuilding user regions");
+    }
+  }
+
+  // ============================================================================================
+  //  TODO: Metrics
+  // ============================================================================================
+  public int getNumRegionsOpened() {
+    // TODO: Used by TestRegionPlacement.java; assumes a monotonically increasing value
+    return 0;
+  }
+
+  // TODO: can this stuff be only in the AssignProcedure and available by getting only RIT?
+  private Map<HRegionInfo, AtomicInteger> failedTracker =
+    new java.util.concurrent.ConcurrentHashMap<HRegionInfo, AtomicInteger>();
+  public Map<HRegionInfo, AtomicInteger> getFailedOpenTracker() {
+    return failedTracker;
+  }
+
+  protected int incrementAndGetFailedOpen(final HRegionInfo regionInfo) {
+    Map<HRegionInfo, AtomicInteger> failedOpen = getFailedOpenTracker();
+    AtomicInteger count = failedOpen.get(regionInfo);
+    if (count == null) {
+      count = new AtomicInteger(1);
+      failedOpen.put(regionInfo, count);
+      return 1;
+    }
+    return count.incrementAndGet();
+  }
+
+  // ============================================================================================
+  //  TODO: Server Crash
+  // ============================================================================================
+  public void onlineRegion(final HRegionInfo regionInfo, final ServerName serverName) {
+    // TODO: used by TestSplitTransactionOnCluster.java
+  }
+
+  public void offlineRegion(final HRegionInfo regionInfo) throws IOException {
+    // TODO: used by MasterRpcServices and ServerCrashProcedure
+    LOG.info("OFFLINE REGION " + regionInfo);
+    final RegionStateNode node = regionStateMap.getRegionNode(regionInfo);
+    if (node != null) {
+      node.setState(State.OFFLINE);
+      node.setRegionLocation(null);
+    }
+  }
+
+  public Map<ServerName, List<HRegionInfo>> getSnapShotOfAssignment(
+      final Collection<HRegionInfo> regions) {
+    return regionStateMap.getSnapShotOfAssignment(regions);
+  }
+
+  // ============================================================================================
+  //  TODO: UTILS/HELPERS?
+ // ============================================================================================ + /** + * Used by the client (via master) to identify if all regions have the schema updates + * + * @param tableName + * @return Pair indicating the status of the alter command (pending/total) + * @throws IOException + */ + public Pair getReopenStatus(TableName tableName) + throws IOException { + if (isTableDisabled(tableName)) return new Pair(0, 0); + + final List states = regionStateMap.getTableRegionStates(tableName); + int ritCount = 0; + for (RegionState regionState: states) { + if (!regionState.isOpened()) ritCount++; + } + return new Pair(ritCount, states.size()); + } + + // ============================================================================================ + // TODO: Region State In Transition + // ============================================================================================ + protected boolean addRegionInTransition(final RegionStateNode regionNode, + final RegionTransitionProcedure procedure) { + return regionStateMap.addRegionInTransition(regionNode, procedure); + } + + protected void removeRegionInTransition(final RegionStateNode regionNode, + final RegionTransitionProcedure procedure) { + regionStateMap.removeRegionInTransition(regionNode, procedure); + } + + public boolean hasRegionsInTransition() { + return regionStateMap.hasRegionsInTransition(); + } + + public List getRegionsInTransition() { + return regionStateMap.getRegionsInTransition(); + } + + public List getAssignedRegions() { + return regionStateMap.getAssignedRegions(); + } + + public HRegionInfo getRegionInfo(final byte[] regionName) { + final RegionStateNode regionState = regionStateMap.getRegionNodeFromName(regionName); + return regionState != null ? regionState.getRegionInfo() : null; + } + + // ============================================================================================ + // TODO: Region Status update + // ============================================================================================ + public void markRegionAsOpening(final RegionStateNode regionNode) throws IOException { + LOG.info("TODO: MARK REGION AS OPENING " + regionNode); + synchronized (regionNode) { + if (!regionNode.setState(State.OPENING, RegionStates.STATES_EXPECTEX_IN_OPEN)) { + throw new UnexpectedStateException( + "unexpected state " + regionNode.getState() + " for region " + regionNode); + } + + // TODO: Do we need to update the state + regionStateStore.updateRegionLocation(regionNode.getRegionInfo(), State.OPENING, + regionNode.getRegionLocation(), regionNode.getLastHost(), HConstants.NO_SEQNUM); + } + } + + public void markRegionAsOpened(final RegionStateNode regionNode) throws IOException { + final HRegionInfo hri = regionNode.getRegionInfo(); + synchronized (regionNode) { + if (!regionNode.setState(State.OPEN, RegionStates.STATES_EXPECTEX_IN_OPEN)) { + throw new UnexpectedStateException( + "Unexpected state " + regionNode.getState() + " for region " + regionNode); + } + + // TODO: Update Meta + if (isMetaRegion(hri)) { + setMetaInitialized(hri, true); + } + + // TODO + LOG.info("TODO: MARK REGION AS OPEN " + regionNode); + regionStateMap.addRegionToServer(regionNode.getRegionLocation(), regionNode); + regionStateStore.updateRegionLocation(regionNode.getRegionInfo(), State.OPEN, + regionNode.getRegionLocation(), regionNode.getLastHost(), regionNode.getOpenSeqNum()); + + sendRegionOpenedNotification(hri, regionNode.getRegionLocation()); + } + } + + public void markRegionAsClosing(final 
RegionStateNode regionNode) throws IOException { + LOG.info("TODO: MARK REGION AS CLOSING " + regionNode); + final HRegionInfo hri = regionNode.getRegionInfo(); + synchronized (regionNode) { + if (!regionNode.setState(State.CLOSING, RegionStates.STATES_EXPECTEX_IN_CLOSE)) { + throw new UnexpectedStateException( + "unexpected state " + regionNode.getState() + " for region " + regionNode); + } + + // set meta has not initialized early. so people trying to create/edit tables will wait + if (isMetaRegion(hri)) { + setMetaInitialized(hri, false); + } + + regionStateStore.updateRegionLocation(regionNode.getRegionInfo(), State.CLOSING, + regionNode.getRegionLocation(), regionNode.getLastHost(), HConstants.NO_SEQNUM); + } + } + + public void markRegionAsClosed(final RegionStateNode regionNode) throws IOException { + LOG.info("TODO: MARK REGION AS CLOSED " + regionNode); + final HRegionInfo hri = regionNode.getRegionInfo(); + synchronized (regionNode) { + if (!regionNode.setState(State.CLOSED, RegionStates.STATES_EXPECTEX_IN_CLOSE)) { + throw new UnexpectedStateException( + "unexpected state " + regionNode.getState() + " for region " + regionNode); + } + + regionStateMap.removeRegionFromServer(regionNode.getRegionLocation(), regionNode); + regionStateStore.updateRegionLocation(regionNode.getRegionInfo(), State.CLOSED, + regionNode.getRegionLocation(), regionNode.getLastHost(), HConstants.NO_SEQNUM); + + sendRegionClosedNotification(hri); + } + } + + private void sendRegionOpenedNotification(final HRegionInfo regionInfo, + final ServerName serverName) { + getBalancer().regionOnline(regionInfo, serverName); + if (!this.listeners.isEmpty()) { + for (AssignmentListener listener : this.listeners) { + listener.regionOpened(regionInfo, serverName); + } + } + } + + private void sendRegionClosedNotification(final HRegionInfo regionInfo) { + getBalancer().regionOffline(regionInfo); + if (!this.listeners.isEmpty()) { + for (AssignmentListener listener : this.listeners) { + listener.regionClosed(regionInfo); + } + } + } + + // ============================================================================================ + // Assign Queue (Assign/Balance) + // ============================================================================================ + private final ArrayList pendingAssignQueue = new ArrayList(); + private final ReentrantLock assignQueueLock = new ReentrantLock(); + private final Condition assignQueueFullCond = assignQueueLock.newCondition(); + + /** + * Add the assign operation to the assignment queue. + * The pending assignment operation will be processed, + * and each region will be assigned by a server using the balancer. + */ + protected void queueAssign(final RegionStateNode regionNode) { + getProcedureScheduler().suspendEvent(regionNode.getProcedureEvent()); + + // TODO: quick-start for meta and the other sys-tables? 
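+    // Pending regions are drained by the assignment thread (see processAssignQueue), which asks
+    // the balancer for placements; the first element, a system-table region, or a full queue
+    // signals the drainer right away. The region's ProcedureEvent stays suspended until a
+    // placement plan has been accepted.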
+ assignQueueLock.lock(); + try { + pendingAssignQueue.add(regionNode); + if (regionNode.isSystemTable() || + pendingAssignQueue.size() == 1 || + pendingAssignQueue.size() >= assignDispatchWaitQueueMaxSize) { + assignQueueFullCond.signal(); + } + } finally { + assignQueueLock.unlock(); + } + } + + private void startAssignmentThread() { + assignThread = new Thread("AssignmentThread") { + @Override + public void run() { + while (isRunning()) { + processAssignQueue(); + } + pendingAssignQueue.clear(); + } + }; + assignThread.start(); + } + + private void stopAssignmentThread() { + assignQueueSignal(); + try { + while (assignThread.isAlive()) { + assignQueueSignal(); + assignThread.join(250); + } + } catch (InterruptedException e) { + LOG.warn("join interrupted", e); + Thread.currentThread().interrupt(); + } + } + + private void assignQueueSignal() { + assignQueueLock.lock(); + try { + assignQueueFullCond.signal(); + } finally { + assignQueueLock.unlock(); + } + } + + @edu.umd.cs.findbugs.annotations.SuppressWarnings("WA_AWAIT_NOT_IN_LOOP") + private HashMap waitOnAssignQueue() { + HashMap regions = null; + + assignQueueLock.lock(); + try { + if (pendingAssignQueue.isEmpty() && isRunning()) { + assignQueueFullCond.await(); + } + + if (!isRunning()) return null; + assignQueueFullCond.await(assignDispatchWaitMillis, TimeUnit.MILLISECONDS); + regions = new HashMap(pendingAssignQueue.size()); + for (RegionStateNode regionNode: pendingAssignQueue) { + regions.put(regionNode.getRegionInfo(), regionNode); + } + pendingAssignQueue.clear(); + } catch (InterruptedException e) { + LOG.warn("got interrupted ", e); + Thread.currentThread().interrupt(); + } finally { + assignQueueLock.unlock(); + } + return regions; + } + + private void processAssignQueue() { + final HashMap regions = waitOnAssignQueue(); + if (regions == null || regions.size() == 0 || !isRunning()) { + return; + } + + LOG.info("PROCESS ASSIGN QUEUE regionCount=" + regions.size()); + + // TODO: Optimize balancer. pass a RegionPlan? + final HashMap retainMap = new HashMap(); + final List rrList = new ArrayList(); + for (RegionStateNode regionNode: regions.values()) { + if (regionNode.getRegionLocation() != null) { + retainMap.put(regionNode.getRegionInfo(), regionNode.getRegionLocation()); + } else { + rrList.add(regionNode.getRegionInfo()); + } + } + + // TODO: connect with the listener to invalidate the cache + final LoadBalancer balancer = getBalancer(); + + // TODO use events + List servers = master.getServerManager().createDestinationServersList(); + while (servers.size() < 1) { + Threads.sleep(250); + servers = master.getServerManager().createDestinationServersList(); + } + + final boolean isTraceEnabled = LOG.isTraceEnabled(); + if (isTraceEnabled) { + LOG.trace("available servers count=" + servers.size() + ": " + servers); + } + + // ask the balancer where to place regions + if (!retainMap.isEmpty()) { + if (isTraceEnabled) { + LOG.trace("retain assign regions=" + retainMap); + } + try { + acceptPlan(regions, balancer.retainAssignment(retainMap, servers)); + } catch (HBaseIOException e) { + LOG.warn("unable to retain assignment", e); + addToPendingAssignment(regions, retainMap.keySet()); + } + } + + // TODO: Do we need to split retain and round-robin? + // the retain seems to fallback to round-robin/random if the region is not in the map. 
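+    // Regions with no previous location (rrList) get round-robin placement from the balancer;
+    // if the balancer fails, they are pushed back onto the pending queue and retried.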
+ if (!rrList.isEmpty()) { + Collections.sort(rrList); + if (isTraceEnabled) { + LOG.trace("round robin regions=" + rrList); + } + try { + acceptPlan(regions, balancer.roundRobinAssignment(rrList, servers)); + } catch (HBaseIOException e) { + LOG.warn("unable to round-robin assignment", e); + addToPendingAssignment(regions, rrList); + } + } + } + + private void acceptPlan(final HashMap regions, + final Map> plan) throws HBaseIOException { + final ProcedureEvent[] events = new ProcedureEvent[regions.size()]; + final long st = System.currentTimeMillis(); + + if (plan == null) { + throw new HBaseIOException("unable to compute plans for regions=" + regions.size()); + } + + if (plan.isEmpty()) return; + + int evcount = 0; + for (Map.Entry> entry: plan.entrySet()) { + final ServerName server = entry.getKey(); + for (HRegionInfo hri: entry.getValue()) { + final RegionStateNode regionNode = regions.get(hri); + regionNode.setRegionLocation(server); + events[evcount++] = regionNode.getProcedureEvent(); + } + } + getProcedureScheduler().wakeEvents(evcount, events); + + final long et = System.currentTimeMillis(); + LOG.info("ASSIGN ACCEPT eventCount=" + events.length + " in " + + StringUtils.humanTimeDiff(et - st)); + } + + private void addToPendingAssignment(final HashMap regions, + final Collection pendingRegions) { + assignQueueLock.lock(); + try { + for (HRegionInfo hri: pendingRegions) { + pendingAssignQueue.add(regions.get(hri)); + } + } finally { + assignQueueLock.unlock(); + } + } + + // ============================================================================================ + // Server Helpers + // ============================================================================================ + @Override + public void serverAdded(final ServerName serverName) { + } + + @Override + public void serverRemoved(final ServerName serverName) { + final ServerStateNode serverNode = regionStateMap.getServerNode(serverName); + if (serverNode == null) return; + + // just in case, wake procedures waiting for this server report + wakeServerReportEvent(serverNode); + } + + public int getServerVersion(final ServerName serverName) { + final ServerStateNode node = regionStateMap.getServerNode(serverName); + return node != null ? 
node.getVersionNumber() : 0; + } + + public void killRegionServer(final ServerName serverName) { + final ServerStateNode serverNode = regionStateMap.getServerNode(serverName); + killRegionServer(serverNode); + } + + public void killRegionServer(final ServerStateNode serverNode) { + for (RegionStateNode regionNode: serverNode.getRegions()) { + regionNode.setState(State.OFFLINE); + regionNode.setRegionLocation(null); + } + master.getServerManager().expireServer(serverNode.getServerName()); + } + + public void submitServerCrash(final ServerName serverName, final boolean shouldSplitWal) { + boolean carryingMeta = isCarryingMeta(serverName); + ProcedureExecutor procExec = this.master.getMasterProcedureExecutor(); + procExec.submitProcedure(new ServerCrashProcedure(procExec.getEnvironment(), serverName, + shouldSplitWal, carryingMeta)); + LOG.debug("Added=" + serverName + + " to dead servers, submitted shutdown handler to be executed meta=" + carryingMeta); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MoveRegionProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MoveRegionProcedure.java new file mode 100644 index 0000000..916ef5e --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MoveRegionProcedure.java @@ -0,0 +1,147 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.master.assignment; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; +import org.apache.hadoop.hbase.master.procedure.TableProcedureInterface; +import org.apache.hadoop.hbase.master.RegionPlan; +import org.apache.hadoop.hbase.procedure2.StateMachineProcedure; +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MoveRegionState; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MoveRegionStateData; + +/** + * Procedure that implements a RegionPlan. + * It first runs an unassign subprocedure followed + * by an assign subprocedure. 
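+ * Both subprocedures are created with the plan's destination server; the move is complete
+ * once the assign child procedure finishes.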
+ */ +@InterfaceAudience.Private +public class MoveRegionProcedure + extends StateMachineProcedure + implements TableProcedureInterface { + private static final Log LOG = LogFactory.getLog(MoveRegionProcedure.class); + + private RegionPlan plan; + + public MoveRegionProcedure() { + // Required by the Procedure framework to create the procedure on replay + super(); + } + + public MoveRegionProcedure(final RegionPlan plan) { + this.plan = plan; + } + + @Override + protected Flow executeFromState(final MasterProcedureEnv env, final MoveRegionState state) + throws InterruptedException { + if (LOG.isTraceEnabled()) { + LOG.trace(this + " execute state=" + state); + } + switch (state) { + case MOVE_REGION_UNASSIGN: + addChildProcedure(new UnassignProcedure(plan.getRegionInfo(), plan.getDestination(), true)); + setNextState(MoveRegionState.MOVE_REGION_ASSIGN); + break; + case MOVE_REGION_ASSIGN: + addChildProcedure(new AssignProcedure(plan.getRegionInfo(), plan.getDestination())); + return Flow.NO_MORE_STATE; + default: + throw new UnsupportedOperationException("unhandled state=" + state); + } + return Flow.HAS_MORE_STATE; + } + + @Override + protected void rollbackState(final MasterProcedureEnv env, final MoveRegionState state) + throws IOException { + // no-op + } + + @Override + public boolean abort(final MasterProcedureEnv env) { + return false; + } + + @Override + public void toStringClassDetails(final StringBuilder sb) { + sb.append(getClass().getSimpleName()); + sb.append("(plan="); + sb.append(plan); + sb.append(")"); + } + + @Override + protected MoveRegionState getInitialState() { + return MoveRegionState.MOVE_REGION_UNASSIGN; + } + + @Override + protected int getStateId(final MoveRegionState state) { + return state.getNumber(); + } + + @Override + protected MoveRegionState getState(final int stateId) { + return MoveRegionState.valueOf(stateId); + } + + @Override + public TableName getTableName() { + return plan.getRegionInfo().getTable(); + } + + @Override + public TableOperationType getTableOperationType() { + return TableOperationType.REGION_EDIT; + } + + @Override + protected void serializeStateData(final OutputStream stream) throws IOException { + super.serializeStateData(stream); + + final MoveRegionStateData.Builder state = MoveRegionStateData.newBuilder() + .setRegionInfo(HRegionInfo.convert(plan.getRegionInfo())) + .setSourceServer(ProtobufUtil.toServerName(plan.getSource())) + .setDestinationServer(ProtobufUtil.toServerName(plan.getDestination())); + state.build().writeDelimitedTo(stream); + } + + @Override + protected void deserializeStateData(final InputStream stream) throws IOException { + super.deserializeStateData(stream); + + final MoveRegionStateData state = MoveRegionStateData.parseDelimitedFrom(stream); + final HRegionInfo regionInfo = HRegionInfo.convert(state.getRegionInfo()); + final ServerName sourceServer = ProtobufUtil.toServerName(state.getSourceServer()); + final ServerName destinationServer = ProtobufUtil.toServerName(state.getDestinationServer()); + this.plan = new RegionPlan(regionInfo, sourceServer, destinationServer); + } +} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionAlreadyAssignedException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionAlreadyAssignedException.java new file mode 100644 index 0000000..dc97766 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionAlreadyAssignedException.java @@ -0,0 +1,42 @@ 
+/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.master.assignment; + +import org.apache.hadoop.hbase.RegionException; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; + +@InterfaceAudience.Public +@InterfaceStability.Stable +public class RegionAlreadyAssignedException extends RegionException { + private static final long serialVersionUID = 1473510258071111373L; + + /** default constructor */ + public RegionAlreadyAssignedException() { + super(); + } + + /** + * Constructor + * @param s message + */ + public RegionAlreadyAssignedException(String s) { + super(s); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionNotAssignedException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionNotAssignedException.java new file mode 100644 index 0000000..b843f38 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionNotAssignedException.java @@ -0,0 +1,42 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.master.assignment; + +import org.apache.hadoop.hbase.RegionException; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; + +@InterfaceAudience.Public +@InterfaceStability.Stable +public class RegionNotAssignedException extends RegionException { + private static final long serialVersionUID = 1473510258071111375L; + + /** default constructor */ + public RegionNotAssignedException() { + super(); + } + + /** + * Constructor + * @param s message + */ + public RegionNotAssignedException(String s) { + super(s); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java new file mode 100644 index 0000000..ddedea0 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java @@ -0,0 +1,212 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.master.assignment; + +import java.io.IOException; +import java.util.Arrays; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HRegionLocation; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.MetaTableAccessor; +import org.apache.hadoop.hbase.RegionLocations; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.master.MasterServices; +import org.apache.hadoop.hbase.master.RegionState; +import org.apache.hadoop.hbase.master.RegionState.State; +import org.apache.hadoop.hbase.util.MultiHConnection; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; +import org.apache.zookeeper.KeeperException; + +import com.google.common.base.Preconditions; + +/** + * Store Region State to hbase:meta table. 
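+ * The location of hbase:meta itself is tracked in ZooKeeper (via MetaTableLocator), so only
+ * user-region rows are written here, with their server, state and open sequence number columns.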
+ */ +@InterfaceAudience.Private +public class RegionStateStore { + private static final Log LOG = LogFactory.getLog(RegionStateStore.class); + + /** The delimiter for meta columns for replicaIds > 0 */ + protected static final char META_REPLICA_ID_DELIMITER = '_'; + + private final MasterServices master; + + private MultiHConnection multiHConnection; + + public RegionStateStore(final MasterServices master) { + this.master = master; + } + + public void start() throws IOException { + } + + public void stop() { + if (multiHConnection != null) { + multiHConnection.close(); + multiHConnection = null; + } + } + + public void updateRegionLocation(final HRegionInfo regionInfo, final State state, + final ServerName regionLocation, final ServerName lastHost, final long openSeqNum) + throws IOException { + if (regionInfo.isMetaRegion()) { + updateMetaLocation(regionInfo, regionLocation); + } else { + updateUserRegionLocation(regionInfo, state, regionLocation, lastHost, openSeqNum); + } + } + + public void updateRegionState(final long openSeqNum, final RegionState newState, + final RegionState oldState) throws IOException { + updateRegionLocation(newState.getRegion(), newState.getState(), newState.getServerName(), + oldState != null ? oldState.getServerName() : null, openSeqNum); + } + + protected void updateMetaLocation(final HRegionInfo regionInfo, final ServerName serverName) + throws IOException { + try { + MetaTableLocator.setMetaLocation(master.getZooKeeper(), serverName, + regionInfo.getReplicaId(), State.OPEN); + } catch (KeeperException e) { + throw new IOException(e); + } + } + + protected void updateUserRegionLocation(final HRegionInfo regionInfo, final State state, + final ServerName regionLocation, final ServerName lastHost, final long openSeqNum) + throws IOException { + final int replicaId = regionInfo.getReplicaId(); + final Put put = new Put(MetaTableAccessor.getMetaKeyForRegion(regionInfo)); + final StringBuilder info = new StringBuilder("Updating hbase:meta row "); + info.append(regionInfo.getRegionNameAsString()).append(" with state=").append(state); + if (openSeqNum >= 0) { + Preconditions.checkArgument(state == State.OPEN && regionLocation != null, + "Open region should be on a server"); + MetaTableAccessor.addLocation(put, regionLocation, openSeqNum, -1, replicaId); + info.append(", openSeqNum=").append(openSeqNum); + info.append(", server=").append(regionLocation); + } else if (regionLocation != null && !regionLocation.equals(lastHost)) { + put.addImmutable(HConstants.CATALOG_FAMILY, getServerNameColumn(replicaId), + Bytes.toBytes(regionLocation.getServerName())); + info.append(", sn=").append(regionLocation); + } + put.addImmutable(HConstants.CATALOG_FAMILY, getStateColumn(replicaId), + Bytes.toBytes(state.name())); + LOG.info(info); + + final HTableDescriptor htd = master.getTableDescriptors().get(regionInfo.getTable()); + final boolean serialReplication = (htd != null) ? htd.hasSerialReplicationScope() : false; + if (serialReplication && state == State.OPEN) { + Put barrierPut = MetaTableAccessor.makeBarrierPut(regionInfo.getEncodedNameAsBytes(), + openSeqNum, regionInfo.getTable().getName()); + updateRegionLocation(regionInfo, state, put, barrierPut); + } else { + updateRegionLocation(regionInfo, state, put); + } + } + + protected void updateRegionLocation(final HRegionInfo regionInfo, final State state, + final Put... 
put) throws IOException { + synchronized (this) { + if (multiHConnection == null) { + multiHConnection = new MultiHConnection(master.getConfiguration(), 1); + } + } + + try { + multiHConnection.processBatchCallback(Arrays.asList(put), TableName.META_TABLE_NAME, null, null); + } catch (IOException e) { + String msg = String.format("Failed to persist region=%s state=%s", + regionInfo.getShortNameToLog(), state); + LOG.error(msg, e); + master.abort(msg, e); + throw e; + } + } + + // ========================================================================== + // Server Name + // ========================================================================== + + /** + * Returns the {@link ServerName} from catalog table {@link Result} + * where the region is transitioning. It should be the same as + * {@link MetaTableAccessor#getServerName(Result,int)} if the server is at OPEN state. + * @param r Result to pull the transitioning server name from + * @return A ServerName instance or {@link MetaTableAccessor#getServerName(Result,int)} + * if necessary fields not found or empty. + */ + static ServerName getRegionServer(final Result r, int replicaId) { + final Cell cell = r.getColumnLatestCell(HConstants.CATALOG_FAMILY, + getServerNameColumn(replicaId)); + if (cell == null || cell.getValueLength() == 0) { + RegionLocations locations = MetaTableAccessor.getRegionLocations(r); + if (locations != null) { + HRegionLocation location = locations.getRegionLocation(replicaId); + if (location != null) { + return location.getServerName(); + } + } + return null; + } + return ServerName.parseServerName(Bytes.toString(cell.getValueArray(), + cell.getValueOffset(), cell.getValueLength())); + } + + private static byte[] getServerNameColumn(int replicaId) { + return replicaId == 0 + ? HConstants.SERVERNAME_QUALIFIER + : Bytes.toBytes(HConstants.SERVERNAME_QUALIFIER_STR + META_REPLICA_ID_DELIMITER + + String.format(HRegionInfo.REPLICA_ID_FORMAT, replicaId)); + } + + // ========================================================================== + // Region State + // ========================================================================== + + /** + * Pull the region state from a catalog table {@link Result}. + * @param r Result to pull the region state from + * @return the region state, or OPEN if there's no value written. + */ + protected State getRegionState(final Result r, int replicaId) { + Cell cell = r.getColumnLatestCell(HConstants.CATALOG_FAMILY, getStateColumn(replicaId)); + if (cell == null || cell.getValueLength() == 0) return State.OPENING; + return State.valueOf(Bytes.toString(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength())); + } + + private static byte[] getStateColumn(int replicaId) { + return replicaId == 0 + ? HConstants.STATE_QUALIFIER + : Bytes.toBytes(HConstants.STATE_QUALIFIER_STR + META_REPLICA_ID_DELIMITER + + String.format(HRegionInfo.REPLICA_ID_FORMAT, replicaId)); + } +} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStates.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStates.java new file mode 100644 index 0000000..ceb5b67 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStates.java @@ -0,0 +1,709 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.master.assignment; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.Comparator; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.HashMap; +import java.util.HashSet; +import java.util.SortedSet; +import java.util.TreeSet; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentSkipListMap; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.master.RegionState; +import org.apache.hadoop.hbase.master.RegionState.State; +import org.apache.hadoop.hbase.procedure2.ProcedureEvent; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; + +/** + * RegionStates contains a set of maps that describes the in-memory state of the AM, with + * the regions available in the system, the region in transition, the offline regions and + * the servers holding regions. + */ +@InterfaceAudience.Private +public class RegionStates { + private static final Log LOG = LogFactory.getLog(RegionStates.class); + + protected static final State[] STATES_EXPECTEX_IN_OPEN = new State[] { + State.OFFLINE, State.CLOSED, // disable/offline + State.SPLITTING, State.SPLIT, // ServerCrashProcedure + State.OPENING, State.FAILED_OPEN, // already in-progress (retrying) + }; + + protected static final State[] STATES_EXPECTEX_IN_CLOSE = new State[] { + State.SPLITTING, State.SPLIT, // ServerCrashProcedure + State.OPEN, // enabled/open + State.CLOSING // already in-progress (retrying) + }; + + private static class AssignmentProcedureEvent extends ProcedureEvent { + public AssignmentProcedureEvent(final HRegionInfo regionInfo) { + super(regionInfo); + } + } + + private static class ServerReportEvent extends ProcedureEvent { + public ServerReportEvent(final ServerName serverName) { + super(serverName); + } + } + + public static class RegionStateNode implements Comparable { + private final HRegionInfo regionInfo; + private final ProcedureEvent event; + + private volatile RegionTransitionProcedure procedure = null; + private volatile ServerName regionLocation = null; + private volatile ServerName lastHost = null; + private volatile State state = State.OFFLINE; + private volatile long lastUpdate = 0; + private volatile long openSeqNum = HConstants.NO_SEQNUM; + + public RegionStateNode(final HRegionInfo regionInfo) { + this.regionInfo = regionInfo; + this.event = new AssignmentProcedureEvent(regionInfo); + } + + public boolean setState(final State update, final State... 
expected) { + final boolean expectedState = isInState(expected); + if (expectedState) { + this.state = update; + } + return expectedState; + } + + public boolean isInState(final State... expected) { + if (expected != null && expected.length > 0) { + boolean expectedState = false; + for (int i = 0; i < expected.length; ++i) { + expectedState |= (state == expected[i]); + } + return expectedState; + } + return true; + } + + public boolean isInTransition() { + return getProcedure() != null; + } + + public long getLastUpdate() { + return procedure != null ? procedure.getLastUpdate() : lastUpdate; + } + + public void setLastHost(final ServerName serverName) { + this.lastHost = serverName; + } + + public void setOpenSeqNum(final long seqId) { + this.openSeqNum = seqId; + } + + public void setRegionLocation(final ServerName serverName) { + this.regionLocation = serverName; + this.lastUpdate = EnvironmentEdgeManager.currentTime(); + } + + public boolean setProcedure(final RegionTransitionProcedure proc) { + if (this.procedure != null && this.procedure != proc) { + return false; + } + this.procedure = proc; + return true; + } + + public boolean unsetProcedure(final RegionTransitionProcedure proc) { + if (this.procedure != null && this.procedure != proc) { + return false; + } + this.procedure = null; + return true; + } + + public RegionTransitionProcedure getProcedure() { + return procedure; + } + + public ProcedureEvent getProcedureEvent() { + return event; + } + + public HRegionInfo getRegionInfo() { + return regionInfo; + } + + public TableName getTable() { + return getRegionInfo().getTable(); + } + + public boolean isSystemTable() { + return getTable().isSystemTable(); + } + + public ServerName getLastHost() { + return lastHost; + } + + public ServerName getRegionLocation() { + return regionLocation; + } + + public State getState() { + return state; + } + + public long getOpenSeqNum() { + return openSeqNum; + } + + public int getFormatVersion() { + // we don't have any format for now + // it should probably be in regionInfo.getFormatVersion() + return 0; + } + + @Override + public int compareTo(final RegionStateNode other) { + // NOTE: HRegionInfo sort by table first, so we are relying on that. + // we have a TestRegionState#testOrderedByTable() that check for that. + return getRegionInfo().compareTo(other.getRegionInfo()); + } + + @Override + public int hashCode() { + return getRegionInfo().hashCode(); + } + + @Override + public boolean equals(final Object other) { + if (this == other) return true; + if (!(other instanceof RegionStateNode)) return false; + return compareTo((RegionStateNode)other) == 0; + } + + @Override + public String toString() { + return toDescriptiveString(); + } + + public String toDescriptiveString() { + return String.format("state=%s, table=%s, region=%s, server=%s", + getState(), getTable(), getRegionInfo().getEncodedName(), getRegionLocation()); + } + } + + // This comparator sorts the RegionStates by time stamp then Region name. + // Comparing by timestamp alone can lead us to discard different RegionStates that happen + // to share a timestamp. + private static class RegionStateStampComparator implements Comparator { + @Override + public int compare(final RegionState l, final RegionState r) { + int stampCmp = Long.compare(l.getStamp(), r.getStamp()); + return stampCmp != 0 ? 
stampCmp : l.getRegion().compareTo(r.getRegion()); + } + } + + public enum ServerState { ONLINE, SPLITTING, OFFLINE } + public static class ServerStateNode implements Comparable { + private final ServerReportEvent reportEvent; + + private final Set regions; + private final ServerName serverName; + + private volatile ServerState state = ServerState.ONLINE; + private volatile int versionNumber = 0; + + public ServerStateNode(final ServerName serverName) { + this.serverName = serverName; + this.regions = new HashSet(); + this.reportEvent = new ServerReportEvent(serverName); + } + + public ServerName getServerName() { + return serverName; + } + + public ServerState getState() { + return state; + } + + public int getVersionNumber() { + return versionNumber; + } + + public ProcedureEvent getReportEvent() { + return reportEvent; + } + + public boolean isInState(final ServerState... expected) { + boolean expectedState = false; + if (expected != null) { + for (int i = 0; i < expected.length; ++i) { + expectedState |= (state == expected[i]); + } + } + return expectedState; + } + + public void setState(final ServerState state) { + this.state = state; + } + + public void setVersionNumber(final int versionNumber) { + this.versionNumber = versionNumber; + } + + public Set getRegions() { + return regions; + } + + public int getRegionCount() { + return regions.size(); + } + + public ArrayList getRegionInfoList() { + ArrayList hris = new ArrayList(regions.size()); + for (RegionStateNode region: regions) { + hris.add(region.getRegionInfo()); + } + return hris; + } + + public void addRegion(final RegionStateNode regionNode) { + this.regions.add(regionNode); + } + + public void removeRegion(final RegionStateNode regionNode) { + this.regions.remove(regionNode); + } + + @Override + public int compareTo(final ServerStateNode other) { + return getServerName().compareTo(other.getServerName()); + } + + @Override + public int hashCode() { + return getServerName().hashCode(); + } + + @Override + public boolean equals(final Object other) { + if (this == other) return true; + if (!(other instanceof ServerStateNode)) return false; + return compareTo((ServerStateNode)other) == 0; + } + + @Override + public String toString() { + return String.format("ServerStateNode(%s)", getServerName()); + } + } + + public final static RegionStateStampComparator REGION_STATE_STAMP_COMPARATOR = + new RegionStateStampComparator(); + + // TODO: Replace the ConcurrentSkipListMaps + private final ConcurrentSkipListMap regionsMap = + new ConcurrentSkipListMap(Bytes.BYTES_COMPARATOR); + + private final ConcurrentSkipListMap regionInTransition = + new ConcurrentSkipListMap(); + + private final ConcurrentSkipListMap regionOffline = + new ConcurrentSkipListMap(); + + private final ConcurrentHashMap serverMap = + new ConcurrentHashMap(); + + public RegionStates() { } + + public void clear() { + regionsMap.clear(); + regionInTransition.clear(); + regionOffline.clear(); + serverMap.clear(); + } + + // ========================================================================== + // RegionStateNode helpers + // ========================================================================== + protected RegionStateNode createRegionNode(final HRegionInfo regionInfo) { + RegionStateNode newNode = new RegionStateNode(regionInfo); + RegionStateNode oldNode = regionsMap.putIfAbsent(regionInfo.getRegionName(), newNode); + return oldNode != null ? 
oldNode : newNode; + } + + protected RegionStateNode getOrCreateRegionNode(final HRegionInfo regionInfo) { + RegionStateNode node = regionsMap.get(regionInfo.getRegionName()); + return node != null ? node : createRegionNode(regionInfo); + } + + public RegionStateNode getRegionNodeFromName(final byte[] regionName) { + return regionsMap.get(regionName); + } + + protected RegionStateNode getRegionNode(final HRegionInfo regionInfo) { + return getRegionNodeFromName(regionInfo.getRegionName()); + } + + public RegionStateNode getRegionNodeFromEncodedName(final String encodedRegionName) { + // TODO: Need a map but it is just dispatch merge... + for (RegionStateNode node: regionsMap.values()) { + if (node.getRegionInfo().getEncodedName().equals(encodedRegionName)) { + return node; + } + } + return null; + } + + public void deleteRegion(final HRegionInfo regionInfo) { + regionsMap.remove(regionInfo.getRegionName()); + } + + public ArrayList getTableRegionStateNodes(final TableName tableName) { + final ArrayList regions = new ArrayList(); + for (RegionStateNode node: regionsMap.tailMap(tableName.getName()).values()) { + if (!node.getTable().equals(tableName)) break; + regions.add(node); + } + return regions; + } + + public ArrayList getTableRegionStates(final TableName tableName) { + final ArrayList regions = new ArrayList(); + for (RegionStateNode node: regionsMap.tailMap(tableName.getName()).values()) { + if (!node.getTable().equals(tableName)) break; + regions.add(createRegionState(node)); + } + return regions; + } + + public Collection getRegionNodes() { + return regionsMap.values(); + } + + public ArrayList getRegionStates() { + final ArrayList regions = new ArrayList(regionsMap.size()); + for (RegionStateNode node: regionsMap.values()) { + regions.add(createRegionState(node)); + } + return regions; + } + + // ========================================================================== + // RegionState helpers + // ========================================================================== + public RegionState getRegionState(final HRegionInfo regionInfo) { + return createRegionState(getRegionNode(regionInfo)); + } + + public RegionState getRegionState(final String encodedRegionName) { + return createRegionState(getRegionNodeFromEncodedName(encodedRegionName)); + } + + private RegionState createRegionState(final RegionStateNode node) { + return node == null ? 
null : + new RegionState(node.getRegionInfo(), node.getState(), + node.getLastUpdate(), node.getRegionLocation()); + } + + // ============================================================================================ + // TODO: helpers + // ============================================================================================ + public boolean hasTableRegionStates(final TableName tableName) { + // TODO + return getTableRegionStates(tableName).size() > 0; + } + + public List getRegionsOfTable(final TableName table) { + final ArrayList nodes = getTableRegionStateNodes(table); + final ArrayList hris = new ArrayList(nodes.size()); + for (RegionStateNode node: nodes) { + hris.add(node.getRegionInfo()); + } + return hris; + } + + /** + * Returns the set of regions hosted by the specified server + * @param serverName the server we are interested in + * @return set of HRegionInfo hosted by the specified server + */ + public List getServerRegionInfoSet(final ServerName serverName) { + final ServerStateNode serverInfo = getServerNode(serverName); + if (serverInfo == null) return Collections.emptyList(); + + synchronized (serverInfo) { + return serverInfo.getRegionInfoList(); + } + } + + // ============================================================================================ + // TODO: split helpers + // ============================================================================================ + public void logSplit(final ServerName serverName) { + final ServerStateNode serverNode = getOrCreateServer(serverName); + synchronized (serverNode) { + serverNode.setState(ServerState.SPLITTING); + for (RegionStateNode regionNode: serverNode.getRegions()) { + synchronized (regionNode) { + // TODO: Abort procedure if present + regionNode.setState(State.SPLITTING); + } + } + } + } + + public void logSplit(final HRegionInfo regionInfo) { + final RegionStateNode regionNode = getRegionNode(regionInfo); + synchronized (regionNode) { + regionNode.setState(State.SPLIT); + } + } + + public void updateRegionState(final HRegionInfo regionInfo, final State state) { + // TODO: Remove me, used by TestSplitTransactioOnCluster + } + + // ============================================================================================ + // TODO: + // ============================================================================================ + public List getAssignedRegions() { + final List result = new ArrayList(); + for (RegionStateNode node: regionsMap.values()) { + if (!node.isInTransition()) { + result.add(node.getRegionInfo()); + } + } + return result; + } + + public boolean isRegionInState(final HRegionInfo regionInfo, final State... 
state) { + RegionStateNode region = getRegionNode(regionInfo); + synchronized (region) { + return region.isInState(state); + } + } + + public boolean isRegionOnline(final HRegionInfo regionInfo) { + return isRegionInState(regionInfo, State.OPEN); + } + + public Map> getSnapShotOfAssignment( + final Collection regions) { + final Map> result = new HashMap>(); + for (HRegionInfo hri: regions) { + final RegionStateNode node = getRegionNode(hri); + if (node == null) continue; + + // TODO: State.OPEN + final ServerName serverName = node.getRegionLocation(); + if (serverName == null) continue; + + List serverRegions = result.get(serverName); + if (serverRegions == null) { + serverRegions = new ArrayList(); + result.put(serverName, serverRegions); + } + + serverRegions.add(node.getRegionInfo()); + } + return result; + } + + public Map getRegionAssignments() { + final HashMap assignments = new HashMap(); + for (RegionStateNode node: regionsMap.values()) { + assignments.put(node.getRegionInfo(), node.getRegionLocation()); + } + return assignments; + } + + public Map> getRegionByStateOfTable(TableName tableName) { + final State[] states = State.values(); + final Map> tableRegions = + new HashMap>(states.length); + for (int i = 0; i < states.length; ++i) { + tableRegions.put(states[i], new ArrayList()); + } + + for (RegionStateNode node: regionsMap.values()) { + tableRegions.get(node.getState()).add(node.getRegionInfo()); + } + return tableRegions; + } + + public ServerName getRegionServerOfRegion(final HRegionInfo regionInfo) { + RegionStateNode region = getRegionNode(regionInfo); + synchronized (region) { + return region.getRegionLocation(); + } + } + + public Map>> getAssignmentsByTable() { + final Map>> result = + new HashMap>>(); + for (RegionStateNode node: regionsMap.values()) { + Map> tableResult = result.get(node.getTable()); + if (tableResult == null) { + tableResult = new HashMap>(); + result.put(node.getTable(), tableResult); + } + + final ServerName serverName = node.getRegionLocation(); + List serverResult = tableResult.get(serverName); + if (serverResult == null) { + serverResult = new ArrayList(); + tableResult.put(serverName, serverResult); + } + + serverResult.add(node.getRegionInfo()); + } + return result; + } + + // ========================================================================== + // Region in transition helpers + // ========================================================================== + protected boolean addRegionInTransition(final RegionStateNode regionNode, + final RegionTransitionProcedure procedure) { + if (procedure != null && !regionNode.setProcedure(procedure)) return false; + + regionInTransition.put(regionNode.getRegionInfo(), regionNode); + return true; + } + + protected void removeRegionInTransition(final RegionStateNode regionNode, + final RegionTransitionProcedure procedure) { + regionInTransition.remove(regionNode.getRegionInfo()); + regionNode.unsetProcedure(procedure); + } + + public boolean hasRegionsInTransition() { + return !regionInTransition.isEmpty(); + } + + public boolean isRegionInTransition(final HRegionInfo regionInfo) { + final RegionStateNode node = regionInTransition.get(regionInfo); + return node != null ? node.isInTransition() : false; + } + + public RegionState getRegionTransitionState(final HRegionInfo hri) { + RegionStateNode node = regionInTransition.get(hri); + if (node == null) return null; + + synchronized (node) { + return node.isInTransition() ? 
createRegionState(node) : null; + } + } + + public List getRegionsInTransition() { + return new ArrayList(regionInTransition.values()); + } + + public List getRegionsStateInTransition() { + final List rit = new ArrayList(regionInTransition.size()); + for (RegionStateNode node: regionInTransition.values()) { + rit.add(createRegionState(node)); + } + return rit; + } + + public SortedSet getRegionsInTransitionOrderedByTimestamp() { + final SortedSet rit = new TreeSet(REGION_STATE_STAMP_COMPARATOR); + for (RegionStateNode node: regionInTransition.values()) { + rit.add(createRegionState(node)); + } + return rit; + } + + // ========================================================================== + // Region offline helpers + // ========================================================================== + public void addToOfflineRegions(final RegionStateNode regionNode) { + regionOffline.put(regionNode.getRegionInfo(), regionNode); + } + + public void removeFromOfflineRegions(final HRegionInfo regionInfo) { + regionOffline.remove(regionInfo); + } + + // ========================================================================== + // Servers + // ========================================================================== + public ServerStateNode getOrCreateServer(final ServerName serverName) { + ServerStateNode node = serverMap.get(serverName); + if (node == null) { + node = new ServerStateNode(serverName); + ServerStateNode oldNode = serverMap.putIfAbsent(serverName, node); + node = oldNode != null ? oldNode : node; + } + return node; + } + + public void removeServer(final ServerName serverName) { + serverMap.remove(serverName); + } + + protected ServerStateNode getServerNode(final ServerName serverName) { + return serverMap.get(serverName); + } + + public double getAverageLoad() { + int numServers = 0; + int totalLoad = 0; + for (ServerStateNode node: serverMap.values()) { + // TODO: Not used! + int regionCount = node.getRegionCount(); + totalLoad++; + numServers++; + } + return numServers == 0 ? 0.0 : (double)totalLoad / (double)numServers; + } + + public ServerStateNode addRegionToServer(final ServerName serverName, + final RegionStateNode regionNode) { + ServerStateNode serverNode = getOrCreateServer(serverName); + serverNode.addRegion(regionNode); + return serverNode; + } + + public ServerStateNode removeRegionFromServer(final ServerName serverName, + final RegionStateNode regionNode) { + ServerStateNode serverNode = getOrCreateServer(serverName); + serverNode.removeRegion(regionNode); + return serverNode; + } +} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.java new file mode 100644 index 0000000..8c6003c --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.java @@ -0,0 +1,313 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.master.assignment; + +import java.io.IOException; +import java.util.concurrent.atomic.AtomicBoolean; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.exceptions.UnexpectedStateException; +import org.apache.hadoop.hbase.master.assignment.RegionStates.RegionStateNode; +import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; +import org.apache.hadoop.hbase.master.procedure.TableProcedureInterface; +import org.apache.hadoop.hbase.procedure2.Procedure; +import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException; +import org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher.RemoteOperation; +import org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher.RemoteProcedure; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionTransitionState; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode; + +/** + * Base class for the Assign and Unassign Procedure. + * There can only be one RegionTransitionProcedure per region running at a time + * since each procedure takes a lock on the region (see MasterProcedureScheduler). + * + *
<p>
This procedure is asynchronous and responds to external events. + * The AssignmentManager will notify this procedure when the RS completes + * the operation and reports the transitioned state + * (see the Assign and Unassign class for more details). + */ +@InterfaceAudience.Private +public abstract class RegionTransitionProcedure + extends Procedure + implements TableProcedureInterface, + RemoteProcedure { + private static final Log LOG = LogFactory.getLog(RegionTransitionProcedure.class); + + protected final AtomicBoolean aborted = new AtomicBoolean(false); + + private RegionTransitionState transitionState = + RegionTransitionState.REGION_TRANSITION_QUEUE; + private RegionStateNode regionNode = null; + private HRegionInfo regionInfo; + private boolean hasLock = false; + + public RegionTransitionProcedure() { + // Required by the Procedure framework to create the procedure on replay + } + + public RegionTransitionProcedure(final HRegionInfo regionInfo) { + this.regionInfo = regionInfo; + } + + public HRegionInfo getRegionInfo() { + return regionInfo; + } + + protected void setRegionInfo(final HRegionInfo regionInfo) { + // Setter is for deserialization. + this.regionInfo = regionInfo; + } + + @Override + public TableName getTableName() { + return getRegionInfo().getTable(); + } + + public boolean isMeta() { + return TableName.isMetaTableName(getTableName()); + } + + @Override + public void toStringClassDetails(final StringBuilder sb) { + sb.append(getClass().getSimpleName()); + sb.append(" table="); + sb.append(getTableName()); + sb.append(", region="); + sb.append(getRegionInfo().getEncodedName()); + if (regionNode != null) { + sb.append(", server="); + sb.append(regionNode.getRegionLocation()); + } + } + + public RegionStateNode getRegionState(final MasterProcedureEnv env) { + if (regionNode == null) { + regionNode = env.getAssignmentManager() + .getRegionStates().getOrCreateRegionNode(getRegionInfo()); + } + return regionNode; + } + + protected void setTransitionState(final RegionTransitionState state) { + this.transitionState = state; + } + + protected RegionTransitionState getTransitionState() { + return transitionState; + } + + protected abstract boolean startTransition(MasterProcedureEnv env, RegionStateNode regionNode) + throws IOException, ProcedureSuspendedException; + protected abstract boolean updateTransition(MasterProcedureEnv env, RegionStateNode regionNode) + throws IOException, ProcedureSuspendedException; + protected abstract void completeTransition(MasterProcedureEnv env, RegionStateNode regionNode) + throws IOException, ProcedureSuspendedException; + + protected abstract void reportTransition(MasterProcedureEnv env, + RegionStateNode regionNode, TransitionCode code, long seqId) throws UnexpectedStateException; + + public abstract RemoteOperation remoteCallBuild(MasterProcedureEnv env, ServerName serverName); + protected abstract void remoteCallFailed(MasterProcedureEnv env, + RegionStateNode regionNode, IOException exception); + + @Override + public void remoteCallCompleted(final MasterProcedureEnv env, + final ServerName serverName, final RemoteOperation response) { + // Ignore the response? reportTransition() is the one that count? 
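+    // Intentionally a no-op: the dispatch acknowledgement carries nothing this procedure
+    // acts on. The remote RS later calls master.reportRegionStateTransition(), which is
+    // routed into reportTransition() below and wakes this procedure's suspended event.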
+ } + + @Override + public void remoteCallFailed(final MasterProcedureEnv env, + final ServerName serverName, final IOException exception) { + final RegionStateNode regionNode = getRegionState(env); + assert serverName.equals(regionNode.getRegionLocation()); // TODO + LOG.warn("Remote call failed " + regionNode + ": " + exception.getMessage()); + remoteCallFailed(env, regionNode, exception); + env.getProcedureScheduler().wakeEvent(regionNode.getProcedureEvent()); + } + + protected void addToRemoteDispatcher(final MasterProcedureEnv env, + final ServerName targetServer) { + assert targetServer.equals(getRegionState(env).getRegionLocation()) : + "targetServer=" + targetServer + " getRegionLocation=" + getRegionState(env).getRegionLocation(); // TODO + + LOG.info("ADD TO REMOTE DISPATCHER " + getRegionState(env) + ": " + targetServer); + + // Add the open region operation to the server dispatch queue. + // The pending close will be dispatched to the server together with the other + // pending operation for that server. + env.getProcedureScheduler().suspendEvent(getRegionState(env).getProcedureEvent()); + + // TODO: If the server is gone... go on failure/retry + env.getRemoteDispatcher().getNode(targetServer).add(this); + } + + protected void reportTransition(final MasterProcedureEnv env, final ServerName serverName, + final TransitionCode code, final long seqId) throws UnexpectedStateException { + final RegionStateNode regionNode = getRegionState(env); + if (!serverName.equals(regionNode.getRegionLocation())) { + if (isMeta() && regionNode.getRegionLocation() == null) { + regionNode.setRegionLocation(serverName); + } else { + throw new UnexpectedStateException(String.format( + "reported unexpected transition state=%s from server=%s on region=%s, expected server=%s", + code, serverName, regionNode.getRegionInfo(), regionNode.getRegionLocation())); + } + } + + reportTransition(env, regionNode, code, seqId); + env.getProcedureScheduler().wakeEvent(regionNode.getProcedureEvent()); + } + + protected boolean isServerOnline(final MasterProcedureEnv env, final RegionStateNode regionNode) { + return isServerOnline(env, regionNode.getRegionLocation()); + } + + protected boolean isServerOnline(final MasterProcedureEnv env, final ServerName serverName) { + return env.getMasterServices().getServerManager().isServerOnline(serverName); + } + + @Override + protected Procedure[] execute(final MasterProcedureEnv env) throws ProcedureSuspendedException { + final AssignmentManager am = env.getAssignmentManager(); + final RegionStateNode regionNode = getRegionState(env); + LOG.debug("" + transitionState + " " + this); + if (!am.addRegionInTransition(regionNode, this)) { + String msg = String.format( + "There is already another procedure running on this region this=%s owner=%s", + this, regionNode.getProcedure()); + LOG.warn(msg); + setAbortFailure(getClass().getSimpleName(), msg); + return null; + } + + try { + boolean retry; + do { + retry = false; + switch (transitionState) { + case REGION_TRANSITION_QUEUE: + // 1. push into the AM queue for balancer policy + if (!startTransition(env, regionNode)) { + // the operation aborted, check getException() + am.removeRegionInTransition(regionNode, this); + return null; + } + transitionState = RegionTransitionState.REGION_TRANSITION_DISPATCH; + if (env.getProcedureScheduler().waitEvent(regionNode.getProcedureEvent(), this)) { + throw new ProcedureSuspendedException(); + } + break; + + case REGION_TRANSITION_DISPATCH: + // 2. 
send the request to the target server + if (!updateTransition(env, regionNode)) { + // The operation aborted, check getException() + am.removeRegionInTransition(regionNode, this); + return null; + } + if (transitionState != RegionTransitionState.REGION_TRANSITION_DISPATCH) { + retry = true; + break; + } + if (env.getProcedureScheduler().waitEvent(regionNode.getProcedureEvent(), this)) { + throw new ProcedureSuspendedException(); + } + break; + + case REGION_TRANSITION_FINISH: + // 3. wait assignment response. completion/failure + completeTransition(env, regionNode); + am.removeRegionInTransition(regionNode, this); + return null; + } + } while (retry); + } catch (IOException e) { + LOG.warn("Retriable error trying to transition: " + regionNode, e); + } + + return new Procedure[] { this }; + } + + @Override + protected void rollback(final MasterProcedureEnv env) { + if (isRollbackSupported(transitionState)) { + // Nothing done up to this point. abort safely. + // This should happen when something like disableTable() is triggered. + env.getAssignmentManager().removeRegionInTransition(regionNode, this); + return; + } + + // There is no rollback for assignment unless we cancel the operation by + // dropping/disabling the table. + throw new UnsupportedOperationException("unhandled state " + transitionState); + } + + protected abstract boolean isRollbackSupported(final RegionTransitionState state); + + @Override + protected boolean abort(final MasterProcedureEnv env) { + if (isRollbackSupported(transitionState)) { + aborted.set(true); + return true; + } + return false; + } + + @Override + protected LockState acquireLock(final MasterProcedureEnv env) { + // unless we are assigning meta, wait for meta to be available and loaded. + if (!isMeta() && (env.waitFailoverCleanup(this) || + env.getAssignmentManager().waitMetaInitialized(this, getRegionInfo()))) { + return LockState.LOCK_EVENT_WAIT; + } + + // TODO: Revisit this and move it to the executor + return env.getProcedureScheduler().waitRegion(this, getRegionInfo())? + LockState.LOCK_EVENT_WAIT: LockState.LOCK_ACQUIRED; + } + + @Override + protected void releaseLock(final MasterProcedureEnv env) { + env.getProcedureScheduler().wakeRegion(this, getRegionInfo()); + hasLock = false; + } + + protected boolean holdLock(final MasterProcedureEnv env) { + return true; + } + + protected boolean hasLock(final MasterProcedureEnv env) { + return hasLock; + } + + @Override + protected boolean shouldWaitClientAck(MasterProcedureEnv env) { + // The operation is triggered internally on the server + // the client does not know about this procedure. + return false; + } +} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/UnassignProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/UnassignProcedure.java new file mode 100644 index 0000000..f18d663 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/UnassignProcedure.java @@ -0,0 +1,216 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.master.assignment; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.util.concurrent.atomic.AtomicBoolean; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.NotServingRegionException; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.exceptions.UnexpectedStateException; +import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException; +import org.apache.hadoop.hbase.master.assignment.RegionStates.RegionStateNode; +import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; +import org.apache.hadoop.hbase.master.procedure.RSProcedureDispatcher.RegionCloseOperation; +import org.apache.hadoop.hbase.master.RegionState.State; +import org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher.RemoteOperation; +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionTransitionState; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.UnassignRegionStateData; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode; +import org.apache.hadoop.hbase.regionserver.RegionServerAbortedException; +import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException; + + +/** + * Procedure that describe the unassignment of a single region. + * There can only be one RegionTransitionProcedure per region running at the time, + * since each procedure takes a lock on the region. + * + *
<p>
The Unassign starts by placing a "close region" request in the Remote Dispatcher + * queue, and the procedure will then go into a "waiting state". + * The Remote Dispatcher will batch the various requests for that server and + * they will be sent to the RS for execution. + * The RS will complete the close operation by calling master.reportRegionStateTransition(). + * The AM will intercept the transition report, and notify the procedure. + * The procedure will finish the unassign by publishing its new state on meta + * or it will retry the unassign. + */ +@InterfaceAudience.Private +public class UnassignProcedure extends RegionTransitionProcedure { + private static final Log LOG = LogFactory.getLog(UnassignProcedure.class); + + private final AtomicBoolean serverCrashed = new AtomicBoolean(false); + + // TODO: should this be in a reassign procedure? + // ...and keep unassign for 'disable' case? + private ServerName destinationServer; + private boolean force; + + public UnassignProcedure() { + // Required by the Procedure framework to create the procedure on replay + super(); + } + + public UnassignProcedure(final HRegionInfo regionInfo, + final ServerName destinationServer, final boolean force) { + super(regionInfo); + this.destinationServer = destinationServer; + this.force = force; + + // we don't need REGION_TRANSITION_QUEUE, we jump directly to sending the request + setTransitionState(RegionTransitionState.REGION_TRANSITION_DISPATCH); + } + + @Override + public TableOperationType getTableOperationType() { + return TableOperationType.UNASSIGN; + } + + @Override + protected boolean isRollbackSupported(final RegionTransitionState state) { + switch (state) { + case REGION_TRANSITION_QUEUE: + case REGION_TRANSITION_DISPATCH: + return true; + default: + return false; + } + } + + @Override + public void serializeStateData(final OutputStream stream) throws IOException { + final UnassignRegionStateData.Builder state = UnassignRegionStateData.newBuilder() + .setTransitionState(getTransitionState()) + .setRegionInfo(HRegionInfo.convert(getRegionInfo())); + if (destinationServer != null) { + state.setDestinationServer(ProtobufUtil.toServerName(destinationServer)); + } + if (force) { + state.setForce(true); + } + state.build().writeDelimitedTo(stream); + } + + @Override + public void deserializeStateData(final InputStream stream) throws IOException { + final UnassignRegionStateData state = UnassignRegionStateData.parseDelimitedFrom(stream); + setTransitionState(state.getTransitionState()); + setRegionInfo(HRegionInfo.convert(state.getRegionInfo())); + force = state.getForce(); + if (state.hasDestinationServer()) { + destinationServer = ProtobufUtil.toServerName(state.getDestinationServer()); + } + } + + @Override + protected boolean startTransition(final MasterProcedureEnv env, final RegionStateNode regionNode) { + // nothing to do here. we skip the step in the constructor + // by jumping to REGION_TRANSITION_DISPATCH + throw new UnsupportedOperationException(); + } + + @Override + protected boolean updateTransition(final MasterProcedureEnv env, final RegionStateNode regionNode) + throws IOException { + // if the region is already closed or offline we can't do much... 
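+    // Note: returning false from updateTransition() makes execute() remove this region
+    // from the in-transition set and finish the procedure without dispatching a close request.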
+ if (regionNode.isInState(State.CLOSED, State.OFFLINE)) { + LOG.info("Not assigned " + regionNode); + return false; + } + + // if the server is down, mark the operation as complete + if (serverCrashed.get() || !isServerOnline(env, regionNode)) { + LOG.info("Server already down: " + regionNode); + return false; + } + + // if we haven't started the operation yet, we can abort + if (aborted.get() && regionNode.isInState(State.OPEN)) { + setAbortFailure(getClass().getSimpleName(), "abort requested"); + return false; + } + + // Mark the region as closing + env.getAssignmentManager().markRegionAsClosing(regionNode); + + // Add the close region operation the the server dispatch queue. + // The pending close will be dispatched to the server together with the other + // pending operation for that server. + addToRemoteDispatcher(env, regionNode.getRegionLocation()); + return true; + } + + @Override + protected void completeTransition(final MasterProcedureEnv env, final RegionStateNode regionNode) + throws IOException { + env.getAssignmentManager().markRegionAsClosed(regionNode); + } + + @Override + public RemoteOperation remoteCallBuild(final MasterProcedureEnv env, final ServerName serverName) { + assert serverName.equals(getRegionState(env).getRegionLocation()); + return new RegionCloseOperation(this, getRegionInfo(), destinationServer); + } + + @Override + protected void reportTransition(final MasterProcedureEnv env, final RegionStateNode regionNode, + final TransitionCode code, final long seqId) throws UnexpectedStateException { + switch (code) { + case CLOSED: + setTransitionState(RegionTransitionState.REGION_TRANSITION_FINISH); + break; + default: + throw new UnexpectedStateException(String.format( + "reported unexpected transition state=%s for region=%s server=%s, expected CLOSED.", + code, regionNode.getRegionInfo(), regionNode.getRegionLocation())); + } + } + + @Override + protected void remoteCallFailed(final MasterProcedureEnv env, final RegionStateNode regionNode, + final IOException exception) { + if (exception instanceof RegionServerAbortedException || + exception instanceof RegionServerStoppedException || + exception instanceof ServerNotRunningYetException) { + // TODO + // RS is aborting, we cannot offline the region since the region may need to do WAL + // recovery. Until we see the RS expiration, we should retry. + serverCrashed.set(true); + } else if (exception instanceof NotServingRegionException) { + // TODO + serverCrashed.set(true); + } else { + // TODO: kill the server in case we get an exception we are not able to handle + LOG.warn("killing server because we are not able to close the region=" + regionNode + + " exception=" + exception); + env.getMasterServices().getServerManager().expireServer(regionNode.getRegionLocation()); + serverCrashed.set(true); + } + // ...TODO + setTransitionState(RegionTransitionState.REGION_TRANSITION_DISPATCH); + } +} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java index f27feb3..fedd95b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java @@ -63,7 +63,7 @@ import com.google.common.collect.Sets; /** * The base class for load balancers. 
It provides the the functions used to by - * {@link org.apache.hadoop.hbase.master.AssignmentManager} to assign regions + * {@link org.apache.hadoop.hbase.master.assignment.AssignmentManager} to assign regions * in the edge cases. It doesn't provide an implementation of the * actual balancing algorithm. * diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionLocationFinder.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionLocationFinder.java index d5edfab..c56a334 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionLocationFinder.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionLocationFinder.java @@ -23,7 +23,6 @@ import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; import java.util.List; -import java.util.Set; import java.util.concurrent.Callable; import java.util.concurrent.ExecutionException; import java.util.concurrent.Executors; @@ -39,9 +38,8 @@ import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.master.AssignmentManager; +import org.apache.hadoop.hbase.master.assignment.AssignmentManager; import org.apache.hadoop.hbase.master.MasterServices; -import org.apache.hadoop.hbase.master.RegionStates; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; @@ -149,19 +147,15 @@ class RegionLocationFinder { if (services == null) { return false; } - AssignmentManager am = services.getAssignmentManager(); + final AssignmentManager am = services.getAssignmentManager(); if (am == null) { return false; } - RegionStates regionStates = am.getRegionStates(); - if (regionStates == null) { - return false; - } - Set regions = regionStates.getRegionAssignments().keySet(); + // TODO: Should this refresh all the regions or only the ones assigned? 
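+    // Note: with AMv2, getAssignedRegions() only returns regions that are not currently in
+    // transition, so regions that are mid-assign/unassign are skipped until a later refresh.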
boolean includesUserTables = false; - for (final HRegionInfo hri : regions) { + for (final HRegionInfo hri : am.getAssignedRegions()) { cache.refresh(hri); includesUserTables = includesUserTables || !hri.isSystemTable(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java index a6a0774..ee3083c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java @@ -20,28 +20,27 @@ package org.apache.hadoop.hbase.master.balancer; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.Comparator; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.NavigableMap; import java.util.Random; import java.util.TreeMap; -import java.util.Comparator; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.HBaseIOException; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.master.RegionPlan; +import org.apache.hadoop.hbase.util.Pair; import com.google.common.collect.MinMaxPriorityQueue; -import org.apache.hadoop.hbase.util.Pair; /** * Makes decisions about the placement and movement of Regions across @@ -54,7 +53,7 @@ import org.apache.hadoop.hbase.util.Pair; * locations for all Regions in a cluster. * *
<p>
This classes produces plans for the - * {@link org.apache.hadoop.hbase.master.AssignmentManager} to execute. + * {@link org.apache.hadoop.hbase.master.assignment.AssignmentManager} to execute. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) public class SimpleLoadBalancer extends BaseLoadBalancer { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java index 8825637..a43ba42 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java @@ -30,7 +30,7 @@ import java.util.Random; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.commons.math.stat.descriptive.DescriptiveStatistics; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ClusterStatus; import org.apache.hadoop.hbase.HBaseInterfaceAudience; @@ -40,6 +40,7 @@ import org.apache.hadoop.hbase.RegionLoad; import org.apache.hadoop.hbase.ServerLoad; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.master.RegionPlan; import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action; @@ -156,23 +157,16 @@ public class StochasticLoadBalancer extends BaseLoadBalancer { @Override public synchronized void setConf(Configuration conf) { super.setConf(conf); - LOG.info("loading config"); - maxSteps = conf.getInt(MAX_STEPS_KEY, maxSteps); - stepsPerRegion = conf.getInt(STEPS_PER_REGION_KEY, stepsPerRegion); maxRunningTime = conf.getLong(MAX_RUNNING_TIME_KEY, maxRunningTime); - numRegionLoadsToRemember = conf.getInt(KEEP_REGION_LOADS, numRegionLoadsToRemember); isByTable = conf.getBoolean(HConstants.HBASE_MASTER_LOADBALANCE_BYTABLE, isByTable); - minCostNeedBalance = conf.getFloat(MIN_COST_NEED_BALANCE_KEY, minCostNeedBalance); - if (localityCandidateGenerator == null) { localityCandidateGenerator = new LocalityBasedCandidateGenerator(services); } localityCost = new LocalityCostFunction(conf, services); - if (candidateGenerators == null) { candidateGenerators = new CandidateGenerator[] { new RandomCandidateGenerator(), @@ -181,17 +175,14 @@ public class StochasticLoadBalancer extends BaseLoadBalancer { new RegionReplicaRackCandidateGenerator(), }; } - regionLoadFunctions = new CostFromRegionLoadFunction[] { new ReadRequestCostFunction(conf), new WriteRequestCostFunction(conf), new MemstoreSizeCostFunction(conf), new StoreFileCostFunction(conf) }; - regionReplicaHostCostFunction = new RegionReplicaHostCostFunction(conf); regionReplicaRackCostFunction = new RegionReplicaRackCostFunction(conf); - costFunctions = new CostFunction[]{ new RegionCountSkewCostFunction(conf), new PrimaryRegionCountSkewCostFunction(conf), @@ -205,10 +196,10 @@ public class StochasticLoadBalancer extends BaseLoadBalancer { regionLoadFunctions[2], regionLoadFunctions[3], }; - curFunctionCosts= new Double[costFunctions.length]; tempFunctionCosts= new Double[costFunctions.length]; - + LOG.info("Loaded config; maxSteps=" + maxSteps + ", stepsPerRegion=" + stepsPerRegion + + ", maxRunningTime=" + maxRunningTime + ", isByTable=" + 
isByTable + ", etc."); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.java index e957f9d..02d1472 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.java @@ -40,6 +40,7 @@ public abstract class AbstractStateMachineTableProcedure private final ProcedurePrepareLatch syncLatch; private User user; + private boolean hasLock; protected AbstractStateMachineTableProcedure() { // Required by the Procedure framework to create the procedure on replay @@ -111,4 +112,4 @@ public abstract class AbstractStateMachineTableProcedure throw new TableNotFoundException(getTableName()); } } -} \ No newline at end of file +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AddColumnFamilyProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AddColumnFamilyProcedure.java index 7bb2887..34c1853 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AddColumnFamilyProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AddColumnFamilyProcedure.java @@ -31,7 +31,6 @@ import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.InvalidFamilyOperationException; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.master.MasterCoprocessorHost; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos; @@ -100,7 +99,10 @@ public class AddColumnFamilyProcedure setNextState(AddColumnFamilyState.ADD_COLUMN_FAMILY_REOPEN_ALL_REGIONS); break; case ADD_COLUMN_FAMILY_REOPEN_ALL_REGIONS: - reOpenAllRegionsIfTableIsOnline(env); + if (env.getAssignmentManager().isTableEnabled(getTableName())) { + addChildProcedure(env.getAssignmentManager() + .createReopenProcedures(getRegionInfoList(env))); + } return Flow.NO_MORE_STATE; default: throw new UnsupportedOperationException(this + " unhandled state=" + state); @@ -285,7 +287,8 @@ public class AddColumnFamilyProcedure env.getMasterServices().getTableDescriptors().add(unmodifiedHTableDescriptor); // Make sure regions are opened after table descriptor is updated. - reOpenAllRegionsIfTableIsOnline(env); + //reOpenAllRegionsIfTableIsOnline(env); + // TODO: NUKE ROLLBACK!!!! } } @@ -302,25 +305,6 @@ public class AddColumnFamilyProcedure } /** - * Last action from the procedure - executed when online schema change is supported. - * @param env MasterProcedureEnv - * @throws IOException - */ - private void reOpenAllRegionsIfTableIsOnline(final MasterProcedureEnv env) throws IOException { - // This operation only run when the table is enabled. 
- if (!env.getMasterServices().getTableStateManager() - .isTableState(getTableName(), TableState.State.ENABLED)) { - return; - } - - if (MasterDDLOperationHelper.reOpenAllRegions(env, getTableName(), getRegionInfoList(env))) { - LOG.info("Completed add column family operation on table " + getTableName()); - } else { - LOG.warn("Error on reopening the regions on table " + getTableName()); - } - } - - /** * The procedure could be restarted from a different machine. If the variable is null, we need to * retrieve it. * @return traceEnabled @@ -362,7 +346,8 @@ public class AddColumnFamilyProcedure private List getRegionInfoList(final MasterProcedureEnv env) throws IOException { if (regionInfoList == null) { - regionInfoList = ProcedureSyncWait.getRegionsFromMeta(env, getTableName()); + regionInfoList = env.getAssignmentManager().getRegionStates() + .getRegionsOfTable(getTableName()); } return regionInfoList; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java index aefd14c..21ece9c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java @@ -134,10 +134,12 @@ public class CloneSnapshotProcedure setNextState(CloneSnapshotState.CLONE_SNAPSHOT_ASSIGN_REGIONS); break; case CLONE_SNAPSHOT_ASSIGN_REGIONS: - CreateTableProcedure.assignRegions(env, getTableName(), newRegions); + CreateTableProcedure.setEnablingState(env, getTableName()); + addChildProcedure(env.getAssignmentManager().createAssignProcedures(newRegions)); setNextState(CloneSnapshotState.CLONE_SNAPSHOT_UPDATE_DESC_CACHE); break; case CLONE_SNAPSHOT_UPDATE_DESC_CACHE: + CreateTableProcedure.setEnabledState(env, getTableName()); CreateTableProcedure.updateTableDescCache(env, getTableName()); setNextState(CloneSnapshotState.CLONE_SNAPSHOT_POST_OPERATION); break; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java index 2421dfc..dda94c1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java @@ -37,7 +37,6 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.RegionReplicaUtil; import org.apache.hadoop.hbase.client.TableState; -import org.apache.hadoop.hbase.master.AssignmentManager; import org.apache.hadoop.hbase.master.MasterCoprocessorHost; import org.apache.hadoop.hbase.master.MasterFileSystem; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; @@ -107,10 +106,12 @@ public class CreateTableProcedure setNextState(CreateTableState.CREATE_TABLE_ASSIGN_REGIONS); break; case CREATE_TABLE_ASSIGN_REGIONS: - assignRegions(env, getTableName(), newRegions); + setEnablingState(env, getTableName()); + addChildProcedure(env.getAssignmentManager().createAssignProcedures(newRegions)); setNextState(CreateTableState.CREATE_TABLE_UPDATE_DESC_CACHE); break; case CREATE_TABLE_UPDATE_DESC_CACHE: + setEnabledState(env, getTableName()); updateTableDescCache(env, getTableName()); setNextState(CreateTableState.CREATE_TABLE_POST_OPERATION); break; @@ -333,21 
+334,21 @@ public class CreateTableProcedure protected static List addTableToMeta(final MasterProcedureEnv env, final HTableDescriptor hTableDescriptor, final List regions) throws IOException { - if (regions != null && regions.size() > 0) { - ProcedureSyncWait.waitMetaRegions(env); + assert (regions != null && regions.size() > 0) : "expected at least 1 region, got " + regions; - // Add regions to META - addRegionsToMeta(env, hTableDescriptor, regions); - // Add replicas if needed - List newRegions = addReplicas(env, hTableDescriptor, regions); + ProcedureSyncWait.waitMetaRegions(env); - // Setup replication for region replicas if needed - if (hTableDescriptor.getRegionReplication() > 1) { - ServerRegionReplicaUtil.setupRegionReplicaReplication(env.getMasterConfiguration()); - } - return newRegions; + // Add replicas if needed + List newRegions = addReplicas(env, hTableDescriptor, regions); + + // Add regions to META + addRegionsToMeta(env, hTableDescriptor, newRegions); + + // Setup replication for region replicas if needed + if (hTableDescriptor.getRegionReplication() > 1) { + ServerRegionReplicaUtil.setupRegionReplicaReplication(env.getMasterConfiguration()); } - return regions; + return newRegions; } /** @@ -375,18 +376,16 @@ public class CreateTableProcedure return hRegionInfos; } - protected static void assignRegions(final MasterProcedureEnv env, - final TableName tableName, final List regions) throws IOException { - ProcedureSyncWait.waitRegionServers(env); + protected static void setEnablingState(final MasterProcedureEnv env, final TableName tableName) + throws IOException { // Mark the table as Enabling env.getMasterServices().getTableStateManager() .setTableState(tableName, TableState.State.ENABLING); + } - // Trigger immediate assignment of the regions in round-robin fashion - final AssignmentManager assignmentManager = env.getMasterServices().getAssignmentManager(); - ModifyRegionUtils.assignRegions(assignmentManager, regions); - + protected static void setEnabledState(final MasterProcedureEnv env, final TableName tableName) + throws IOException { // Enable table env.getMasterServices().getTableStateManager() .setTableState(tableName, TableState.State.ENABLED); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteColumnFamilyProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteColumnFamilyProcedure.java index 096172a..78bd715 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteColumnFamilyProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteColumnFamilyProcedure.java @@ -30,7 +30,6 @@ import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.InvalidFamilyOperationException; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.master.MasterCoprocessorHost; import org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; @@ -106,7 +105,10 @@ public class DeleteColumnFamilyProcedure setNextState(DeleteColumnFamilyState.DELETE_COLUMN_FAMILY_REOPEN_ALL_REGIONS); break; case DELETE_COLUMN_FAMILY_REOPEN_ALL_REGIONS: - reOpenAllRegionsIfTableIsOnline(env); + if (env.getAssignmentManager().isTableEnabled(getTableName())) { + addChildProcedure(env.getAssignmentManager() + 
.createReopenProcedures(getRegionInfoList(env))); + } return Flow.NO_MORE_STATE; default: throw new UnsupportedOperationException(this + " unhandled state=" + state); @@ -292,7 +294,8 @@ public class DeleteColumnFamilyProcedure env.getMasterServices().getTableDescriptors().add(unmodifiedHTableDescriptor); // Make sure regions are opened after table descriptor is updated. - reOpenAllRegionsIfTableIsOnline(env); + //reOpenAllRegionsIfTableIsOnline(env); + // TODO: NUKE ROLLBACK!!!! } /** @@ -316,25 +319,6 @@ public class DeleteColumnFamilyProcedure } /** - * Last action from the procedure - executed when online schema change is supported. - * @param env MasterProcedureEnv - * @throws IOException - */ - private void reOpenAllRegionsIfTableIsOnline(final MasterProcedureEnv env) throws IOException { - // This operation only run when the table is enabled. - if (!env.getMasterServices().getTableStateManager() - .isTableState(getTableName(), TableState.State.ENABLED)) { - return; - } - - if (MasterDDLOperationHelper.reOpenAllRegions(env, getTableName(), getRegionInfoList(env))) { - LOG.info("Completed delete column family operation on table " + getTableName()); - } else { - LOG.warn("Error on reopening the regions on table " + getTableName()); - } - } - - /** * The procedure could be restarted from a different machine. If the variable is null, we need to * retrieve it. * @return traceEnabled @@ -376,7 +360,8 @@ public class DeleteColumnFamilyProcedure private List getRegionInfoList(final MasterProcedureEnv env) throws IOException { if (regionInfoList == null) { - regionInfoList = ProcedureSyncWait.getRegionsFromMeta(env, getTableName()); + regionInfoList = env.getAssignmentManager().getRegionStates() + .getRegionsOfTable(getTableName()); } return regionInfoList; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java index 06b666b..d76ebf4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java @@ -44,7 +44,6 @@ import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.exceptions.HBaseException; import org.apache.hadoop.hbase.favored.FavoredNodesManager; -import org.apache.hadoop.hbase.master.AssignmentManager; import org.apache.hadoop.hbase.master.MasterCoprocessorHost; import org.apache.hadoop.hbase.master.MasterFileSystem; import org.apache.hadoop.hbase.mob.MobConstants; @@ -98,7 +97,7 @@ public class DeleteTableProcedure // TODO: Move out... 
in the acquireLock() LOG.debug("waiting for '" + getTableName() + "' regions in transition"); - regions = ProcedureSyncWait.getRegionsFromMeta(env, getTableName()); + regions = env.getAssignmentManager().getRegionStates().getRegionsOfTable(getTableName()); assert regions != null && !regions.isEmpty() : "unexpected 0 regions"; ProcedureSyncWait.waitRegionInTransition(env, regions); @@ -341,8 +340,7 @@ public class DeleteTableProcedure final TableName tableName) throws IOException { Connection connection = env.getMasterServices().getConnection(); Scan tableScan = MetaTableAccessor.getScanForTableName(connection, tableName); - try (Table metaTable = - connection.getTable(TableName.META_TABLE_NAME)) { + try (Table metaTable = connection.getTable(TableName.META_TABLE_NAME)) { List deletes = new ArrayList(); try (ResultScanner resScanner = metaTable.getScanner(tableScan)) { for (Result result : resScanner) { @@ -376,11 +374,9 @@ public class DeleteTableProcedure protected static void deleteAssignmentState(final MasterProcedureEnv env, final TableName tableName) throws IOException { - final AssignmentManager am = env.getMasterServices().getAssignmentManager(); - // Clean up regions of the table in RegionStates. LOG.debug("Removing '" + tableName + "' from region states."); - am.getRegionStates().tableDeleted(tableName); + env.getMasterServices().getAssignmentManager().deleteTable(tableName); // If entry for this table states, remove it. LOG.debug("Marking '" + tableName + "' as deleted."); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java index b53ce45..4d45af3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java @@ -21,12 +21,9 @@ package org.apache.hadoop.hbase.master.procedure; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; -import java.util.List; -import java.util.concurrent.ExecutorService; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotEnabledException; @@ -34,17 +31,11 @@ import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.constraint.ConstraintException; -import org.apache.hadoop.hbase.master.AssignmentManager; -import org.apache.hadoop.hbase.master.BulkAssigner; import org.apache.hadoop.hbase.master.MasterCoprocessorHost; -import org.apache.hadoop.hbase.master.RegionState; -import org.apache.hadoop.hbase.master.RegionStates; import org.apache.hadoop.hbase.master.TableStateManager; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.DisableTableState; -import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -import org.apache.htrace.Trace; @InterfaceAudience.Private public class DisableTableProcedure @@ -116,12 +107,8 @@ public class DisableTableProcedure setNextState(DisableTableState.DISABLE_TABLE_MARK_REGIONS_OFFLINE); 
break; case DISABLE_TABLE_MARK_REGIONS_OFFLINE: - if (markRegionsOffline(env, tableName, true) == - MarkRegionOfflineOpResult.MARK_ALL_REGIONS_OFFLINE_SUCCESSFUL) { - setNextState(DisableTableState.DISABLE_TABLE_SET_DISABLED_TABLE_STATE); - } else { - LOG.trace("Retrying later to disable the missing regions"); - } + addChildProcedure(env.getAssignmentManager().createUnassignProcedures(tableName)); + setNextState(DisableTableState.DISABLE_TABLE_SET_DISABLED_TABLE_STATE); break; case DISABLE_TABLE_SET_DISABLED_TABLE_STATE: setTableStateToDisabled(env, tableName); @@ -290,83 +277,6 @@ public class DisableTableProcedure } /** - * Mark regions of the table offline with retries - * @param env MasterProcedureEnv - * @param tableName the target table - * @param retryRequired whether to retry if the first run failed - * @return whether the operation is fully completed or being interrupted. - * @throws IOException - */ - protected static MarkRegionOfflineOpResult markRegionsOffline( - final MasterProcedureEnv env, - final TableName tableName, - final Boolean retryRequired) throws IOException { - // Dev consideration: add a config to control max number of retry. For now, it is hard coded. - int maxTry = (retryRequired ? 10 : 1); - MarkRegionOfflineOpResult operationResult = - MarkRegionOfflineOpResult.BULK_ASSIGN_REGIONS_FAILED; - do { - try { - operationResult = markRegionsOffline(env, tableName); - if (operationResult == MarkRegionOfflineOpResult.MARK_ALL_REGIONS_OFFLINE_SUCCESSFUL) { - break; - } - maxTry--; - } catch (Exception e) { - LOG.warn("Received exception while marking regions online. tries left: " + maxTry, e); - maxTry--; - if (maxTry > 0) { - continue; // we still have some retry left, try again. - } - throw e; - } - } while (maxTry > 0); - - if (operationResult != MarkRegionOfflineOpResult.MARK_ALL_REGIONS_OFFLINE_SUCCESSFUL) { - LOG.warn("Some or all regions of the Table '" + tableName + "' were still online"); - } - - return operationResult; - } - - /** - * Mark regions of the table offline - * @param env MasterProcedureEnv - * @param tableName the target table - * @return whether the operation is fully completed or being interrupted. - * @throws IOException - */ - private static MarkRegionOfflineOpResult markRegionsOffline( - final MasterProcedureEnv env, - final TableName tableName) throws IOException { - // Get list of online regions that are of this table. Regions that are - // already closed will not be included in this list; i.e. the returned - // list is not ALL regions in a table, its all online regions according - // to the in-memory state on this master. - MarkRegionOfflineOpResult operationResult = - MarkRegionOfflineOpResult.MARK_ALL_REGIONS_OFFLINE_SUCCESSFUL; - final List regions = - env.getMasterServices().getAssignmentManager().getRegionStates() - .getRegionsOfTable(tableName); - if (regions.size() > 0) { - LOG.info("Offlining " + regions.size() + " regions."); - - BulkDisabler bd = new BulkDisabler(env, tableName, regions); - try { - if (!bd.bulkAssign()) { - operationResult = MarkRegionOfflineOpResult.BULK_ASSIGN_REGIONS_FAILED; - } - } catch (InterruptedException e) { - LOG.warn("Disable was interrupted"); - // Preserve the interrupt. 
- Thread.currentThread().interrupt(); - operationResult = MarkRegionOfflineOpResult.MARK_ALL_REGIONS_OFFLINE_INTERRUPTED; - } - } - return operationResult; - } - - /** * Mark table state to Disabled * @param env MasterProcedureEnv * @throws IOException @@ -428,64 +338,4 @@ public class DisableTableProcedure } } } - - /** - * Run bulk disable. - */ - private static class BulkDisabler extends BulkAssigner { - private final AssignmentManager assignmentManager; - private final List regions; - private final TableName tableName; - private final int waitingTimeForEvents; - - public BulkDisabler(final MasterProcedureEnv env, final TableName tableName, - final List regions) { - super(env.getMasterServices()); - this.assignmentManager = env.getMasterServices().getAssignmentManager(); - this.tableName = tableName; - this.regions = regions; - this.waitingTimeForEvents = - env.getMasterServices().getConfiguration() - .getInt("hbase.master.event.waiting.time", 1000); - } - - @Override - protected void populatePool(ExecutorService pool) { - RegionStates regionStates = assignmentManager.getRegionStates(); - for (final HRegionInfo region : regions) { - if (regionStates.isRegionInTransition(region) - && !regionStates.isRegionInState(region, RegionState.State.FAILED_CLOSE)) { - continue; - } - pool.execute(Trace.wrap("DisableTableHandler.BulkDisabler", new Runnable() { - @Override - public void run() { - assignmentManager.unassign(region); - } - })); - } - } - - @Override - protected boolean waitUntilDone(long timeout) throws InterruptedException { - long startTime = EnvironmentEdgeManager.currentTime(); - long remaining = timeout; - List regions = null; - long lastLogTime = startTime; - while (!server.isStopped() && remaining > 0) { - Thread.sleep(waitingTimeForEvents); - regions = assignmentManager.getRegionStates().getRegionsOfTable(tableName); - long now = EnvironmentEdgeManager.currentTime(); - // Don't log more than once every ten seconds. Its obnoxious. And only log table regions - // if we are waiting a while for them to go down... 
- if (LOG.isDebugEnabled() && ((now - lastLogTime) > 10000)) { - lastLogTime = now; - LOG.debug("Disable waiting until done; " + remaining + " ms remaining; " + regions); - } - if (regions.isEmpty()) break; - remaining = timeout - (now - startTime); - } - return regions != null && regions.isEmpty(); - } - } } \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java index f4ecf15..4f4b5b1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java @@ -21,34 +21,20 @@ package org.apache.hadoop.hbase.master.procedure; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.MetaTableAccessor; -import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotDisabledException; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.TableState; -import org.apache.hadoop.hbase.master.AssignmentManager; -import org.apache.hadoop.hbase.master.BulkAssigner; -import org.apache.hadoop.hbase.master.GeneralBulkAssigner; import org.apache.hadoop.hbase.master.MasterCoprocessorHost; -import org.apache.hadoop.hbase.master.MasterServices; -import org.apache.hadoop.hbase.master.RegionStates; -import org.apache.hadoop.hbase.master.ServerManager; import org.apache.hadoop.hbase.master.TableStateManager; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.EnableTableState; -import org.apache.hadoop.hbase.util.Pair; -import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; @InterfaceAudience.Private public class EnableTableProcedure @@ -114,7 +100,7 @@ public class EnableTableProcedure setNextState(EnableTableState.ENABLE_TABLE_MARK_REGIONS_ONLINE); break; case ENABLE_TABLE_MARK_REGIONS_ONLINE: - markRegionsOnline(env, tableName, true); + addChildProcedure(env.getAssignmentManager().createAssignProcedures(tableName)); setNextState(EnableTableState.ENABLE_TABLE_SET_ENABLED_TABLE_STATE); break; case ENABLE_TABLE_SET_ENABLED_TABLE_STATE: @@ -287,137 +273,6 @@ public class EnableTableProcedure } /** - * Mark offline regions of the table online with retry - * @param env MasterProcedureEnv - * @param tableName the target table - * @param retryRequired whether to retry if the first run failed - * @throws IOException - */ - protected static void markRegionsOnline( - final MasterProcedureEnv env, - final TableName tableName, - final Boolean retryRequired) throws IOException { - // This is best effort approach to make all regions of a table online. If we fail to do - // that, it is ok that the table has some offline regions; user can fix it manually. - - // Dev consideration: add a config to control max number of retry. For now, it is hard coded. - int maxTry = (retryRequired ? 
10 : 1); - boolean done = false; - - do { - try { - done = markRegionsOnline(env, tableName); - if (done) { - break; - } - maxTry--; - } catch (Exception e) { - LOG.warn("Received exception while marking regions online. tries left: " + maxTry, e); - maxTry--; - if (maxTry > 0) { - continue; // we still have some retry left, try again. - } - throw e; - } - } while (maxTry > 0); - - if (!done) { - LOG.warn("Some or all regions of the Table '" + tableName + "' were offline"); - } - } - - /** - * Mark offline regions of the table online - * @param env MasterProcedureEnv - * @param tableName the target table - * @return whether the operation is fully completed or being interrupted. - * @throws IOException - */ - private static boolean markRegionsOnline(final MasterProcedureEnv env, final TableName tableName) - throws IOException { - final AssignmentManager assignmentManager = env.getMasterServices().getAssignmentManager(); - final MasterServices masterServices = env.getMasterServices(); - final ServerManager serverManager = masterServices.getServerManager(); - boolean done = false; - // Get the regions of this table. We're done when all listed - // tables are onlined. - List> tableRegionsAndLocations; - - if (TableName.META_TABLE_NAME.equals(tableName)) { - tableRegionsAndLocations = - new MetaTableLocator().getMetaRegionsAndLocations(masterServices.getZooKeeper()); - } else { - tableRegionsAndLocations = - MetaTableAccessor.getTableRegionsAndLocations(masterServices.getConnection(), tableName); - } - - int countOfRegionsInTable = tableRegionsAndLocations.size(); - Map regionsToAssign = - regionsToAssignWithServerName(env, tableRegionsAndLocations); - - // need to potentially create some regions for the replicas - List unrecordedReplicas = - AssignmentManager.replicaRegionsNotRecordedInMeta(new HashSet( - regionsToAssign.keySet()), masterServices); - Map> srvToUnassignedRegs = - assignmentManager.getBalancer().roundRobinAssignment(unrecordedReplicas, - serverManager.getOnlineServersList()); - if (srvToUnassignedRegs != null) { - for (Map.Entry> entry : srvToUnassignedRegs.entrySet()) { - for (HRegionInfo h : entry.getValue()) { - regionsToAssign.put(h, entry.getKey()); - } - } - } - - int offlineRegionsCount = regionsToAssign.size(); - - LOG.info("Table '" + tableName + "' has " + countOfRegionsInTable + " regions, of which " - + offlineRegionsCount + " are offline."); - if (offlineRegionsCount == 0) { - return true; - } - - List onlineServers = serverManager.createDestinationServersList(); - Map> bulkPlan = - env.getMasterServices().getAssignmentManager().getBalancer() - .retainAssignment(regionsToAssign, onlineServers); - if (bulkPlan != null) { - LOG.info("Bulk assigning " + offlineRegionsCount + " region(s) across " + bulkPlan.size() - + " server(s), retainAssignment=true"); - - BulkAssigner ba = new GeneralBulkAssigner(masterServices, bulkPlan, assignmentManager, true); - try { - if (ba.bulkAssign()) { - done = true; - } - } catch (InterruptedException e) { - LOG.warn("Enable operation was interrupted when enabling table '" + tableName + "'"); - // Preserve the interrupt. - Thread.currentThread().interrupt(); - } - } else { - LOG.info("Balancer was unable to find suitable servers for table " + tableName - + ", leaving unassigned"); - } - return done; - } - - /** - * Mark regions of the table offline during recovery - * @param env MasterProcedureEnv - */ - private void markRegionsOfflineDuringRecovery(final MasterProcedureEnv env) { - try { - // This is a best effort attempt. 
We will move on even it does not succeed. We will retry - // several times until we giving up. - DisableTableProcedure.markRegionsOffline(env, tableName, true); - } catch (Exception e) { - LOG.debug("Failed to offline all regions of table " + tableName + ". Ignoring", e); - } - } - - /** * Mark table state to Enabled * @param env MasterProcedureEnv * @throws IOException @@ -457,32 +312,6 @@ public class EnableTableProcedure } /** - * @param regionsInMeta - * @return List of regions neither in transition nor assigned. - * @throws IOException - */ - private static Map regionsToAssignWithServerName( - final MasterProcedureEnv env, - final List> regionsInMeta) throws IOException { - Map regionsToAssign = - new HashMap(regionsInMeta.size()); - RegionStates regionStates = env.getMasterServices().getAssignmentManager().getRegionStates(); - for (Pair regionLocation : regionsInMeta) { - HRegionInfo hri = regionLocation.getFirst(); - ServerName sn = regionLocation.getSecond(); - if (regionStates.isRegionOffline(hri)) { - regionsToAssign.put(hri, sn); - } else { - if (LOG.isDebugEnabled()) { - LOG.debug("Skipping assign for the region " + hri + " during enable table " - + hri.getTable() + " because its already in tranition or assigned."); - } - } - } - return regionsToAssign; - } - - /** * Coprocessor Action. * @param env MasterProcedureEnv * @param state the procedure state diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterDDLOperationHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterDDLOperationHelper.java index 980bf94..e96517e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterDDLOperationHelper.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterDDLOperationHelper.java @@ -19,32 +19,19 @@ package org.apache.hadoop.hbase.master.procedure; import java.io.IOException; -import java.util.ArrayList; -import java.util.LinkedList; import java.util.List; -import java.util.NavigableMap; -import java.util.TreeMap; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.HRegionLocation; -import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.client.Connection; -import org.apache.hadoop.hbase.client.RegionLocator; -import org.apache.hadoop.hbase.master.AssignmentManager; -import org.apache.hadoop.hbase.master.BulkReOpen; import org.apache.hadoop.hbase.master.MasterFileSystem; import org.apache.hadoop.hbase.mob.MobConstants; import org.apache.hadoop.hbase.mob.MobUtils; import org.apache.hadoop.hbase.util.Bytes; -import com.google.common.collect.Lists; -import com.google.common.collect.Maps; - /** * Helper class for schema change procedures */ @@ -60,16 +47,13 @@ public final class MasterDDLOperationHelper { public static void deleteColumnFamilyFromFileSystem( final MasterProcedureEnv env, final TableName tableName, - List regionInfoList, + final List regionInfoList, final byte[] familyName, - boolean hasMob) throws IOException { + final boolean hasMob) throws IOException { final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem(); if (LOG.isDebugEnabled()) { LOG.debug("Removing family=" + Bytes.toString(familyName) + " from table=" + tableName); } - if (regionInfoList == null) { - 
regionInfoList = ProcedureSyncWait.getRegionsFromMeta(env, tableName); - } for (HRegionInfo hri : regionInfoList) { // Delete the family directory in FS for all the regions one by one mfs.deleteFamilyFromFS(hri, familyName); @@ -81,77 +65,4 @@ public final class MasterDDLOperationHelper { mfs.deleteFamilyFromFS(mobRootDir, mobRegionInfo, familyName); } } - - /** - * Reopen all regions from a table after a schema change operation. - **/ - public static boolean reOpenAllRegions( - final MasterProcedureEnv env, - final TableName tableName, - final List regionInfoList) throws IOException { - boolean done = false; - LOG.info("Bucketing regions by region server..."); - List regionLocations = null; - Connection connection = env.getMasterServices().getConnection(); - try (RegionLocator locator = connection.getRegionLocator(tableName)) { - regionLocations = locator.getAllRegionLocations(); - } - // Convert List to Map. - NavigableMap hri2Sn = new TreeMap(); - for (HRegionLocation location : regionLocations) { - hri2Sn.put(location.getRegionInfo(), location.getServerName()); - } - TreeMap> serverToRegions = Maps.newTreeMap(); - List reRegions = new ArrayList(); - for (HRegionInfo hri : regionInfoList) { - ServerName sn = hri2Sn.get(hri); - // Skip the offlined split parent region - // See HBASE-4578 for more information. - if (null == sn) { - LOG.info("Skip " + hri); - continue; - } - if (!serverToRegions.containsKey(sn)) { - LinkedList hriList = Lists.newLinkedList(); - serverToRegions.put(sn, hriList); - } - reRegions.add(hri); - serverToRegions.get(sn).add(hri); - } - - LOG.info("Reopening " + reRegions.size() + " regions on " + serverToRegions.size() - + " region servers."); - AssignmentManager am = env.getMasterServices().getAssignmentManager(); - am.setRegionsToReopen(reRegions); - BulkReOpen bulkReopen = new BulkReOpen(env.getMasterServices(), serverToRegions, am); - while (true) { - try { - if (bulkReopen.bulkReOpen()) { - done = true; - break; - } else { - LOG.warn("Timeout before reopening all regions"); - } - } catch (InterruptedException e) { - LOG.warn("Reopen was interrupted"); - // Preserve the interrupt. - Thread.currentThread().interrupt(); - break; - } - } - return done; - } - - /** - * Get the region info list of a table from meta if it is not already known by the caller. - **/ - public static List getRegionInfoList( - final MasterProcedureEnv env, - final TableName tableName, - List regionInfoList) throws IOException { - if (regionInfoList == null) { - regionInfoList = ProcedureSyncWait.getRegionsFromMeta(env, tableName); - } - return regionInfoList; - } -} +} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureConstants.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureConstants.java index c21137d..f815bea 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureConstants.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureConstants.java @@ -29,7 +29,7 @@ public final class MasterProcedureConstants { /** Number of threads used by the procedure executor */ public static final String MASTER_PROCEDURE_THREADS = "hbase.master.procedure.threads"; - public static final int DEFAULT_MIN_MASTER_PROCEDURE_THREADS = 4; + public static final int DEFAULT_MIN_MASTER_PROCEDURE_THREADS = 16; /** * Procedure replay sanity check. 
In case a WAL is missing or unreadable we diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureEnv.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureEnv.java index 2cd5b08..d7a4652 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureEnv.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureEnv.java @@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.ipc.RpcServer; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.MasterCoprocessorHost; import org.apache.hadoop.hbase.master.MasterServices; +import org.apache.hadoop.hbase.master.assignment.AssignmentManager; import org.apache.hadoop.hbase.procedure2.Procedure; import org.apache.hadoop.hbase.procedure2.ProcedureEvent; import org.apache.hadoop.hbase.procedure2.store.ProcedureStore; @@ -93,12 +94,19 @@ public class MasterProcedureEnv implements ConfigurationObserver { } } + private final RSProcedureDispatcher remoteDispatcher; private final MasterProcedureScheduler procSched; private final MasterServices master; public MasterProcedureEnv(final MasterServices master) { + this(master, new RSProcedureDispatcher(master)); + } + + public MasterProcedureEnv(final MasterServices master, + final RSProcedureDispatcher remoteDispatcher) { this.master = master; this.procSched = new MasterProcedureScheduler(master.getConfiguration()); + this.remoteDispatcher = remoteDispatcher; } public User getRequestUser() { @@ -117,6 +125,10 @@ public class MasterProcedureEnv implements ConfigurationObserver { return master.getConfiguration(); } + public AssignmentManager getAssignmentManager() { + return master.getAssignmentManager(); + } + public MasterCoprocessorHost getMasterCoprocessorHost() { return master.getMasterCoprocessorHost(); } @@ -125,6 +137,10 @@ public class MasterProcedureEnv implements ConfigurationObserver { return procSched; } + public RSProcedureDispatcher getRemoteDispatcher() { + return remoteDispatcher; + } + public boolean isRunning() { return master.getMasterProcedureExecutor().isRunning(); } @@ -138,7 +154,15 @@ public class MasterProcedureEnv implements ConfigurationObserver { } public boolean waitServerCrashProcessingEnabled(Procedure proc) { - return procSched.waitEvent(((HMaster)master).getServerCrashProcessingEnabledEvent(), proc); + if (master instanceof HMaster) { + return procSched.waitEvent(((HMaster)master).getServerCrashProcessingEnabledEvent(), proc); + } + LOG.warn("server crash processing event on " + master); + return false; + } + + public boolean waitFailoverCleanup(Procedure proc) { + return procSched.waitEvent(master.getAssignmentManager().getFailoverCleanupEvent(), proc); } public void setEventReady(ProcedureEvent event, boolean isReady) { @@ -153,4 +177,4 @@ public class MasterProcedureEnv implements ConfigurationObserver { public void onConfigurationChange(Configuration conf) { master.getMasterProcedureExecutor().refreshConfiguration(conf); } -} \ No newline at end of file +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java index a3adf02..31a271f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java @@ -33,7 
+33,6 @@ import org.apache.hadoop.hbase.TableExistsException; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.master.procedure.TableProcedureInterface.TableOperationType; import org.apache.hadoop.hbase.procedure2.AbstractProcedureScheduler; import org.apache.hadoop.hbase.procedure2.LockStatus; @@ -51,52 +50,51 @@ import org.apache.hadoop.hbase.util.AvlUtil.AvlTreeIterator; * This ProcedureScheduler tries to provide to the ProcedureExecutor procedures * that can be executed without having to wait on a lock. * Most of the master operations can be executed concurrently, if they - * are operating on different tables (e.g. two create table can be performed - * at the same, time assuming table A and table B) or against two different servers; say - * two servers that crashed at about the same time. + * are operating on different tables (e.g. two create table procedures can be performed + * at the same time) or against two different servers; say two servers that crashed at + * about the same time. * - *

<p>Each procedure should implement an interface providing information for this queue.
- * for example table related procedures should implement TableProcedureInterface.
- * each procedure will be pushed in its own queue, and based on the operation type
- * we may take smarter decision. e.g. we can abort all the operations preceding
+ * <p>Each procedure should implement an Interface providing information for this queue.
+ * For example table related procedures should implement TableProcedureInterface.
+ * Each procedure will be pushed in its own queue, and based on the operation type
+ * we may make smarter decisions: e.g. we can abort all the operations preceding
 * a delete table, or similar.
 *
 * <p>Concurrency control
 * Concurrent access to member variables (tableRunQueue, serverRunQueue, locking, tableMap,
- * serverBuckets) is controlled by schedLock(). That mainly includes:<br>
+ * serverBuckets) is controlled by schedLock(). This mainly includes:<br>
 * <ul>
 *   <li>
- *     {@link #push(Procedure, boolean, boolean)} : A push will add a Queue back to run-queue
+ *     {@link #push(Procedure, boolean, boolean)}: A push will add a Queue back to run-queue
 *     when:
 *     <ol>
- *       <li>queue was empty before push (so must have been out of run-queue)</li>
- *       <li>child procedure is added (which means parent procedure holds exclusive lock, and it
+ *       <li>Queue was empty before push (so must have been out of run-queue)</li>
+ *       <li>Child procedure is added (which means parent procedure holds exclusive lock, and it
 *         must have moved Queue out of run-queue)</li>
 *     </ol>
 *   </li>
 *   <li>
- *     {@link #poll(long)} : A poll will remove a Queue from run-queue when:
+ *     {@link #poll(long)}: A poll will remove a Queue from run-queue when:
 *     <ol>
- *       <li>queue becomes empty after poll</li>
- *       <li>exclusive lock is requested by polled procedure and lock is available (returns the
+ *       <li>Queue becomes empty after poll</li>
+ *       <li>Exclusive lock is requested by polled procedure and lock is available (returns the
 *         procedure)</li>
- *       <li>exclusive lock is requested but lock is not available (returns null)</li>
- *       <li>Polled procedure is child of parent holding exclusive lock, and the next procedure is
+ *       <li>Exclusive lock is requested but lock is not available (returns null)</li>
+ *       <li>Polled procedure is child of parent holding exclusive lock and the next procedure is
 *         not a child</li>
 *     </ol>
 *   </li>
 *   <li>
- *     namespace/table/region locks: Queue is added back to run-queue when lock being released is:
+ *     Namespace/table/region locks: Queue is added back to run-queue when lock being released is:
 *     <ol>
- *       <li>exclusive lock</li>
- *       <li>last shared lock (in case queue was removed because next procedure in queue required
+ *       <li>Exclusive lock</li>
+ *       <li>Last shared lock (in case queue was removed because next procedure in queue required
 *         exclusive lock)</li>
 *     </ol>
 *   </li>
 * </ul>
*/ @InterfaceAudience.Private -@InterfaceStability.Evolving public class MasterProcedureScheduler extends AbstractProcedureScheduler { private static final Log LOG = LogFactory.getLog(MasterProcedureScheduler.class); @@ -118,16 +116,16 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler { * TableQueue with priority 1. */ private static class TablePriorities { + final int metaTablePriority; + final int userTablePriority; + final int sysTablePriority; + TablePriorities(Configuration conf) { metaTablePriority = conf.getInt("hbase.master.procedure.queue.meta.table.priority", 3); sysTablePriority = conf.getInt("hbase.master.procedure.queue.system.table.priority", 2); userTablePriority = conf.getInt("hbase.master.procedure.queue.user.table.priority", 1); } - final int metaTablePriority; - final int userTablePriority; - final int sysTablePriority; - int getPriority(TableName tableName) { if (tableName.equals(TableName.META_TABLE_NAME)) { return metaTablePriority; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MergeTableRegionsProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MergeTableRegionsProcedure.java index d7fe5f6..b2de3fa 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MergeTableRegionsProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MergeTableRegionsProcedure.java @@ -47,13 +47,13 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.exceptions.MergeRegionException; import org.apache.hadoop.hbase.io.hfile.CacheConfig; -import org.apache.hadoop.hbase.master.AssignmentManager; +import org.apache.hadoop.hbase.master.assignment.AssignmentManager; import org.apache.hadoop.hbase.master.CatalogJanitor; import org.apache.hadoop.hbase.master.MasterCoprocessorHost; import org.apache.hadoop.hbase.master.MasterFileSystem; import org.apache.hadoop.hbase.master.RegionPlan; import org.apache.hadoop.hbase.master.RegionState; -import org.apache.hadoop.hbase.master.RegionStates; +import org.apache.hadoop.hbase.master.assignment.RegionStates; import org.apache.hadoop.hbase.master.ServerManager; import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; import org.apache.hadoop.hbase.regionserver.StoreFile; @@ -475,7 +475,7 @@ public class MergeTableRegionsProcedure RegionPlan regionPlan = new RegionPlan(regionsToMerge[1], regionLocation2, regionLocation); LOG.info("Moving regions to same server for merge: " + regionPlan.toString()); - getAssignmentManager(env).balance(regionPlan); + getAssignmentManager(env).moveAsync(regionPlan); do { try { Thread.sleep(20); @@ -538,11 +538,13 @@ public class MergeTableRegionsProcedure transition.addRegionInfo(HRegionInfo.convert(mergedRegionInfo)); transition.addRegionInfo(HRegionInfo.convert(regionsToMerge[0])); transition.addRegionInfo(HRegionInfo.convert(regionsToMerge[1])); + /* TODO!!! 
if (env.getMasterServices().getAssignmentManager().onRegionTransition( getServerName(env), transition.build()) != null) { throw new IOException("Failed to update region state to MERGING for " + getRegionsToMergeListFullNameString()); } + */ } /** @@ -556,6 +558,7 @@ public class MergeTableRegionsProcedure transition.addRegionInfo(HRegionInfo.convert(mergedRegionInfo)); transition.addRegionInfo(HRegionInfo.convert(regionsToMerge[0])); transition.addRegionInfo(HRegionInfo.convert(regionsToMerge[1])); + /* TODO String msg = env.getMasterServices().getAssignmentManager().onRegionTransition( getServerName(env), transition.build()); if (msg != null) { @@ -567,7 +570,7 @@ public class MergeTableRegionsProcedure + getRegionsToMergeListFullNameString() + " as part of operation for reverting merge. Error message: " + msg); } - } + }*/ } /** @@ -711,11 +714,13 @@ public class MergeTableRegionsProcedure // Add merged region and delete original regions // as an atomic update. See HBASE-7721. This update to hbase:meta makes the region // will determine whether the region is merged or not in case of failures. + /* TODO if (env.getMasterServices().getAssignmentManager().onRegionTransition( getServerName(env), transition.build()) != null) { throw new IOException("Failed to update meta to add merged region that merges " + getRegionsToMergeListFullNameString()); } + */ } /** @@ -746,11 +751,12 @@ public class MergeTableRegionsProcedure + " as it is already opened."); return; } - +/* TODO // TODO: The new AM should provide an API to force assign the merged region to the same RS // as daughter regions; if the RS is unavailable, then assign to a different RS. env.getMasterServices().getAssignmentManager().assignMergedRegion( mergedRegionInfo, regionsToMerge[0], regionsToMerge[1]); + */ } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyColumnFamilyProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyColumnFamilyProcedure.java index 52bb4d5..622c19f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyColumnFamilyProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyColumnFamilyProcedure.java @@ -21,17 +21,14 @@ package org.apache.hadoop.hbase.master.procedure; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; -import java.util.List; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.HColumnDescriptor; -import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.InvalidFamilyOperationException; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.master.MasterCoprocessorHost; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos; @@ -97,7 +94,9 @@ public class ModifyColumnFamilyProcedure setNextState(ModifyColumnFamilyState.MODIFY_COLUMN_FAMILY_REOPEN_ALL_REGIONS); break; case MODIFY_COLUMN_FAMILY_REOPEN_ALL_REGIONS: - reOpenAllRegionsIfTableIsOnline(env); + if (env.getAssignmentManager().isTableEnabled(getTableName())) { + addChildProcedure(env.getAssignmentManager().createReopenProcedures(getTableName())); + } return Flow.NO_MORE_STATE; default: throw new 
UnsupportedOperationException(this + " unhandled state=" + state); @@ -265,7 +264,8 @@ public class ModifyColumnFamilyProcedure env.getMasterServices().getTableDescriptors().add(unmodifiedHTableDescriptor); // Make sure regions are opened after table descriptor is updated. - reOpenAllRegionsIfTableIsOnline(env); + //reOpenAllRegionsIfTableIsOnline(env); + // TODO: NUKE ROLLBACK!!!! } /** @@ -281,26 +281,6 @@ public class ModifyColumnFamilyProcedure } /** - * Last action from the procedure - executed when online schema change is supported. - * @param env MasterProcedureEnv - * @throws IOException - */ - private void reOpenAllRegionsIfTableIsOnline(final MasterProcedureEnv env) throws IOException { - // This operation only run when the table is enabled. - if (!env.getMasterServices().getTableStateManager() - .isTableState(getTableName(), TableState.State.ENABLED)) { - return; - } - - List regionInfoList = ProcedureSyncWait.getRegionsFromMeta(env, getTableName()); - if (MasterDDLOperationHelper.reOpenAllRegions(env, getTableName(), regionInfoList)) { - LOG.info("Completed add column family operation on table " + getTableName()); - } else { - LOG.warn("Error on reopening the regions on table " + getTableName()); - } - } - - /** * The procedure could be restarted from a different machine. If the variable is null, we need to * retrieve it. * @return traceEnabled diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java index f1b411a..8c52a43 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java @@ -120,7 +120,10 @@ public class ModifyTableProcedure setNextState(ModifyTableState.MODIFY_TABLE_REOPEN_ALL_REGIONS); break; case MODIFY_TABLE_REOPEN_ALL_REGIONS: - reOpenAllRegionsIfTableIsOnline(env); + if (env.getAssignmentManager().isTableEnabled(getTableName())) { + addChildProcedure(env.getAssignmentManager() + .createReopenProcedures(getRegionInfoList(env))); + } return Flow.NO_MORE_STATE; default: throw new UnsupportedOperationException("unhandled state=" + state); @@ -299,7 +302,8 @@ public class ModifyTableProcedure deleteFromFs(env, modifiedHTableDescriptor, unmodifiedHTableDescriptor); // Make sure regions are opened after table descriptor is updated. - reOpenAllRegionsIfTableIsOnline(env); + //reOpenAllRegionsIfTableIsOnline(env); + // TODO: NUKE ROLLBACK!!!! } /** @@ -374,25 +378,6 @@ public class ModifyTableProcedure } /** - * Last action from the procedure - executed when online schema change is supported. - * @param env MasterProcedureEnv - * @throws IOException - */ - private void reOpenAllRegionsIfTableIsOnline(final MasterProcedureEnv env) throws IOException { - // This operation only run when the table is enabled. - if (!env.getMasterServices().getTableStateManager() - .isTableState(getTableName(), TableState.State.ENABLED)) { - return; - } - - if (MasterDDLOperationHelper.reOpenAllRegions(env, getTableName(), getRegionInfoList(env))) { - LOG.info("Completed modify table operation on table " + getTableName()); - } else { - LOG.warn("Error on reopening the regions on table " + getTableName()); - } - } - - /** * The procedure could be restarted from a different machine. If the variable is null, we need to * retrieve it. 
* @return traceEnabled whether the trace is enabled @@ -430,7 +415,8 @@ public class ModifyTableProcedure private List getRegionInfoList(final MasterProcedureEnv env) throws IOException { if (regionInfoList == null) { - regionInfoList = ProcedureSyncWait.getRegionsFromMeta(env, getTableName()); + regionInfoList = env.getAssignmentManager().getRegionStates() + .getRegionsOfTable(getTableName()); } return regionInfoList; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ProcedureSyncWait.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ProcedureSyncWait.java index 3777c79..1542f21 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ProcedureSyncWait.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ProcedureSyncWait.java @@ -21,30 +21,26 @@ package org.apache.hadoop.hbase.master.procedure; import java.io.IOException; import java.io.InterruptedIOException; import java.util.List; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CoordinatedStateException; import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException; import org.apache.hadoop.hbase.ProcedureInfo; -import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.exceptions.TimeoutIOException; -import org.apache.hadoop.hbase.master.AssignmentManager; -import org.apache.hadoop.hbase.master.RegionState.State; -import org.apache.hadoop.hbase.master.RegionStates; -import org.apache.hadoop.hbase.master.ServerManager; +import org.apache.hadoop.hbase.master.assignment.RegionStates; import org.apache.hadoop.hbase.procedure2.Procedure; import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; import org.apache.hadoop.hbase.quotas.MasterQuotaManager; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -import org.apache.hadoop.hbase.util.Threads; -import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; /** * Helper to synchronously wait on conditions. 
@@ -64,19 +60,93 @@ public final class ProcedureSyncWait { T evaluate() throws IOException; } + private static class ProcedureFuture implements Future { + private final ProcedureExecutor procExec; + private final long procId; + + private boolean hasResult = false; + private byte[] result = null; + + public ProcedureFuture(ProcedureExecutor procExec, long procId) { + this.procExec = procExec; + this.procId = procId; + } + + @Override + public boolean cancel(boolean mayInterruptIfRunning) { return false; } + + @Override + public boolean isCancelled() { return false; } + + @Override + public boolean isDone() { return hasResult; } + + @Override + public byte[] get() throws InterruptedException, ExecutionException { + if (hasResult) return result; + try { + return waitForProcedureToComplete(procExec, procId, Long.MAX_VALUE); + } catch (Exception e) { + throw new ExecutionException(e); + } + } + + @Override + public byte[] get(long timeout, TimeUnit unit) + throws InterruptedException, ExecutionException, TimeoutException { + if (hasResult) return result; + try { + result = waitForProcedureToComplete(procExec, procId, unit.toMillis(timeout)); + hasResult = true; + return result; + } catch (TimeoutIOException e) { + throw new TimeoutException(e.getMessage()); + } catch (Exception e) { + throw new ExecutionException(e); + } + } + } + + public static Future submitProcedure(final ProcedureExecutor procExec, + final Procedure proc) { + if (proc.isInitializing()) { + procExec.submitProcedure(proc); + } + return new ProcedureFuture(procExec, proc.getProcId()); + } + public static byte[] submitAndWaitProcedure(ProcedureExecutor procExec, final Procedure proc) throws IOException { - long procId = procExec.submitProcedure(proc); - return waitForProcedureToComplete(procExec, procId); + if (proc.isInitializing()) { + procExec.submitProcedure(proc); + } + return waitForProcedureToCompleteIOE(procExec, proc.getProcId(), Long.MAX_VALUE); } - private static byte[] waitForProcedureToComplete(ProcedureExecutor procExec, - final long procId) throws IOException { - while (!procExec.isFinished(procId) && procExec.isRunning()) { - // TODO: add a config to make it tunable - // Dev Consideration: are we waiting forever, or we can set up some timeout value? 
- Threads.sleepWithoutInterrupt(250); + public static byte[] waitForProcedureToCompleteIOE( + final ProcedureExecutor procExec, final long procId, final long timeout) + throws IOException { + try { + return waitForProcedureToComplete(procExec, procId, timeout); + } catch (IOException e) { + throw e; + } catch (Exception e) { + throw new IOException(e); } + } + + public static byte[] waitForProcedureToComplete( + final ProcedureExecutor procExec, final long procId, final long timeout) + throws IOException { + waitFor(procExec.getEnvironment(), "procId=" + procId, + new ProcedureSyncWait.Predicate() { + @Override + public Boolean evaluate() throws IOException { + return !procExec.isRunning() || procExec.isFinished(procId); + } + } + ); + ProcedureInfo result = procExec.getResult(procId); if (result != null) { if (result.isFailed()) { @@ -104,6 +174,7 @@ public final class ProcedureSyncWait { public static T waitFor(MasterProcedureEnv env, long waitTime, long waitingTimeForEvents, String purpose, Predicate predicate) throws IOException { final long done = EnvironmentEdgeManager.currentTime() + waitTime; + boolean logged = false; do { T result = predicate.evaluate(); if (result != null && !result.equals(Boolean.FALSE)) { @@ -115,7 +186,12 @@ public final class ProcedureSyncWait { LOG.warn("Interrupted while sleeping, waiting on " + purpose); throw (InterruptedIOException)new InterruptedIOException().initCause(e); } - LOG.debug("Waiting on " + purpose); + if (LOG.isTraceEnabled()) { + LOG.trace("Waiting on " + purpose); + } else { + if (!logged) LOG.debug("Waiting on " + purpose); + } + logged = true; } while (EnvironmentEdgeManager.currentTime() < done && env.isRunning()); throw new TimeoutIOException("Timed out while waiting on " + purpose); @@ -133,44 +209,14 @@ public final class ProcedureSyncWait { } } - protected static void waitRegionServers(final MasterProcedureEnv env) throws IOException { - final ServerManager sm = env.getMasterServices().getServerManager(); - ProcedureSyncWait.waitFor(env, "server to assign region(s)", - new ProcedureSyncWait.Predicate() { - @Override - public Boolean evaluate() throws IOException { - List servers = sm.createDestinationServersList(); - return servers != null && !servers.isEmpty(); - } - }); - } - - protected static List getRegionsFromMeta(final MasterProcedureEnv env, - final TableName tableName) throws IOException { - return ProcedureSyncWait.waitFor(env, "regions of table=" + tableName + " from meta", - new ProcedureSyncWait.Predicate>() { - @Override - public List evaluate() throws IOException { - if (TableName.META_TABLE_NAME.equals(tableName)) { - return new MetaTableLocator().getMetaRegions(env.getMasterServices().getZooKeeper()); - } - return MetaTableAccessor.getTableRegions(env.getMasterServices().getConnection(),tableName); - } - }); - } - protected static void waitRegionInTransition(final MasterProcedureEnv env, final List regions) throws IOException, CoordinatedStateException { - final AssignmentManager am = env.getMasterServices().getAssignmentManager(); - final RegionStates states = am.getRegionStates(); + final RegionStates states = env.getAssignmentManager().getRegionStates(); for (final HRegionInfo region : regions) { ProcedureSyncWait.waitFor(env, "regions " + region.getRegionNameAsString() + " in transition", new ProcedureSyncWait.Predicate() { @Override public Boolean evaluate() throws IOException { - if (states.isRegionInState(region, State.FAILED_OPEN)) { - am.regionOffline(region); - } return 
!states.isRegionInTransition(region); } }); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java new file mode 100644 index 0000000..a97c456 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java @@ -0,0 +1,542 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.master.procedure; + +import com.google.common.collect.ArrayListMultimap; + +import java.io.IOException; +import java.net.SocketTimeoutException; +import java.util.concurrent.Callable; +import java.util.concurrent.TimeUnit; +import java.util.List; +import java.util.Set; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.ipc.RemoteException; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException; +import org.apache.hadoop.hbase.master.MasterServices; +import org.apache.hadoop.hbase.master.ServerListener; +import org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher; +import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException; +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; + +/** + * A remote procecdure dispatcher for regionservers. 
+ */ +public class RSProcedureDispatcher + extends RemoteProcedureDispatcher + implements ServerListener { + private static final Log LOG = LogFactory.getLog(RSProcedureDispatcher.class); + + public static final String RS_RPC_STARTUP_WAIT_TIME_CONF_KEY = + "hbase.regionserver.rpc.startup.waittime"; + private static final int DEFAULT_RS_RPC_STARTUP_WAIT_TIME = 60000; + + private static final int RS_VERSION_WITH_EXEC_PROCS = 0x0201000; // 2.1 + + protected final MasterServices master; + protected final long rsStartupWaitTime; + + public RSProcedureDispatcher(final MasterServices master) { + super(master.getConfiguration()); + + this.master = master; + this.rsStartupWaitTime = master.getConfiguration().getLong( + RS_RPC_STARTUP_WAIT_TIME_CONF_KEY, DEFAULT_RS_RPC_STARTUP_WAIT_TIME); + } + + @Override + public boolean start() { + if (!super.start()) { + return false; + } + + master.getServerManager().registerListener(this); + for (ServerName serverName: master.getServerManager().getOnlineServersList()) { + addNode(serverName); + } + return true; + } + + @Override + public boolean stop() { + if (!super.stop()) { + return false; + } + + master.getServerManager().unregisterListener(this); + return true; + } + + @Override + protected void remoteDispatch(final ServerName serverName, + final Set operations) { + final int rsVersion = master.getAssignmentManager().getServerVersion(serverName); + /* + if (rsVersion >= RS_VERSION_WITH_EXEC_PROCS) { + LOG.info(String.format( + "use the procedure batch rpc execution for serverName=%s version=%s", + serverName, rsVersion)); + submitTask(new ExecuteProceduresRemoteCall(serverName, operations)); + } + */ + LOG.info(String.format( + "Fallback to compat rpc execution for serverName=%s version=%s", + serverName, rsVersion)); + submitTask(new CompatRemoteProcedureResolver(serverName, operations)); + } + + protected void abortPendingOperations(final ServerName serverName, + final Set operations) { + // TODO: Replace with a ServerNotOnlineException() + final IOException e = new DoNotRetryIOException("server not online " + serverName); + final MasterProcedureEnv env = master.getMasterProcedureExecutor().getEnvironment(); + for (RemoteProcedure proc: operations) { + proc.remoteCallFailed(env, serverName, e); + } + } + + public void serverAdded(final ServerName serverName) { + addNode(serverName); + } + + public void serverRemoved(final ServerName serverName) { + removeNode(serverName); + } + + /** + * Base remote call + */ + protected abstract class AbstractRSRemoteCall implements Callable { + private final ServerName serverName; + + private int numberOfAttemptsSoFar = 0; + private long maxWaitTime = -1; + + public AbstractRSRemoteCall(final ServerName serverName) { + this.serverName = serverName; + } + + public abstract Void call(); + + protected AdminService.BlockingInterface getRsAdmin() throws IOException { + final AdminService.BlockingInterface admin = master.getServerManager().getRsAdmin(serverName); + if (admin == null) { + throw new IOException("Attempting to send OPEN RPC to server " + getServerName() + + " failed because no RPC connection found to this server"); + } + return admin; + } + + protected ServerName getServerName() { + return serverName; + } + + protected boolean scheduleForRetry(final IOException e) { + // Should we wait a little before retrying? If the server is starting it's yes. 
+ final boolean hold = (e instanceof ServerNotRunningYetException); + if (hold) { + LOG.warn(String.format("waiting a little before trying on the same server=%s try=%d", + serverName, numberOfAttemptsSoFar), e); + long now = EnvironmentEdgeManager.currentTime(); + if (now < getMaxWaitTime()) { + if (LOG.isDebugEnabled()) { + LOG.debug(String.format("server is not yet up; waiting up to %dms", + (getMaxWaitTime() - now)), e); + } + submitTask(this, 100, TimeUnit.MILLISECONDS); + return true; + } + + LOG.warn(String.format("server %s is not up for a while; try a new one", serverName), e); + return false; + } + + // In case socket is timed out and the region server is still online, + // the openRegion RPC could have been accepted by the server and + // just the response didn't go through. So we will retry to + // open the region on the same server. + final boolean retry = !hold && (e instanceof SocketTimeoutException + && master.getServerManager().isServerOnline(serverName)); + if (retry) { + // we want to retry as many times as needed as long as the RS is not dead. + if (LOG.isDebugEnabled()) { + LOG.debug(String.format("retrying to the same region server %s due to: %s", + serverName, e.getMessage()), e); + } + submitTask(this); + return true; + } + + // trying to send the request elsewhere instead + LOG.warn(String.format("the request should be tried elsewhere instead; server=%s try=%d", + serverName, numberOfAttemptsSoFar), e); + return false; + } + + private long getMaxWaitTime() { + if (this.maxWaitTime < 0) { + // This is the max attempts, not retries, so it should be at least 1. + this.maxWaitTime = EnvironmentEdgeManager.currentTime() + rsStartupWaitTime; + } + return this.maxWaitTime; + } + + protected IOException unwrapException(IOException e) { + if (e instanceof RemoteException) { + e = ((RemoteException)e).unwrapRemoteException(); + } + return e; + } + } + + private interface RemoteProcedureResolver { + void dispatchOpenRequests(MasterProcedureEnv env, List operations); + void dispatchCloseRequests(MasterProcedureEnv env, List operations); + } + + public void splitAndResolveOperation(final ServerName serverName, + final Set operations, final RemoteProcedureResolver resolver) { + final MasterProcedureEnv env = master.getMasterProcedureExecutor().getEnvironment(); + final ArrayListMultimap, RemoteOperation> reqsByType = + buildAndGroupRequestByType(env, serverName, operations); + + final List openOps = fetchType(reqsByType, RegionOpenOperation.class); + if (!openOps.isEmpty()) resolver.dispatchOpenRequests(env, openOps); + + final List closeOps = fetchType(reqsByType, RegionCloseOperation.class); + if (!closeOps.isEmpty()) resolver.dispatchCloseRequests(env, closeOps); + + if (!reqsByType.isEmpty()) { + LOG.warn("unknown request type in the queue: " + reqsByType); + } + } + + // ========================================================================== + // Compatibility calls + // ========================================================================== + protected class ExecuteProceduresRemoteCall extends AbstractRSRemoteCall + implements RemoteProcedureResolver { + private final Set operations; + + private ExecuteProceduresRequest.Builder request = null; + + public ExecuteProceduresRemoteCall(final ServerName serverName, + final Set operations) { + super(serverName); + this.operations = operations; + } + + public Void call() { + final MasterProcedureEnv env = master.getMasterProcedureExecutor().getEnvironment(); + + request = ExecuteProceduresRequest.newBuilder(); + 
splitAndResolveOperation(getServerName(), operations, this); + + try { + final ExecuteProceduresResponse response = sendRequest(getServerName(), request.build()); + remoteCallCompleted(env, response); + } catch (IOException e) { + e = unwrapException(e); + // TODO: In the future some operation may want to bail out early. + // TODO: How many times should we retry (use numberOfAttemptsSoFar) + if (!scheduleForRetry(e)) { + remoteCallFailed(env, e); + } + } + return null; + } + + public void dispatchOpenRequests(final MasterProcedureEnv env, + final List operations) { + request.addOpenRegion(buildOpenRegionRequest(env, getServerName(), operations)); + } + + public void dispatchCloseRequests(final MasterProcedureEnv env, + final List operations) { + for (RegionCloseOperation op: operations) { + request.addCloseRegion(op.buildCloseRegionRequest(getServerName())); + } + } + + protected ExecuteProceduresResponse sendRequest(final ServerName serverName, + final ExecuteProceduresRequest request) throws IOException { + try { + return getRsAdmin().executeProcedures(null, request); + } catch (ServiceException se) { + throw ProtobufUtil.getRemoteException(se); + } + } + + + private void remoteCallCompleted(final MasterProcedureEnv env, + final ExecuteProceduresResponse response) { + /* + for (RemoteProcedure proc: operations) { + proc.remoteCallCompleted(env, getServerName(), response); + }*/ + } + + private void remoteCallFailed(final MasterProcedureEnv env, final IOException e) { + for (RemoteProcedure proc: operations) { + proc.remoteCallFailed(env, getServerName(), e); + } + } + } + + // ========================================================================== + // Compatibility calls + // Since we don't have a "batch proc-exec" request on the target RS + // we have to chunk the requests by type and dispatch the specific request. + // ========================================================================== + private static OpenRegionRequest buildOpenRegionRequest(final MasterProcedureEnv env, + final ServerName serverName, final List operations) { + final OpenRegionRequest.Builder builder = OpenRegionRequest.newBuilder(); + builder.setServerStartCode(serverName.getStartcode()); + builder.setMasterSystemTime(EnvironmentEdgeManager.currentTime()); + for (RegionOpenOperation op: operations) { + builder.addOpenInfo(op.buildRegionOpenInfoRequest(env)); + } + return builder.build(); + } + + private final class OpenRegionRemoteCall extends AbstractRSRemoteCall { + private final List operations; + + public OpenRegionRemoteCall(final ServerName serverName, + final List operations) { + super(serverName); + this.operations = operations; + } + + @Override + public Void call() { + final MasterProcedureEnv env = master.getMasterProcedureExecutor().getEnvironment(); + final OpenRegionRequest request = buildOpenRegionRequest(env, getServerName(), operations); + + try { + OpenRegionResponse response = sendRequest(getServerName(), request); + remoteCallCompleted(env, response); + } catch (IOException e) { + e = unwrapException(e); + // TODO: In the future some operation may want to bail out early. 
+ // TODO: How many times should we retry (use numberOfAttemptsSoFar) + if (!scheduleForRetry(e)) { + remoteCallFailed(env, e); + } + } + return null; + } + + private OpenRegionResponse sendRequest(final ServerName serverName, + final OpenRegionRequest request) throws IOException { + try { + return getRsAdmin().openRegion(null, request); + } catch (ServiceException se) { + throw ProtobufUtil.getRemoteException(se); + } + } + + private void remoteCallCompleted(final MasterProcedureEnv env, + final OpenRegionResponse response) { + int index = 0; + for (RegionOpenOperation op: operations) { + OpenRegionResponse.RegionOpeningState state = response.getOpeningState(index++); + op.setFailedOpen(state == OpenRegionResponse.RegionOpeningState.FAILED_OPENING); + op.getRemoteProcedure().remoteCallCompleted(env, getServerName(), op); + } + } + + private void remoteCallFailed(final MasterProcedureEnv env, final IOException e) { + for (RegionOpenOperation op: operations) { + op.getRemoteProcedure().remoteCallFailed(env, getServerName(), e); + } + } + } + + private final class CloseRegionRemoteCall extends AbstractRSRemoteCall { + private final RegionCloseOperation operation; + + public CloseRegionRemoteCall(final ServerName serverName, + final RegionCloseOperation operation) { + super(serverName); + this.operation = operation; + } + + @Override + public Void call() { + final MasterProcedureEnv env = master.getMasterProcedureExecutor().getEnvironment(); + final CloseRegionRequest request = operation.buildCloseRegionRequest(getServerName()); + try { + CloseRegionResponse response = sendRequest(getServerName(), request); + remoteCallCompleted(env, response); + } catch (IOException e) { + e = unwrapException(e); + // TODO: In the future some operation may want to bail out early. 
+ // TODO: How many times should we retry (use numberOfAttemptsSoFar) + if (!scheduleForRetry(e)) { + remoteCallFailed(env, e); + } + } + return null; + } + + private CloseRegionResponse sendRequest(final ServerName serverName, + final CloseRegionRequest request) throws IOException { + try { + return getRsAdmin().closeRegion(null, request); + } catch (ServiceException se) { + throw ProtobufUtil.getRemoteException(se); + } + } + + private void remoteCallCompleted(final MasterProcedureEnv env, + final CloseRegionResponse response) { + operation.setClosed(response.getClosed()); + operation.getRemoteProcedure().remoteCallCompleted(env, getServerName(), operation); + } + + private void remoteCallFailed(final MasterProcedureEnv env, final IOException e) { + operation.getRemoteProcedure().remoteCallFailed(env, getServerName(), e); + } + } + + protected class CompatRemoteProcedureResolver implements Callable, RemoteProcedureResolver { + private final Set operations; + private final ServerName serverName; + + public CompatRemoteProcedureResolver(final ServerName serverName, + final Set operations) { + this.serverName = serverName; + this.operations = operations; + } + + @Override + public Void call() { + splitAndResolveOperation(serverName, operations, this); + return null; + } + + public void dispatchOpenRequests(final MasterProcedureEnv env, + final List operations) { + submitTask(new OpenRegionRemoteCall(serverName, operations)); + } + + public void dispatchCloseRequests(final MasterProcedureEnv env, + final List operations) { + for (RegionCloseOperation op: operations) { + submitTask(new CloseRegionRemoteCall(serverName, op)); + } + } + } + + // ========================================================================== + // RPC Messages + // - ServerOperation: refreshConfig, grant, revoke, ... + // - RegionOperation: open, close, flush, snapshot, ... 
+ // ========================================================================== + public static abstract class ServerOperation extends RemoteOperation { + protected ServerOperation(final RemoteProcedure remoteProcedure) { + super(remoteProcedure); + } + } + + public static abstract class RegionOperation extends RemoteOperation { + private final HRegionInfo regionInfo; + + protected RegionOperation(final RemoteProcedure remoteProcedure, + final HRegionInfo regionInfo) { + super(remoteProcedure); + this.regionInfo = regionInfo; + } + + public HRegionInfo getRegionInfo() { + return this.regionInfo; + } + } + + public static class RegionOpenOperation extends RegionOperation { + private final List favoredNodes; + private final boolean openForReplay; + private boolean failedOpen; + + public RegionOpenOperation(final RemoteProcedure remoteProcedure, + final HRegionInfo regionInfo, final List favoredNodes, + final boolean openForReplay) { + super(remoteProcedure, regionInfo); + this.favoredNodes = favoredNodes; + this.openForReplay = openForReplay; + } + + protected void setFailedOpen(final boolean failedOpen) { + this.failedOpen = failedOpen; + } + + public boolean isFailedOpen() { + return failedOpen; + } + + public OpenRegionRequest.RegionOpenInfo buildRegionOpenInfoRequest( + final MasterProcedureEnv env) { + return RequestConverter.buildRegionOpenInfo(getRegionInfo(), + env.getAssignmentManager().getFavoredNodes(getRegionInfo()), false); + } + } + + public static class RegionCloseOperation extends RegionOperation { + private final ServerName destinationServer; + private boolean closed = false; + + public RegionCloseOperation(final RemoteProcedure remoteProcedure, + final HRegionInfo regionInfo, final ServerName destinationServer) { + super(remoteProcedure, regionInfo); + this.destinationServer = destinationServer; + } + + public ServerName getDestinationServer() { + return destinationServer; + } + + protected void setClosed(final boolean closed) { + this.closed = closed; + } + + public boolean isClosed() { + return closed; + } + + public CloseRegionRequest buildCloseRegionRequest(final ServerName serverName) { + return ProtobufUtil.buildCloseRegionRequest(serverName, + getRegionInfo().getRegionName(), getDestinationServer()); + } + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.java index d99bd6b..05c8e70 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.java @@ -43,7 +43,6 @@ import org.apache.hadoop.hbase.errorhandling.ForeignException; import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher; import org.apache.hadoop.hbase.master.MasterFileSystem; import org.apache.hadoop.hbase.master.MetricsSnapshot; -import org.apache.hadoop.hbase.master.RegionStates; import org.apache.hadoop.hbase.monitoring.MonitoredTask; import org.apache.hadoop.hbase.monitoring.TaskMonitor; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; @@ -408,17 +407,7 @@ public class RestoreSnapshotProcedure try { Connection conn = env.getMasterServices().getConnection(); - // 1. Forces all the RegionStates to be offline - // - // The AssignmentManager keeps all the region states around - // with no possibility to remove them, until the master is restarted. 
- // This means that a region marked as SPLIT before the restore will never be assigned again. - // To avoid having all states around all the regions are switched to the OFFLINE state, - // which is the same state that the regions will be after a delete table. - forceRegionsOffline(env, regionsToAdd); - forceRegionsOffline(env, regionsToRestore); - forceRegionsOffline(env, regionsToRemove); - + // 1. Prepare to restore getMonitorStatus().setStatus("Preparing to restore each region"); // 2. Applies changes to hbase:meta @@ -478,20 +467,6 @@ public class RestoreSnapshotProcedure } /** - * Make sure that region states of the region list is in OFFLINE state. - * @param env MasterProcedureEnv - * @param hris region info list - **/ - private void forceRegionsOffline(final MasterProcedureEnv env, final List hris) { - RegionStates states = env.getMasterServices().getAssignmentManager().getRegionStates(); - if (hris != null) { - for (HRegionInfo hri: hris) { - states.regionOffline(hri); - } - } - } - - /** * The procedure could be restarted from a different machine. If the variable is null, we need to * retrieve it. * @return traceEnabled diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java index 7b4eb6e..8b6a4fe 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java @@ -19,28 +19,23 @@ package org.apache.hadoop.hbase.master.procedure; import java.io.IOException; import java.io.InputStream; -import java.io.InterruptedIOException; import java.io.OutputStream; import java.util.ArrayList; import java.util.Collection; -import java.util.HashSet; +import java.util.Iterator; import java.util.List; import java.util.Set; -import java.util.concurrent.locks.Lock; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.client.ClusterConnection; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.RegionReplicaUtil; -import org.apache.hadoop.hbase.client.TableState; -import org.apache.hadoop.hbase.master.AssignmentManager; +import org.apache.hadoop.hbase.master.assignment.AssignmentManager; import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.master.MasterWalManager; -import org.apache.hadoop.hbase.master.RegionState; -import org.apache.hadoop.hbase.master.RegionStates; +import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException; import org.apache.hadoop.hbase.procedure2.ProcedureYieldException; import org.apache.hadoop.hbase.procedure2.StateMachineProcedure; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo; @@ -48,10 +43,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.ServerCrashState; import org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode; -import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.apache.hadoop.util.StringUtils; -import 
org.apache.zookeeper.KeeperException; /** * Handle crashed server. This is a port to ProcedureV2 of what used to be euphemistically called @@ -65,46 +57,13 @@ import org.apache.zookeeper.KeeperException; * completion of each successful flow step. We do this so that we do not 'deadlock' waiting on * a region assignment so we can replay edits which could happen if a region moved there are edits * on two servers for replay. - * - *

TODO: ASSIGN and WAIT_ON_ASSIGN (at least) are not idempotent. Revisit when assign is pv2. - * TODO: We do not have special handling for system tables. */ public class ServerCrashProcedure -extends StateMachineProcedure -implements ServerProcedureInterface { + extends StateMachineProcedure + implements ServerProcedureInterface { private static final Log LOG = LogFactory.getLog(ServerCrashProcedure.class); /** - * Configuration key to set how long to wait in ms doing a quick check on meta state. - */ - public static final String KEY_SHORT_WAIT_ON_META = - "hbase.master.servercrash.short.wait.on.meta.ms"; - - public static final int DEFAULT_SHORT_WAIT_ON_META = 1000; - - /** - * Configuration key to set how many retries to cycle before we give up on meta. - * Each attempt will wait at least {@link #KEY_SHORT_WAIT_ON_META} milliseconds. - */ - public static final String KEY_RETRIES_ON_META = - "hbase.master.servercrash.meta.retries"; - - public static final int DEFAULT_RETRIES_ON_META = 10; - - /** - * Configuration key to set how long to wait in ms on regions in transition. - */ - public static final String KEY_WAIT_ON_RIT = - "hbase.master.servercrash.wait.on.rit.ms"; - - public static final int DEFAULT_WAIT_ON_RIT = 30000; - - private static final Set META_REGION_SET = new HashSet(); - static { - META_REGION_SET.add(HRegionInfo.FIRST_META_REGIONINFO); - } - - /** * Name of the crashed server to process. */ private ServerName serverName; @@ -117,14 +76,8 @@ implements ServerProcedureInterface { /** * Regions that were on the crashed server. */ - private Set regionsOnCrashedServer; + private List regionsOnCrashedServer; - /** - * Regions assigned. Usually some subset of {@link #regionsOnCrashedServer}. - */ - private List regionsAssigned; - - private boolean distributedLogReplay = false; private boolean carryingMeta = false; private boolean shouldSplitWal; @@ -175,7 +128,7 @@ implements ServerProcedureInterface { @Override protected Flow executeFromState(MasterProcedureEnv env, ServerCrashState state) - throws ProcedureYieldException { + throws ProcedureSuspendedException, ProcedureYieldException { if (LOG.isTraceEnabled()) { LOG.trace(state); } @@ -186,11 +139,7 @@ implements ServerProcedureInterface { } else { this.cycles++; } - MasterServices services = env.getMasterServices(); - // Is master fully online? If not, yield. No processing of servers unless master is up - if (!services.getAssignmentManager().isFailoverCleanupDone()) { - throwProcedureYieldException("Waiting on master failover to complete"); - } + final MasterServices services = env.getMasterServices(); // HBASE-14802 // If we have not yet notified that we are processing a dead server, we should do now. if (!notifiedDeadServer) { @@ -204,95 +153,48 @@ implements ServerProcedureInterface { LOG.info("Start processing crashed " + this.serverName); start(env); // If carrying meta, process it first. Else, get list of regions on crashed server. - if (this.carryingMeta) setNextState(ServerCrashState.SERVER_CRASH_PROCESS_META); - else setNextState(ServerCrashState.SERVER_CRASH_GET_REGIONS); + if (this.carryingMeta) { + setNextState(ServerCrashState.SERVER_CRASH_PROCESS_META); + } else { + setNextState(ServerCrashState.SERVER_CRASH_GET_REGIONS); + } break; case SERVER_CRASH_GET_REGIONS: // If hbase:meta is not assigned, yield. - if (!isMetaAssignedQuickTest(env)) { - // isMetaAssignedQuickTest does not really wait. Let's delay a little before - // another round of execution. 
- long wait = - env.getMasterConfiguration().getLong(KEY_SHORT_WAIT_ON_META, - DEFAULT_SHORT_WAIT_ON_META); - wait = wait / 10; - Thread.sleep(wait); - throwProcedureYieldException("Waiting on hbase:meta assignment"); + if (env.getAssignmentManager().waitMetaInitialized(this)) { + throw new ProcedureSuspendedException(); } - this.regionsOnCrashedServer = - services.getAssignmentManager().getRegionStates().getServerRegions(this.serverName); - // Where to go next? Depends on whether we should split logs at all or if we should do - // distributed log splitting (DLS) vs distributed log replay (DLR). + + this.regionsOnCrashedServer = services.getAssignmentManager().getRegionStates() + .getServerRegionInfoSet(serverName); + LOG.info("GET REGIONS ON CRASHED SERVER: " + regionsOnCrashedServer); + // Where to go next? Depends on whether we should split logs at all or + // if we should do distributed log splitting. if (!this.shouldSplitWal) { setNextState(ServerCrashState.SERVER_CRASH_ASSIGN); - } else if (this.distributedLogReplay) { - setNextState(ServerCrashState.SERVER_CRASH_PREPARE_LOG_REPLAY); } else { setNextState(ServerCrashState.SERVER_CRASH_SPLIT_LOGS); } break; case SERVER_CRASH_PROCESS_META: - // If we fail processing hbase:meta, yield. - if (!processMeta(env)) { - throwProcedureYieldException("Waiting on regions-in-transition to clear"); - } + processMeta(env); setNextState(ServerCrashState.SERVER_CRASH_GET_REGIONS); break; - case SERVER_CRASH_PREPARE_LOG_REPLAY: - prepareLogReplay(env, this.regionsOnCrashedServer); - setNextState(ServerCrashState.SERVER_CRASH_ASSIGN); - break; - case SERVER_CRASH_SPLIT_LOGS: splitLogs(env); - // If DLR, go to FINISH. Otherwise, if DLS, go to SERVER_CRASH_CALC_REGIONS_TO_ASSIGN - if (this.distributedLogReplay) setNextState(ServerCrashState.SERVER_CRASH_FINISH); - else setNextState(ServerCrashState.SERVER_CRASH_ASSIGN); + setNextState(ServerCrashState.SERVER_CRASH_ASSIGN); break; case SERVER_CRASH_ASSIGN: - List regionsToAssign = calcRegionsToAssign(env); - - // Assign may not be idempotent. SSH used to requeue the SSH if we got an IOE assigning - // which is what we are mimicing here but it looks prone to double assignment if assign - // fails midway. TODO: Test. - // If no regions to assign, skip assign and skip to the finish. - boolean regions = regionsToAssign != null && !regionsToAssign.isEmpty(); - if (regions) { - this.regionsAssigned = regionsToAssign; - if (!assign(env, regionsToAssign)) { - throwProcedureYieldException("Failed assign; will retry"); - } - } - if (this.shouldSplitWal && distributedLogReplay) { - // Take this route even if there are apparently no regions assigned. This may be our - // second time through here; i.e. we assigned and crashed just about here. On second - // time through, there will be no regions because we assigned them in the previous step. - // Even though no regions, we need to go through here to clean up the DLR zk markers. - setNextState(ServerCrashState.SERVER_CRASH_WAIT_ON_ASSIGN); - } else { - setNextState(ServerCrashState.SERVER_CRASH_FINISH); + if (filterDefaultMetaRegions(regionsOnCrashedServer)) { + addChildProcedure(env.getAssignmentManager() + .createAssignProcedures(regionsOnCrashedServer, true)); } - break; - - case SERVER_CRASH_WAIT_ON_ASSIGN: - // TODO: The list of regionsAssigned may be more than we actually assigned. See down in - // AM #1629 around 'if (regionStates.wasRegionOnDeadServer(encodedName)) {' where where we - // will skip assigning a region because it is/was on a dead server. 
Should never happen! - // It was on this server. Worst comes to worst, we'll still wait here till other server is - // processed. - - // If the wait on assign failed, yield -- if we have regions to assign. - if (this.regionsAssigned != null && !this.regionsAssigned.isEmpty()) { - if (!waitOnAssign(env, this.regionsAssigned)) { - throwProcedureYieldException("Waiting on region assign"); - } - } - setNextState(ServerCrashState.SERVER_CRASH_SPLIT_LOGS); + setNextState(ServerCrashState.SERVER_CRASH_FINISH); break; case SERVER_CRASH_FINISH: @@ -305,10 +207,6 @@ implements ServerProcedureInterface { } } catch (IOException e) { LOG.warn("Failed serverName=" + this.serverName + ", state=" + state + "; retry", e); - } catch (InterruptedException e) { - // TODO: Make executor allow IEs coming up out of execute. - LOG.warn("Interrupted serverName=" + this.serverName + ", state=" + state + "; retry", e); - Thread.currentThread().interrupt(); } return Flow.HAS_MORE_STATE; } @@ -319,84 +217,54 @@ implements ServerProcedureInterface { * @throws IOException */ private void start(final MasterProcedureEnv env) throws IOException { - MasterWalManager mwm = env.getMasterServices().getMasterWalManager(); + final MasterWalManager mwm = env.getMasterServices().getMasterWalManager(); // Set recovery mode late. This is what the old ServerShutdownHandler used do. mwm.setLogRecoveryMode(); - this.distributedLogReplay = mwm.getLogRecoveryMode() == RecoveryMode.LOG_REPLAY; + if (mwm.getLogRecoveryMode() == RecoveryMode.LOG_REPLAY) { + String msg = "distributed log replay is not supported"; + LOG.error(msg); + env.getMasterServices().abort(msg, null); + } } /** * @param env - * @return False if we fail to assign and split logs on meta ('process'). * @throws IOException * @throws InterruptedException */ - private boolean processMeta(final MasterProcedureEnv env) - throws IOException { + private void processMeta(final MasterProcedureEnv env) throws IOException { if (LOG.isDebugEnabled()) LOG.debug("Processing hbase:meta that was on " + this.serverName); - MasterWalManager mwm = env.getMasterServices().getMasterWalManager(); - AssignmentManager am = env.getMasterServices().getAssignmentManager(); - HRegionInfo metaHRI = HRegionInfo.FIRST_META_REGIONINFO; + if (this.shouldSplitWal) { - if (this.distributedLogReplay) { - prepareLogReplay(env, META_REGION_SET); - } else { - // TODO: Matteo. We BLOCK here but most important thing to be doing at this moment. - mwm.splitMetaLog(serverName); - am.getRegionStates().logSplit(metaHRI); - } + // TODO: Matteo. We BLOCK here but most important thing to be doing at this moment. + env.getMasterServices().getMasterWalManager().splitMetaLog(serverName); } // Assign meta if still carrying it. Check again: region may be assigned because of RIT timeout - boolean processed = true; - if (am.isCarryingMeta(serverName)) { - // TODO: May block here if hard time figuring state of meta. - am.regionOffline(HRegionInfo.FIRST_META_REGIONINFO); - verifyAndAssignMetaWithRetries(env); - if (this.shouldSplitWal && distributedLogReplay) { - int timeout = env.getMasterConfiguration().getInt(KEY_WAIT_ON_RIT, DEFAULT_WAIT_ON_RIT); - if (!waitOnRegionToClearRegionsInTransition(am, metaHRI, timeout)) { - processed = false; - } else { - // TODO: Matteo. We BLOCK here but most important thing to be doing at this moment. 
- mwm.splitMetaLog(serverName); - } - } + final AssignmentManager am = env.getMasterServices().getAssignmentManager(); + for (HRegionInfo hri: am.getRegionStates().getServerRegionInfoSet(serverName)) { + if (!isDefaultMetaRegion(hri)) continue; + + am.offlineRegion(hri); + addChildProcedure(am.createAssignProcedure(hri, true)); } - return processed; } - /** - * @return True if region cleared RIT, else false if we timed out waiting. - * @throws InterruptedIOException - */ - private boolean waitOnRegionToClearRegionsInTransition(AssignmentManager am, - final HRegionInfo hri, final int timeout) - throws InterruptedIOException { - try { - if (!am.waitOnRegionToClearRegionsInTransition(hri, timeout)) { - // Wait here is to avoid log replay hits current dead server and incur a RPC timeout - // when replay happens before region assignment completes. - LOG.warn("Region " + hri.getEncodedName() + " didn't complete assignment in time"); - return false; + private boolean filterDefaultMetaRegions(final List regions) { + if (regions == null) return false; + final Iterator it = regions.iterator(); + while (it.hasNext()) { + final HRegionInfo hri = it.next(); + if (isDefaultMetaRegion(hri)) { + it.remove(); } - } catch (InterruptedException ie) { - throw new InterruptedIOException("Caught " + ie + - " during waitOnRegionToClearRegionsInTransition for " + hri); } - return true; + return !regions.isEmpty(); } - private void prepareLogReplay(final MasterProcedureEnv env, final Set regions) - throws IOException { - if (LOG.isDebugEnabled()) { - LOG.debug("Mark " + size(this.regionsOnCrashedServer) + " regions-in-recovery from " + - this.serverName); - } - MasterWalManager mwm = env.getMasterServices().getMasterWalManager(); - AssignmentManager am = env.getMasterServices().getAssignmentManager(); - mwm.prepareLogReplay(this.serverName, regions); - am.getRegionStates().logSplit(this.serverName); + private boolean isDefaultMetaRegion(final HRegionInfo hri) { + return hri.getTable().equals(TableName.META_TABLE_NAME) && + RegionReplicaUtil.isDefaultReplica(hri); } private void splitLogs(final MasterProcedureEnv env) throws IOException { @@ -415,127 +283,8 @@ implements ServerProcedureInterface { return hris == null? 0: hris.size(); } - /** - * Figure out what we need to assign. Should be idempotent. - * @param env - * @return List of calculated regions to assign; may be empty or null. - * @throws IOException - */ - private List calcRegionsToAssign(final MasterProcedureEnv env) - throws IOException { - AssignmentManager am = env.getMasterServices().getAssignmentManager(); - List regionsToAssignAggregator = new ArrayList(); - int replicaCount = env.getMasterConfiguration().getInt(HConstants.META_REPLICAS_NUM, - HConstants.DEFAULT_META_REPLICA_NUM); - for (int i = 1; i < replicaCount; i++) { - HRegionInfo metaHri = - RegionReplicaUtil.getRegionInfoForReplica(HRegionInfo.FIRST_META_REGIONINFO, i); - if (am.isCarryingMetaReplica(this.serverName, metaHri)) { - if (LOG.isDebugEnabled()) { - LOG.debug("Reassigning meta replica" + metaHri + " that was on " + this.serverName); - } - regionsToAssignAggregator.add(metaHri); - } - } - // Clean out anything in regions in transition. - List regionsInTransition = am.cleanOutCrashedServerReferences(serverName); - if (LOG.isDebugEnabled()) { - LOG.debug("Reassigning " + size(this.regionsOnCrashedServer) + - " region(s) that " + (serverName == null? 
"null": serverName) + - " was carrying (and " + regionsInTransition.size() + - " regions(s) that were opening on this server)"); - } - regionsToAssignAggregator.addAll(regionsInTransition); - - // Iterate regions that were on this server and figure which of these we need to reassign - if (this.regionsOnCrashedServer != null && !this.regionsOnCrashedServer.isEmpty()) { - RegionStates regionStates = am.getRegionStates(); - for (HRegionInfo hri: this.regionsOnCrashedServer) { - if (regionsInTransition.contains(hri)) continue; - String encodedName = hri.getEncodedName(); - Lock lock = am.acquireRegionLock(encodedName); - try { - RegionState rit = regionStates.getRegionTransitionState(hri); - if (processDeadRegion(hri, am)) { - ServerName addressFromAM = regionStates.getRegionServerOfRegion(hri); - if (addressFromAM != null && !addressFromAM.equals(this.serverName)) { - // If this region is in transition on the dead server, it must be - // opening or pending_open, which should have been covered by - // AM#cleanOutCrashedServerReferences - LOG.info("Skip assigning " + hri.getRegionNameAsString() - + " because opened on " + addressFromAM.getServerName()); - continue; - } - if (rit != null) { - if (rit.getServerName() != null && !rit.isOnServer(this.serverName)) { - // Skip regions that are in transition on other server - LOG.info("Skip assigning region in transition on other server" + rit); - continue; - } - LOG.info("Reassigning region " + rit + " and clearing zknode if exists"); - regionStates.updateRegionState(hri, RegionState.State.OFFLINE); - } else if (regionStates.isRegionInState( - hri, RegionState.State.SPLITTING_NEW, RegionState.State.MERGING_NEW)) { - regionStates.updateRegionState(hri, RegionState.State.OFFLINE); - } - regionsToAssignAggregator.add(hri); - // TODO: The below else if is different in branch-1 from master branch. - } else if (rit != null) { - if ((rit.isClosing() || rit.isFailedClose() || rit.isOffline()) - && am.getTableStateManager().isTableState(hri.getTable(), - TableState.State.DISABLED, TableState.State.DISABLING) || - am.getReplicasToClose().contains(hri)) { - // If the table was partially disabled and the RS went down, we should clear the - // RIT and remove the node for the region. - // The rit that we use may be stale in case the table was in DISABLING state - // but though we did assign we will not be clearing the znode in CLOSING state. - // Doing this will have no harm. 
See HBASE-5927 - regionStates.updateRegionState(hri, RegionState.State.OFFLINE); - am.offlineDisabledRegion(hri); - } else { - LOG.warn("THIS SHOULD NOT HAPPEN: unexpected region in transition " - + rit + " not to be assigned by SSH of server " + serverName); - } - } - } finally { - lock.unlock(); - } - } - } - return regionsToAssignAggregator; - } - - private boolean assign(final MasterProcedureEnv env, final List hris) - throws InterruptedIOException { - AssignmentManager am = env.getMasterServices().getAssignmentManager(); - try { - am.assign(hris); - } catch (InterruptedException ie) { - LOG.error("Caught " + ie + " during round-robin assignment"); - throw (InterruptedIOException)new InterruptedIOException().initCause(ie); - } catch (IOException ioe) { - LOG.info("Caught " + ioe + " during region assignment, will retry"); - return false; - } - return true; - } - - private boolean waitOnAssign(final MasterProcedureEnv env, final List hris) - throws InterruptedIOException { - int timeout = env.getMasterConfiguration().getInt(KEY_WAIT_ON_RIT, DEFAULT_WAIT_ON_RIT); - for (HRegionInfo hri: hris) { - // TODO: Blocks here. - if (!waitOnRegionToClearRegionsInTransition(env.getMasterServices().getAssignmentManager(), - hri, timeout)) { - return false; - } - } - return true; - } - @Override - protected void rollbackState(MasterProcedureEnv env, ServerCrashState state) - throws IOException { + protected void rollbackState(MasterProcedureEnv env, ServerCrashState state) throws IOException { // Can't rollback. throw new UnsupportedOperationException("unhandled state=" + state); } @@ -581,7 +330,7 @@ implements ServerProcedureInterface { public void toStringClassDetails(StringBuilder sb) { sb.append(getClass().getSimpleName()); sb.append(" serverName="); - sb.append(this.serverName); + sb.append(serverName); sb.append(", shouldSplitWal="); sb.append(shouldSplitWal); sb.append(", carryingMeta="); @@ -595,7 +344,6 @@ implements ServerProcedureInterface { MasterProcedureProtos.ServerCrashStateData.Builder state = MasterProcedureProtos.ServerCrashStateData.newBuilder(). setServerName(ProtobufUtil.toServerName(this.serverName)). - setDistributedLogReplay(this.distributedLogReplay). setCarryingMeta(this.carryingMeta). setShouldSplitWal(this.shouldSplitWal); if (this.regionsOnCrashedServer != null && !this.regionsOnCrashedServer.isEmpty()) { @@ -603,11 +351,6 @@ implements ServerProcedureInterface { state.addRegionsOnCrashedServer(HRegionInfo.convert(hri)); } } - if (this.regionsAssigned != null && !this.regionsAssigned.isEmpty()) { - for (HRegionInfo hri: this.regionsAssigned) { - state.addRegionsAssigned(HRegionInfo.convert(hri)); - } - } state.build().writeDelimitedTo(stream); } @@ -618,142 +361,16 @@ implements ServerProcedureInterface { MasterProcedureProtos.ServerCrashStateData state = MasterProcedureProtos.ServerCrashStateData.parseDelimitedFrom(stream); this.serverName = ProtobufUtil.toServerName(state.getServerName()); - this.distributedLogReplay = state.hasDistributedLogReplay()? - state.getDistributedLogReplay(): false; this.carryingMeta = state.hasCarryingMeta()? state.getCarryingMeta(): false; // shouldSplitWAL has a default over in pb so this invocation will always work. 
this.shouldSplitWal = state.getShouldSplitWal(); int size = state.getRegionsOnCrashedServerCount(); if (size > 0) { - this.regionsOnCrashedServer = new HashSet(size); + this.regionsOnCrashedServer = new ArrayList(size); for (RegionInfo ri: state.getRegionsOnCrashedServerList()) { this.regionsOnCrashedServer.add(HRegionInfo.convert(ri)); } } - size = state.getRegionsAssignedCount(); - if (size > 0) { - this.regionsAssigned = new ArrayList(size); - for (RegionInfo ri: state.getRegionsOnCrashedServerList()) { - this.regionsAssigned.add(HRegionInfo.convert(ri)); - } - } - } - - /** - * Process a dead region from a dead RS. Checks if the region is disabled or - * disabling or if the region has a partially completed split. - * @param hri - * @param assignmentManager - * @return Returns true if specified region should be assigned, false if not. - * @throws IOException - */ - private static boolean processDeadRegion(HRegionInfo hri, AssignmentManager assignmentManager) - throws IOException { - boolean tablePresent = assignmentManager.getTableStateManager().isTablePresent(hri.getTable()); - if (!tablePresent) { - LOG.info("The table " + hri.getTable() + " was deleted. Hence not proceeding."); - return false; - } - // If table is not disabled but the region is offlined, - boolean disabled = assignmentManager.getTableStateManager().isTableState(hri.getTable(), - TableState.State.DISABLED); - if (disabled){ - LOG.info("The table " + hri.getTable() + " was disabled. Hence not proceeding."); - return false; - } - if (hri.isOffline() && hri.isSplit()) { - // HBASE-7721: Split parent and daughters are inserted into hbase:meta as an atomic operation. - // If the meta scanner saw the parent split, then it should see the daughters as assigned - // to the dead server. We don't have to do anything. - return false; - } - boolean disabling = assignmentManager.getTableStateManager().isTableState(hri.getTable(), - TableState.State.DISABLING); - if (disabling) { - LOG.info("The table " + hri.getTable() + " is disabled. Hence not assigning region" + - hri.getEncodedName()); - return false; - } - return true; - } - - /** - * If hbase:meta is not assigned already, assign. - * @throws IOException - */ - private void verifyAndAssignMetaWithRetries(final MasterProcedureEnv env) throws IOException { - MasterServices services = env.getMasterServices(); - int iTimes = services.getConfiguration().getInt(KEY_RETRIES_ON_META, DEFAULT_RETRIES_ON_META); - // Just reuse same time as we have for short wait on meta. Adding another config is overkill. - long waitTime = - services.getConfiguration().getLong(KEY_SHORT_WAIT_ON_META, DEFAULT_SHORT_WAIT_ON_META); - int iFlag = 0; - while (true) { - try { - verifyAndAssignMeta(env); - break; - } catch (KeeperException e) { - services.abort("In server shutdown processing, assigning meta", e); - throw new IOException("Aborting", e); - } catch (Exception e) { - if (iFlag >= iTimes) { - services.abort("verifyAndAssignMeta failed after" + iTimes + " retries, aborting", e); - throw new IOException("Aborting", e); - } - try { - Thread.sleep(waitTime); - } catch (InterruptedException e1) { - LOG.warn("Interrupted when is the thread sleep", e1); - Thread.currentThread().interrupt(); - throw (InterruptedIOException)new InterruptedIOException().initCause(e1); - } - iFlag++; - } - } - } - - /** - * If hbase:meta is not assigned already, assign. 
- * @throws InterruptedException - * @throws IOException - * @throws KeeperException - */ - private void verifyAndAssignMeta(final MasterProcedureEnv env) - throws InterruptedException, IOException, KeeperException { - MasterServices services = env.getMasterServices(); - if (!isMetaAssignedQuickTest(env)) { - services.getAssignmentManager().assignMeta(HRegionInfo.FIRST_META_REGIONINFO); - } else if (serverName.equals(services.getMetaTableLocator(). - getMetaRegionLocation(services.getZooKeeper()))) { - throw new IOException("hbase:meta is onlined on the dead server " + this.serverName); - } else { - LOG.info("Skip assigning hbase:meta because it is online at " - + services.getMetaTableLocator().getMetaRegionLocation(services.getZooKeeper())); - } - } - - /** - * A quick test that hbase:meta is assigned; blocks for short time only. - * @return True if hbase:meta location is available and verified as good. - * @throws InterruptedException - * @throws IOException - */ - private boolean isMetaAssignedQuickTest(final MasterProcedureEnv env) - throws InterruptedException, IOException { - ZooKeeperWatcher zkw = env.getMasterServices().getZooKeeper(); - MetaTableLocator mtl = env.getMasterServices().getMetaTableLocator(); - boolean metaAssigned = false; - // Is hbase:meta location available yet? - if (mtl.isLocationAvailable(zkw)) { - ClusterConnection connection = env.getMasterServices().getClusterConnection(); - // Is hbase:meta location good yet? - long timeout = - env.getMasterConfiguration().getLong(KEY_SHORT_WAIT_ON_META, DEFAULT_SHORT_WAIT_ON_META); - if (mtl.verifyMetaRegionLocation(connection, zkw, timeout)) { - metaAssigned = true; - } - } - return metaAssigned; } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SplitTableRegionProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SplitTableRegionProcedure.java index 69b89be..8f25010 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SplitTableRegionProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SplitTableRegionProcedure.java @@ -38,10 +38,11 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.DoNotRetryIOException; -import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.PleaseHoldException; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; @@ -50,15 +51,18 @@ import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.master.MasterCoprocessorHost; import org.apache.hadoop.hbase.master.MasterFileSystem; import org.apache.hadoop.hbase.master.RegionState; -import org.apache.hadoop.hbase.master.RegionStates; -import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos; -import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionState; -import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition; -import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode; +import 
org.apache.hadoop.hbase.master.assignment.RegionStates; import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; import org.apache.hadoop.hbase.regionserver.HStore; import org.apache.hadoop.hbase.regionserver.StoreFile; import org.apache.hadoop.hbase.regionserver.StoreFileInfo; +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionState; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.FSUtils; @@ -141,8 +145,8 @@ public class SplitTableRegionProcedure @Override protected Flow executeFromState(final MasterProcedureEnv env, final SplitTableRegionState state) throws InterruptedException { - if (isTraceEnabled()) { - LOG.trace(this + " execute state=" + state); + if (LOG.isDebugEnabled()) { + LOG.debug(this + " execute state=" + state); } try { @@ -152,7 +156,7 @@ public class SplitTableRegionProcedure setNextState(SplitTableRegionState.SPLIT_TABLE_REGION_PRE_OPERATION); break; } else { - assert isFailed() : "split region should have an exception here"; + assert isFailed() : "Split region should have an exception here"; return Flow.NO_MORE_STATE; } case SPLIT_TABLE_REGION_PRE_OPERATION: @@ -331,19 +335,18 @@ public class SplitTableRegionProcedure @Override public void toStringClassDetails(StringBuilder sb) { sb.append(getClass().getSimpleName()); - sb.append(" (table="); + sb.append(" table="); sb.append(getTableName()); - sb.append(" parent region="); - sb.append(parentHRI); + sb.append(", parent="); + sb.append(parentHRI.getRegionNameAsString()); if (daughter_1_HRI != null) { - sb.append(" first daughter region="); - sb.append(daughter_1_HRI); + sb.append(" daughter1="); + sb.append(daughter_1_HRI.getRegionNameAsString()); } if (daughter_2_HRI != null) { - sb.append(" and second daughter region="); - sb.append(daughter_2_HRI); + sb.append(", daughter2="); + sb.append(daughter_2_HRI.getRegionNameAsString()); } - sb.append(")"); } @Override @@ -385,9 +388,8 @@ public class SplitTableRegionProcedure final RegionState state = getParentRegionState(env); if (state.isClosing() || state.isClosed() || state.isSplittingOrSplitOnServer(state.getServerName())) { - setFailure( - "master-split-region", - new IOException("Split region " + parentHRI + " failed due to region is not splittable")); + setFailure("master-split-region", + new IOException("Split " + parentHRI.getRegionNameAsString() + " failed, state=" + state)); return false; } return true; @@ -427,15 +429,24 @@ public class SplitTableRegionProcedure */ @VisibleForTesting public void setRegionStateToSplitting(final MasterProcedureEnv env) throws IOException { + reportRegionStateTransition(env, TransitionCode.READY_TO_SPLIT); + } + + private void reportRegionStateTransition(final MasterProcedureEnv env, TransitionCode code) + throws IOException { RegionStateTransition.Builder transition = 
RegionStateTransition.newBuilder(); - transition.setTransitionCode(TransitionCode.READY_TO_SPLIT); - transition.addRegionInfo(HRegionInfo.convert(parentHRI)); - transition.addRegionInfo(HRegionInfo.convert(daughter_1_HRI)); - transition.addRegionInfo(HRegionInfo.convert(daughter_2_HRI)); - if (env.getMasterServices().getAssignmentManager().onRegionTransition( - getParentRegionState(env).getServerName(), transition.build()) != null) { - throw new IOException("Failed to update region state to SPLITTING for " - + parentHRI.getRegionNameAsString()); + transition.setTransitionCode(code); + transition.addRegionInfo(HRegionInfo.convert(this.parentHRI)); + transition.addRegionInfo(HRegionInfo.convert(this.daughter_1_HRI)); + transition.addRegionInfo(HRegionInfo.convert(this.daughter_2_HRI)); + ReportRegionStateTransitionRequest.Builder request = + ReportRegionStateTransitionRequest.newBuilder(); + request.addTransition(transition.build()); + request.setServer(ProtobufUtil.toServerName(getParentRegionState(env).getServerName())); + ReportRegionStateTransitionResponse response = env.getMasterServices().getAssignmentManager(). + reportRegionStateTransition(request.build()); + if (response.hasErrorMessage()) { + throw new IOException("Failed request=" + request + ", response=" + response); } } @@ -445,16 +456,7 @@ public class SplitTableRegionProcedure * @throws IOException */ private void setRegionStateToRevertSplitting(final MasterProcedureEnv env) throws IOException { - RegionStateTransition.Builder transition = RegionStateTransition.newBuilder(); - transition.setTransitionCode(TransitionCode.SPLIT_REVERTED); - transition.addRegionInfo(HRegionInfo.convert(parentHRI)); - transition.addRegionInfo(HRegionInfo.convert(daughter_1_HRI)); - transition.addRegionInfo(HRegionInfo.convert(daughter_2_HRI)); - if (env.getMasterServices().getAssignmentManager().onRegionTransition( - getParentRegionState(env).getServerName(), transition.build()) != null) { - throw new IOException("Failed to update region state for " - + parentHRI.getRegionNameAsString() + " as part of operation for reverting split"); - } + reportRegionStateTransition(env, TransitionCode.SPLIT_REVERTED); } /** @@ -467,7 +469,7 @@ public class SplitTableRegionProcedure boolean success = env.getMasterServices().getServerManager().sendRegionCloseForSplitOrMerge( getParentRegionState(env).getServerName(), parentHRI); if (!success) { - throw new IOException("Close parent region " + parentHRI + " for splitting failed." + throw new IOException("Close of " + parentHRI.getEncodedName() + " for splitting failed." + " Check region server log for more details"); } } @@ -557,7 +559,7 @@ public class SplitTableRegionProcedure // Max #threads is the smaller of the number of storefiles or the default max determined above. 
int maxThreads = Math.min( conf.getInt(HConstants.REGION_SPLIT_THREADS_MAX, defMaxThreads), nbFiles); - LOG.info("Preparing to split " + nbFiles + " storefiles for region " + parentHRI + + LOG.info("Preparing to split " + nbFiles + " storefiles in " + parentHRI.getEncodedName() + " using " + maxThreads + " threads"); ThreadPoolExecutor threadPool = (ThreadPoolExecutor) Executors.newFixedThreadPool( maxThreads, Threads.getNamedThreadFactory("StoreFileSplitter-%1$d")); @@ -614,8 +616,7 @@ public class SplitTableRegionProcedure } if (LOG.isDebugEnabled()) { - LOG.debug("Split storefiles for region " + parentHRI + " Daughter A: " + daughterA - + " storefiles, Daughter B: " + daughterB + " storefiles."); + LOG.debug("Split storefiles for " + parentHRI.getEncodedName()); } return new Pair(daughterA, daughterB); } @@ -633,8 +634,8 @@ public class SplitTableRegionProcedure private Pair splitStoreFile(final HRegionFileSystem regionFs, final byte[] family, final StoreFile sf) throws IOException { - if (LOG.isDebugEnabled()) { - LOG.debug("Splitting started for store file: " + sf.getPath() + " for region: " + parentHRI); + if (LOG.isTraceEnabled()) { + LOG.trace("Splitting " + sf.getPath()); } final byte[] splitRow = getSplitRow(); @@ -644,7 +645,7 @@ public class SplitTableRegionProcedure final Path path_second = regionFs.splitStoreFile(this.daughter_2_HRI, familyName, sf, splitRow, true, null); if (LOG.isDebugEnabled()) { - LOG.debug("Splitting complete for store file: " + sf.getPath() + " for region: " + parentHRI); + LOG.debug("Split " + sf.getPath() + " into " + path_first + " & " + path_second); } return new Pair(path_first, path_second); } @@ -697,7 +698,7 @@ public class SplitTableRegionProcedure } } catch (IOException e) { LOG.error("Row key of mutation from coprocessor is not parsable as region name." 
- + "Mutations from coprocessor should only for hbase:meta table."); + + "Mutations from coprocessor should only be for hbase:meta table.", e); throw e; } } @@ -709,16 +710,7 @@ public class SplitTableRegionProcedure * @throws IOException */ private void updateMetaForDaughterRegions(final MasterProcedureEnv env) throws IOException { - RegionStateTransition.Builder transition = RegionStateTransition.newBuilder(); - transition.setTransitionCode(TransitionCode.SPLIT_PONR); - transition.addRegionInfo(HRegionInfo.convert(parentHRI)); - transition.addRegionInfo(HRegionInfo.convert(daughter_1_HRI)); - transition.addRegionInfo(HRegionInfo.convert(daughter_2_HRI)); - if (env.getMasterServices().getAssignmentManager().onRegionTransition( - getParentRegionState(env).getServerName(), transition.build()) != null) { - throw new IOException("Failed to update meta to add daughter regions in split region " - + parentHRI.getRegionNameAsString()); - } + reportRegionStateTransition(env, TransitionCode.SPLIT_PONR); } /** @@ -741,8 +733,10 @@ public class SplitTableRegionProcedure **/ private void openDaughterRegions(final MasterProcedureEnv env) throws IOException, InterruptedException { + /* TODO env.getMasterServices().getAssignmentManager().assignDaughterRegions( parentHRI, daughter_1_HRI, daughter_2_HRI); + */ } /** @@ -764,10 +758,11 @@ public class SplitTableRegionProcedure private RegionState getParentRegionState(final MasterProcedureEnv env) { RegionStates regionStates = env.getMasterServices().getAssignmentManager().getRegionStates(); RegionState state = regionStates.getRegionState(parentHRI); + /* TODO if (state == null) { LOG.warn("Split but not in region states: " + parentHRI); state = regionStates.createRegionState(parentHRI); - } + }*/ return state; } @@ -782,4 +777,4 @@ public class SplitTableRegionProcedure } return traceEnabled; } -} +} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.java index 7482831..017fc57 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.java @@ -85,7 +85,7 @@ public class TruncateTableProcedure // TODO: Move out...
in the acquireLock() LOG.debug("waiting for '" + getTableName() + "' regions in transition"); - regions = ProcedureSyncWait.getRegionsFromMeta(env, getTableName()); + regions = env.getAssignmentManager().getRegionStates().getRegionsOfTable(getTableName()); assert regions != null && !regions.isEmpty() : "unexpected 0 regions"; ProcedureSyncWait.waitRegionInTransition(env, regions); @@ -121,12 +121,14 @@ public class TruncateTableProcedure setNextState(TruncateTableState.TRUNCATE_TABLE_ASSIGN_REGIONS); break; case TRUNCATE_TABLE_ASSIGN_REGIONS: - CreateTableProcedure.assignRegions(env, getTableName(), regions); + CreateTableProcedure.setEnablingState(env, getTableName()); + addChildProcedure(env.getAssignmentManager().createAssignProcedures(regions)); setNextState(TruncateTableState.TRUNCATE_TABLE_POST_OPERATION); hTableDescriptor = null; regions = null; break; case TRUNCATE_TABLE_POST_OPERATION: + CreateTableProcedure.setEnabledState(env, getTableName()); postTruncate(env); LOG.debug("truncate '" + getTableName() + "' completed"); return Flow.NO_MORE_STATE; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java index 647a770..8ce9fd2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java @@ -312,7 +312,7 @@ public class MasterQuotaManager implements RegionStateListener { namespaceQuotaManager.checkQuotaToCreateTable(tName, regions); } } - + public void checkAndUpdateNamespaceRegionQuota(TableName tName, int regions) throws IOException { if (enabled) { namespaceQuotaManager.checkQuotaToUpdateRegion(tName, regions); @@ -329,18 +329,27 @@ public class MasterQuotaManager implements RegionStateListener { return -1; } + @Override public void onRegionMerged(HRegionInfo hri) throws IOException { if (enabled) { namespaceQuotaManager.updateQuotaForRegionMerge(hri); } } + @Override public void onRegionSplit(HRegionInfo hri) throws IOException { if (enabled) { namespaceQuotaManager.checkQuotaToSplitRegion(hri); } } + @Override + public void onRegionSplitReverted(HRegionInfo hri) throws IOException { + if (enabled) { + this.namespaceQuotaManager.removeRegionFromNamespaceUsage(hri); + } + } + /** * Remove table from namespace quota. * @@ -478,12 +487,5 @@ public class MasterQuotaManager implements RegionStateListener { } } } - - @Override - public void onRegionSplitReverted(HRegionInfo hri) throws IOException { - if (enabled) { - this.namespaceQuotaManager.removeRegionFromNamespaceUsage(hri); - } - } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplit.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplit.java new file mode 100644 index 0000000..2ce8ce5 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplit.java @@ -0,0 +1,722 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.regionserver; + +import java.io.IOException; +import java.io.PrintWriter; +import java.io.StringWriter; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.Executors; +import java.util.concurrent.RejectedExecutionException; +import java.util.concurrent.RejectedExecutionHandler; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.conf.ConfigurationManager; +import org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver; +import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext; +import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest; +import org.apache.hadoop.hbase.regionserver.throttle.CompactionThroughputControllerFactory; +import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController; +import org.apache.hadoop.hbase.security.User; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.hadoop.hbase.util.Pair; +import org.apache.hadoop.hbase.util.StealJobQueue; +import org.apache.hadoop.ipc.RemoteException; +import org.apache.hadoop.util.StringUtils; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; + +/** + * Compact region on request and then run split if appropriate + */ +@InterfaceAudience.Private +public class CompactSplit implements CompactionRequestor, PropagatingConfigurationObserver { + private static final Log LOG = LogFactory.getLog(CompactSplit.class); + + // Configuration key for the large compaction threads. + public final static String LARGE_COMPACTION_THREADS = + "hbase.regionserver.thread.compaction.large"; + public final static int LARGE_COMPACTION_THREADS_DEFAULT = 1; + + // Configuration key for the small compaction threads. 
+ public final static String SMALL_COMPACTION_THREADS = + "hbase.regionserver.thread.compaction.small"; + public final static int SMALL_COMPACTION_THREADS_DEFAULT = 1; + + // Configuration key for split threads + public final static String SPLIT_THREADS = "hbase.regionserver.thread.split"; + public final static int SPLIT_THREADS_DEFAULT = 1; + + // Configuration keys for merge threads + public final static String MERGE_THREADS = "hbase.regionserver.thread.merge"; + public final static int MERGE_THREADS_DEFAULT = 1; + + public static final String REGION_SERVER_REGION_SPLIT_LIMIT = + "hbase.regionserver.regionSplitLimit"; + public static final int DEFAULT_REGION_SERVER_REGION_SPLIT_LIMIT= 1000; + + private final HRegionServer server; + private final Configuration conf; + + private final ThreadPoolExecutor longCompactions; + private final ThreadPoolExecutor shortCompactions; + private final ThreadPoolExecutor splits; + private final ThreadPoolExecutor mergePool; + + private volatile ThroughputController compactionThroughputController; + + /** + * Splitting should not take place if the total number of regions exceed this. + * This is not a hard limit to the number of regions but it is a guideline to + * stop splitting after number of online regions is greater than this. + */ + private int regionSplitLimit; + + /** @param server */ + CompactSplit(HRegionServer server) { + super(); + this.server = server; + this.conf = server.getConfiguration(); + this.regionSplitLimit = conf.getInt(REGION_SERVER_REGION_SPLIT_LIMIT, + DEFAULT_REGION_SERVER_REGION_SPLIT_LIMIT); + + int largeThreads = Math.max(1, conf.getInt( + LARGE_COMPACTION_THREADS, LARGE_COMPACTION_THREADS_DEFAULT)); + int smallThreads = conf.getInt( + SMALL_COMPACTION_THREADS, SMALL_COMPACTION_THREADS_DEFAULT); + + int splitThreads = conf.getInt(SPLIT_THREADS, SPLIT_THREADS_DEFAULT); + + // if we have throttle threads, make sure the user also specified size + Preconditions.checkArgument(largeThreads > 0 && smallThreads > 0); + + final String n = Thread.currentThread().getName(); + + StealJobQueue stealJobQueue = new StealJobQueue<>(); + this.longCompactions = new ThreadPoolExecutor(largeThreads, largeThreads, + 60, TimeUnit.SECONDS, stealJobQueue, + new ThreadFactory() { + @Override + public Thread newThread(Runnable r) { + String name = n + "-longCompactions-" + System.currentTimeMillis(); + return new Thread(r, name); + } + }); + this.longCompactions.setRejectedExecutionHandler(new Rejection()); + this.longCompactions.prestartAllCoreThreads(); + this.shortCompactions = new ThreadPoolExecutor(smallThreads, smallThreads, + 60, TimeUnit.SECONDS, stealJobQueue.getStealFromQueue(), + new ThreadFactory() { + @Override + public Thread newThread(Runnable r) { + String name = n + "-shortCompactions-" + System.currentTimeMillis(); + return new Thread(r, name); + } + }); + this.shortCompactions + .setRejectedExecutionHandler(new Rejection()); + this.splits = (ThreadPoolExecutor) + Executors.newFixedThreadPool(splitThreads, + new ThreadFactory() { + @Override + public Thread newThread(Runnable r) { + String name = n + "-splits-" + System.currentTimeMillis(); + return new Thread(r, name); + } + }); + int mergeThreads = conf.getInt(MERGE_THREADS, MERGE_THREADS_DEFAULT); + this.mergePool = (ThreadPoolExecutor) Executors.newFixedThreadPool( + mergeThreads, new ThreadFactory() { + @Override + public Thread newThread(Runnable r) { + String name = n + "-merges-" + System.currentTimeMillis(); + return new Thread(r, name); + } + }); + + // compaction 
throughput controller + this.compactionThroughputController = + CompactionThroughputControllerFactory.create(server, conf); + } + + @Override + public String toString() { + return "compaction_queue=(" + + longCompactions.getQueue().size() + ":" + + shortCompactions.getQueue().size() + ")" + + ", split_queue=" + splits.getQueue().size() + + ", merge_queue=" + mergePool.getQueue().size(); + } + + public String dumpQueue() { + StringBuffer queueLists = new StringBuffer(); + queueLists.append("Compaction/Split Queue dump:\n"); + queueLists.append(" LargeCompaction Queue:\n"); + BlockingQueue lq = longCompactions.getQueue(); + Iterator it = lq.iterator(); + while (it.hasNext()) { + queueLists.append(" " + it.next().toString()); + queueLists.append("\n"); + } + + if (shortCompactions != null) { + queueLists.append("\n"); + queueLists.append(" SmallCompaction Queue:\n"); + lq = shortCompactions.getQueue(); + it = lq.iterator(); + while (it.hasNext()) { + queueLists.append(" " + it.next().toString()); + queueLists.append("\n"); + } + } + + queueLists.append("\n"); + queueLists.append(" Split Queue:\n"); + lq = splits.getQueue(); + it = lq.iterator(); + while (it.hasNext()) { + queueLists.append(" " + it.next().toString()); + queueLists.append("\n"); + } + + queueLists.append("\n"); + queueLists.append(" Region Merge Queue:\n"); + lq = mergePool.getQueue(); + it = lq.iterator(); + while (it.hasNext()) { + queueLists.append(" " + it.next().toString()); + queueLists.append("\n"); + } + + return queueLists.toString(); + } + + public synchronized boolean requestSplit(final Region r) { + // don't split regions that are blocking + if (shouldSplitRegion() && ((HRegion)r).getCompactPriority() >= Store.PRIORITY_USER) { + byte[] midKey = ((HRegion)r).checkSplit(); + if (midKey != null) { + requestSplit(r, midKey); + return true; + } + } + return false; + } + + public synchronized void requestSplit(final Region r, byte[] midKey) { + requestSplit(r, midKey, null); + } + + /* + * The User parameter allows the split thread to assume the correct user identity + */ + public synchronized void requestSplit(final Region r, byte[] midKey, User user) { + if (midKey == null) { + LOG.debug("Region " + r.getRegionInfo().getRegionNameAsString() + + " not splittable because midkey=null"); + if (((HRegion)r).shouldForceSplit()) { + ((HRegion)r).clearSplit(); + } + return; + } + try { + this.splits.execute(new SplitRequest(r, midKey, this.server, user)); + if (LOG.isDebugEnabled()) { + LOG.debug("Splitting " + r + ", " + this); + } + } catch (RejectedExecutionException ree) { + LOG.info("Could not execute split for " + r, ree); + } + } + + @Override + public synchronized List requestCompaction(final Region r, final String why) + throws IOException { + return requestCompaction(r, why, null); + } + + @Override + public synchronized List requestCompaction(final Region r, final String why, + List> requests) throws IOException { + return requestCompaction(r, why, Store.NO_PRIORITY, requests, null); + } + + @Override + public synchronized CompactionRequest requestCompaction(final Region r, final Store s, + final String why, CompactionRequest request) throws IOException { + return requestCompaction(r, s, why, Store.NO_PRIORITY, request, null); + } + + @Override + public synchronized List requestCompaction(final Region r, final String why, + int p, List> requests, User user) throws IOException { + return requestCompactionInternal(r, why, p, requests, true, user); + } + + private List requestCompactionInternal(final Region r, final
String why, + int p, List> requests, boolean selectNow, User user) + throws IOException { + // not a special compaction request, so make our own list + List ret = null; + if (requests == null) { + ret = selectNow ? new ArrayList(r.getStores().size()) : null; + for (Store s : r.getStores()) { + CompactionRequest cr = requestCompactionInternal(r, s, why, p, null, selectNow, user); + if (selectNow) ret.add(cr); + } + } else { + Preconditions.checkArgument(selectNow); // only system requests have selectNow == false + ret = new ArrayList(requests.size()); + for (Pair pair : requests) { + ret.add(requestCompaction(r, pair.getSecond(), why, p, pair.getFirst(), user)); + } + } + return ret; + } + + public CompactionRequest requestCompaction(final Region r, final Store s, + final String why, int priority, CompactionRequest request, User user) throws IOException { + return requestCompactionInternal(r, s, why, priority, request, true, user); + } + + public synchronized void requestSystemCompaction( + final Region r, final String why) throws IOException { + requestCompactionInternal(r, why, Store.NO_PRIORITY, null, false, null); + } + + public void requestSystemCompaction( + final Region r, final Store s, final String why) throws IOException { + requestCompactionInternal(r, s, why, Store.NO_PRIORITY, null, false, null); + } + + /** + * @param r region store belongs to + * @param s Store to request compaction on + * @param why Why compaction requested -- used in debug messages + * @param priority override the default priority (NO_PRIORITY == decide) + * @param request custom compaction request. Can be null in which case a simple + * compaction will be used. + */ + private synchronized CompactionRequest requestCompactionInternal(final Region r, final Store s, + final String why, int priority, CompactionRequest request, boolean selectNow, User user) + throws IOException { + if (this.server.isStopped() + || (r.getTableDesc() != null && !r.getTableDesc().isCompactionEnabled())) { + return null; + } + + CompactionContext compaction = null; + if (selectNow) { + compaction = selectCompaction(r, s, priority, request, user); + if (compaction == null) return null; // message logged inside + } + + // We assume that most compactions are small. So, put system compactions into small + // pool; we will do selection there, and move to large pool if necessary. + ThreadPoolExecutor pool = (selectNow && s.throttleCompaction(compaction.getRequest().getSize())) + ? longCompactions : shortCompactions; + pool.execute(new CompactionRunner(s, r, compaction, pool, user)); + if (LOG.isDebugEnabled()) { + String type = (pool == shortCompactions) ? "Small " : "Large "; + LOG.debug(type + "Compaction requested: " + (selectNow ? compaction.toString() : "system") + + (why != null && !why.isEmpty() ? "; Because: " + why : "") + "; " + this); + } + return selectNow ? 
compaction.getRequest() : null; + } + + private CompactionContext selectCompaction(final Region r, final Store s, + int priority, CompactionRequest request, User user) throws IOException { + CompactionContext compaction = s.requestCompaction(priority, request, user); + if (compaction == null) { + if(LOG.isDebugEnabled() && r.getRegionInfo() != null) { + LOG.debug("Not compacting " + r.getRegionInfo().getRegionNameAsString() + + " because compaction request was cancelled"); + } + return null; + } + assert compaction.hasSelection(); + if (priority != Store.NO_PRIORITY) { + compaction.getRequest().setPriority(priority); + } + return compaction; + } + + /** + * Only interrupt once it's done with a run through the work loop. + */ + void interruptIfNecessary() { + splits.shutdown(); + mergePool.shutdown(); + longCompactions.shutdown(); + shortCompactions.shutdown(); + } + + private void waitFor(ThreadPoolExecutor t, String name) { + boolean done = false; + while (!done) { + try { + done = t.awaitTermination(60, TimeUnit.SECONDS); + LOG.info("Waiting for " + name + " to finish..."); + if (!done) { + t.shutdownNow(); + } + } catch (InterruptedException ie) { + LOG.warn("Interrupted waiting for " + name + " to finish..."); + } + } + } + + void join() { + waitFor(splits, "Split Thread"); + waitFor(mergePool, "Merge Thread"); + waitFor(longCompactions, "Large Compaction Thread"); + waitFor(shortCompactions, "Small Compaction Thread"); + } + + /** + * Returns the current size of the queue containing regions that are + * processed. + * + * @return The current size of the regions queue. + */ + public int getCompactionQueueSize() { + return longCompactions.getQueue().size() + shortCompactions.getQueue().size(); + } + + public int getLargeCompactionQueueSize() { + return longCompactions.getQueue().size(); + } + + + public int getSmallCompactionQueueSize() { + return shortCompactions.getQueue().size(); + } + + public int getSplitQueueSize() { + return splits.getQueue().size(); + } + + private boolean shouldSplitRegion() { + if(server.getNumberOfOnlineRegions() > 0.9*regionSplitLimit) { + LOG.warn("Total number of regions is approaching the upper limit " + regionSplitLimit + ". " + + "Please consider taking a look at http://hbase.apache.org/book.html#ops.regionmgt"); + } + return (regionSplitLimit > server.getNumberOfOnlineRegions()); + } + + /** + * @return the regionSplitLimit + */ + public int getRegionSplitLimit() { + return this.regionSplitLimit; + } + + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="EQ_COMPARETO_USE_OBJECT_EQUALS", + justification="Contrived use of compareTo") + private class CompactionRunner implements Runnable, Comparable { + private final Store store; + private final HRegion region; + private CompactionContext compaction; + private int queuedPriority; + private ThreadPoolExecutor parent; + private User user; + + public CompactionRunner(Store store, Region region, + CompactionContext compaction, ThreadPoolExecutor parent, User user) { + super(); + this.store = store; + this.region = (HRegion)region; + this.compaction = compaction; + this.queuedPriority = (this.compaction == null) + ? store.getCompactPriority() : compaction.getRequest().getPriority(); + this.parent = parent; + this.user = user; + } + + @Override + public String toString() { + return (this.compaction != null) ? 
("Request = " + compaction.getRequest()) + : ("Store = " + store.toString() + ", pri = " + queuedPriority); + } + + private void doCompaction(User user) { + // Common case - system compaction without a file selection. Select now. + if (this.compaction == null) { + int oldPriority = this.queuedPriority; + this.queuedPriority = this.store.getCompactPriority(); + if (this.queuedPriority > oldPriority) { + // Store priority decreased while we were in queue (due to some other compaction?), + // requeue with new priority to avoid blocking potential higher priorities. + this.parent.execute(this); + return; + } + try { + this.compaction = selectCompaction(this.region, this.store, queuedPriority, null, user); + } catch (IOException ex) { + LOG.error("Compaction selection failed " + this, ex); + server.checkFileSystem(); + return; + } + if (this.compaction == null) return; // nothing to do + // Now see if we are in correct pool for the size; if not, go to the correct one. + // We might end up waiting for a while, so cancel the selection. + assert this.compaction.hasSelection(); + ThreadPoolExecutor pool = store.throttleCompaction( + compaction.getRequest().getSize()) ? longCompactions : shortCompactions; + + // Long compaction pool can process small job + // Short compaction pool should not process large job + if (this.parent == shortCompactions && pool == longCompactions) { + this.store.cancelRequestedCompaction(this.compaction); + this.compaction = null; + this.parent = pool; + this.parent.execute(this); + return; + } + } + // Finally we can compact something. + assert this.compaction != null; + + this.compaction.getRequest().beforeExecute(); + try { + // Note: please don't put single-compaction logic here; + // put it into region/store/etc. This is CST logic. + long start = EnvironmentEdgeManager.currentTime(); + boolean completed = + region.compact(compaction, store, compactionThroughputController, user); + long now = EnvironmentEdgeManager.currentTime(); + LOG.info(((completed) ? "Completed" : "Aborted") + " compaction: " + + this + "; duration=" + StringUtils.formatTimeDiff(now, start)); + if (completed) { + // degenerate case: blocked regions require recursive enqueues + if (store.getCompactPriority() <= 0) { + requestSystemCompaction(region, store, "Recursive enqueue"); + } else { + // see if the compaction has caused us to exceed max region size + requestSplit(region); + } + } + } catch (IOException ex) { + IOException remoteEx = + ex instanceof RemoteException ? 
((RemoteException) ex).unwrapRemoteException() : ex; + LOG.error("Compaction failed " + this, remoteEx); + if (remoteEx != ex) { + LOG.info("Compaction failed at original callstack: " + formatStackTrace(ex)); + } + region.reportCompactionRequestFailure(); + server.checkFileSystem(); + } catch (Exception ex) { + LOG.error("Compaction failed " + this, ex); + region.reportCompactionRequestFailure(); + server.checkFileSystem(); + } finally { + LOG.debug("CompactSplitThread Status: " + CompactSplit.this); + } + this.compaction.getRequest().afterExecute(); + } + + @Override + public void run() { + Preconditions.checkNotNull(server); + if (server.isStopped() + || (region.getTableDesc() != null && !region.getTableDesc().isCompactionEnabled())) { + return; + } + doCompaction(user); + } + + private String formatStackTrace(Exception ex) { + StringWriter sw = new StringWriter(); + PrintWriter pw = new PrintWriter(sw); + ex.printStackTrace(pw); + pw.flush(); + return sw.toString(); + } + + @Override + public int compareTo(CompactionRunner o) { + // Only compare the underlying request (if any), for queue sorting purposes. + int compareVal = queuedPriority - o.queuedPriority; // compare priority + if (compareVal != 0) return compareVal; + CompactionContext tc = this.compaction, oc = o.compaction; + // Sort pre-selected (user?) compactions before system ones with equal priority. + return (tc == null) ? ((oc == null) ? 0 : 1) + : ((oc == null) ? -1 : tc.getRequest().compareTo(oc.getRequest())); + } + } + + /** + * Cleanup class to use when rejecting a compaction request from the queue. + */ + private static class Rejection implements RejectedExecutionHandler { + @Override + public void rejectedExecution(Runnable runnable, ThreadPoolExecutor pool) { + if (runnable instanceof CompactionRunner) { + CompactionRunner runner = (CompactionRunner)runnable; + LOG.debug("Compaction Rejected: " + runner); + runner.store.cancelRequestedCompaction(runner.compaction); + } + } + } + + /** + * {@inheritDoc} + */ + @Override + public void onConfigurationChange(Configuration newConf) { + // Check if number of large / small compaction threads has changed, and then + // adjust the core pool size of the thread pools, by using the + // setCorePoolSize() method. According to the javadocs, it is safe to + // change the core pool size on-the-fly. We need to reset the maximum + // pool size, as well. 
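+ // (Illustrative note, not part of the original patch.) The two orderings below keep
+ // corePoolSize <= maximumPoolSize at every intermediate step: when growing a pool
+ // (e.g. 1 -> 4 threads) the maximum is raised before the core, and when shrinking
+ // (e.g. 4 -> 1) the core is lowered before the maximum, because setMaximumPoolSize()
+ // rejects a value smaller than the current core pool size.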
+ int largeThreads = Math.max(1, newConf.getInt( + LARGE_COMPACTION_THREADS, + LARGE_COMPACTION_THREADS_DEFAULT)); + if (this.longCompactions.getCorePoolSize() != largeThreads) { + LOG.info("Changing the value of " + LARGE_COMPACTION_THREADS + + " from " + this.longCompactions.getCorePoolSize() + " to " + + largeThreads); + if(this.longCompactions.getCorePoolSize() < largeThreads) { + this.longCompactions.setMaximumPoolSize(largeThreads); + this.longCompactions.setCorePoolSize(largeThreads); + } else { + this.longCompactions.setCorePoolSize(largeThreads); + this.longCompactions.setMaximumPoolSize(largeThreads); + } + } + + int smallThreads = newConf.getInt(SMALL_COMPACTION_THREADS, + SMALL_COMPACTION_THREADS_DEFAULT); + if (this.shortCompactions.getCorePoolSize() != smallThreads) { + LOG.info("Changing the value of " + SMALL_COMPACTION_THREADS + + " from " + this.shortCompactions.getCorePoolSize() + " to " + + smallThreads); + if(this.shortCompactions.getCorePoolSize() < smallThreads) { + this.shortCompactions.setMaximumPoolSize(smallThreads); + this.shortCompactions.setCorePoolSize(smallThreads); + } else { + this.shortCompactions.setCorePoolSize(smallThreads); + this.shortCompactions.setMaximumPoolSize(smallThreads); + } + } + + int splitThreads = newConf.getInt(SPLIT_THREADS, + SPLIT_THREADS_DEFAULT); + if (this.splits.getCorePoolSize() != splitThreads) { + LOG.info("Changing the value of " + SPLIT_THREADS + + " from " + this.splits.getCorePoolSize() + " to " + + splitThreads); + if(this.splits.getCorePoolSize() < splitThreads) { + this.splits.setMaximumPoolSize(splitThreads); + this.splits.setCorePoolSize(splitThreads); + } else { + this.splits.setCorePoolSize(splitThreads); + this.splits.setMaximumPoolSize(splitThreads); + } + } + + int mergeThreads = newConf.getInt(MERGE_THREADS, + MERGE_THREADS_DEFAULT); + if (this.mergePool.getCorePoolSize() != mergeThreads) { + LOG.info("Changing the value of " + MERGE_THREADS + + " from " + this.mergePool.getCorePoolSize() + " to " + + mergeThreads); + if(this.mergePool.getCorePoolSize() < mergeThreads) { + this.mergePool.setMaximumPoolSize(mergeThreads); + this.mergePool.setCorePoolSize(mergeThreads); + } else { + this.mergePool.setCorePoolSize(mergeThreads); + this.mergePool.setMaximumPoolSize(mergeThreads); + } + } + + ThroughputController old = this.compactionThroughputController; + if (old != null) { + old.stop("configuration change"); + } + this.compactionThroughputController = + CompactionThroughputControllerFactory.create(server, newConf); + + // We change this atomically here instead of reloading the config in order that upstream + // would be the only one with the flexibility to reload the config. + this.conf.reloadConfiguration(); + } + + protected int getSmallCompactionThreadNum() { + return this.shortCompactions.getCorePoolSize(); + } + + protected int getLargeCompactionThreadNum() { + return this.longCompactions.getCorePoolSize(); + } + + protected int getSplitThreadNum() { + return this.splits.getCorePoolSize(); + } + + protected int getMergeThreadNum() { + return this.mergePool.getCorePoolSize(); + } + + /** + * {@inheritDoc} + */ + @Override + public void registerChildren(ConfigurationManager manager) { + // No children to register. 
+ } + + /** + * {@inheritDoc} + */ + @Override + public void deregisterChildren(ConfigurationManager manager) { + // No children to register + } + + @VisibleForTesting + public ThroughputController getCompactionThroughputController() { + return compactionThroughputController; + } + + @VisibleForTesting + public long getCompletedMergeTaskCount() { + return mergePool.getCompletedTaskCount(); + } + + @VisibleForTesting + /** + * Shutdown the long compaction thread pool. + * Should only be used in unit test to prevent long compaction thread pool from stealing job + * from short compaction queue + */ + void shutdownLongCompactions(){ + this.longCompactions.shutdown(); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java deleted file mode 100644 index 6870445..0000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java +++ /dev/null @@ -1,722 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hbase.regionserver; - -import java.io.IOException; -import java.io.PrintWriter; -import java.io.StringWriter; -import java.util.ArrayList; -import java.util.Iterator; -import java.util.List; -import java.util.concurrent.BlockingQueue; -import java.util.concurrent.Executors; -import java.util.concurrent.RejectedExecutionException; -import java.util.concurrent.RejectedExecutionHandler; -import java.util.concurrent.ThreadFactory; -import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.TimeUnit; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.conf.ConfigurationManager; -import org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver; -import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext; -import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest; -import org.apache.hadoop.hbase.regionserver.throttle.CompactionThroughputControllerFactory; -import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController; -import org.apache.hadoop.hbase.security.User; -import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -import org.apache.hadoop.hbase.util.Pair; -import org.apache.hadoop.hbase.util.StealJobQueue; -import org.apache.hadoop.ipc.RemoteException; -import org.apache.hadoop.util.StringUtils; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; - -/** - * Compact region on request and then run split if appropriate - */ -@InterfaceAudience.Private -public class CompactSplitThread implements CompactionRequestor, PropagatingConfigurationObserver { - private static final Log LOG = LogFactory.getLog(CompactSplitThread.class); - - // Configuration key for the large compaction threads. - public final static String LARGE_COMPACTION_THREADS = - "hbase.regionserver.thread.compaction.large"; - public final static int LARGE_COMPACTION_THREADS_DEFAULT = 1; - - // Configuration key for the small compaction threads. - public final static String SMALL_COMPACTION_THREADS = - "hbase.regionserver.thread.compaction.small"; - public final static int SMALL_COMPACTION_THREADS_DEFAULT = 1; - - // Configuration key for split threads - public final static String SPLIT_THREADS = "hbase.regionserver.thread.split"; - public final static int SPLIT_THREADS_DEFAULT = 1; - - // Configuration keys for merge threads - public final static String MERGE_THREADS = "hbase.regionserver.thread.merge"; - public final static int MERGE_THREADS_DEFAULT = 1; - - public static final String REGION_SERVER_REGION_SPLIT_LIMIT = - "hbase.regionserver.regionSplitLimit"; - public static final int DEFAULT_REGION_SERVER_REGION_SPLIT_LIMIT= 1000; - - private final HRegionServer server; - private final Configuration conf; - - private final ThreadPoolExecutor longCompactions; - private final ThreadPoolExecutor shortCompactions; - private final ThreadPoolExecutor splits; - private final ThreadPoolExecutor mergePool; - - private volatile ThroughputController compactionThroughputController; - - /** - * Splitting should not take place if the total number of regions exceed this. - * This is not a hard limit to the number of regions but it is a guideline to - * stop splitting after number of online regions is greater than this. 
- */ - private int regionSplitLimit; - - /** @param server */ - CompactSplitThread(HRegionServer server) { - super(); - this.server = server; - this.conf = server.getConfiguration(); - this.regionSplitLimit = conf.getInt(REGION_SERVER_REGION_SPLIT_LIMIT, - DEFAULT_REGION_SERVER_REGION_SPLIT_LIMIT); - - int largeThreads = Math.max(1, conf.getInt( - LARGE_COMPACTION_THREADS, LARGE_COMPACTION_THREADS_DEFAULT)); - int smallThreads = conf.getInt( - SMALL_COMPACTION_THREADS, SMALL_COMPACTION_THREADS_DEFAULT); - - int splitThreads = conf.getInt(SPLIT_THREADS, SPLIT_THREADS_DEFAULT); - - // if we have throttle threads, make sure the user also specified size - Preconditions.checkArgument(largeThreads > 0 && smallThreads > 0); - - final String n = Thread.currentThread().getName(); - - StealJobQueue stealJobQueue = new StealJobQueue<>(); - this.longCompactions = new ThreadPoolExecutor(largeThreads, largeThreads, - 60, TimeUnit.SECONDS, stealJobQueue, - new ThreadFactory() { - @Override - public Thread newThread(Runnable r) { - String name = n + "-longCompactions-" + System.currentTimeMillis(); - return new Thread(r, name); - } - }); - this.longCompactions.setRejectedExecutionHandler(new Rejection()); - this.longCompactions.prestartAllCoreThreads(); - this.shortCompactions = new ThreadPoolExecutor(smallThreads, smallThreads, - 60, TimeUnit.SECONDS, stealJobQueue.getStealFromQueue(), - new ThreadFactory() { - @Override - public Thread newThread(Runnable r) { - String name = n + "-shortCompactions-" + System.currentTimeMillis(); - return new Thread(r, name); - } - }); - this.shortCompactions - .setRejectedExecutionHandler(new Rejection()); - this.splits = (ThreadPoolExecutor) - Executors.newFixedThreadPool(splitThreads, - new ThreadFactory() { - @Override - public Thread newThread(Runnable r) { - String name = n + "-splits-" + System.currentTimeMillis(); - return new Thread(r, name); - } - }); - int mergeThreads = conf.getInt(MERGE_THREADS, MERGE_THREADS_DEFAULT); - this.mergePool = (ThreadPoolExecutor) Executors.newFixedThreadPool( - mergeThreads, new ThreadFactory() { - @Override - public Thread newThread(Runnable r) { - String name = n + "-merges-" + System.currentTimeMillis(); - return new Thread(r, name); - } - }); - - // compaction throughput controller - this.compactionThroughputController = - CompactionThroughputControllerFactory.create(server, conf); - } - - @Override - public String toString() { - return "compaction_queue=(" - + longCompactions.getQueue().size() + ":" - + shortCompactions.getQueue().size() + ")" - + ", split_queue=" + splits.getQueue().size() - + ", merge_queue=" + mergePool.getQueue().size(); - } - - public String dumpQueue() { - StringBuffer queueLists = new StringBuffer(); - queueLists.append("Compaction/Split Queue dump:\n"); - queueLists.append(" LargeCompation Queue:\n"); - BlockingQueue lq = longCompactions.getQueue(); - Iterator it = lq.iterator(); - while (it.hasNext()) { - queueLists.append(" " + it.next().toString()); - queueLists.append("\n"); - } - - if (shortCompactions != null) { - queueLists.append("\n"); - queueLists.append(" SmallCompation Queue:\n"); - lq = shortCompactions.getQueue(); - it = lq.iterator(); - while (it.hasNext()) { - queueLists.append(" " + it.next().toString()); - queueLists.append("\n"); - } - } - - queueLists.append("\n"); - queueLists.append(" Split Queue:\n"); - lq = splits.getQueue(); - it = lq.iterator(); - while (it.hasNext()) { - queueLists.append(" " + it.next().toString()); - queueLists.append("\n"); - } - - 
queueLists.append("\n"); - queueLists.append(" Region Merge Queue:\n"); - lq = mergePool.getQueue(); - it = lq.iterator(); - while (it.hasNext()) { - queueLists.append(" " + it.next().toString()); - queueLists.append("\n"); - } - - return queueLists.toString(); - } - - public synchronized boolean requestSplit(final Region r) { - // don't split regions that are blocking - if (shouldSplitRegion() && ((HRegion)r).getCompactPriority() >= Store.PRIORITY_USER) { - byte[] midKey = ((HRegion)r).checkSplit(); - if (midKey != null) { - requestSplit(r, midKey); - return true; - } - } - return false; - } - - public synchronized void requestSplit(final Region r, byte[] midKey) { - requestSplit(r, midKey, null); - } - - /* - * The User parameter allows the split thread to assume the correct user identity - */ - public synchronized void requestSplit(final Region r, byte[] midKey, User user) { - if (midKey == null) { - LOG.debug("Region " + r.getRegionInfo().getRegionNameAsString() + - " not splittable because midkey=null"); - if (((HRegion)r).shouldForceSplit()) { - ((HRegion)r).clearSplit(); - } - return; - } - try { - this.splits.execute(new SplitRequest(r, midKey, this.server, user)); - if (LOG.isDebugEnabled()) { - LOG.debug("Split requested for " + r + ". " + this); - } - } catch (RejectedExecutionException ree) { - LOG.info("Could not execute split for " + r, ree); - } - } - - @Override - public synchronized List requestCompaction(final Region r, final String why) - throws IOException { - return requestCompaction(r, why, null); - } - - @Override - public synchronized List requestCompaction(final Region r, final String why, - List> requests) throws IOException { - return requestCompaction(r, why, Store.NO_PRIORITY, requests, null); - } - - @Override - public synchronized CompactionRequest requestCompaction(final Region r, final Store s, - final String why, CompactionRequest request) throws IOException { - return requestCompaction(r, s, why, Store.NO_PRIORITY, request, null); - } - - @Override - public synchronized List requestCompaction(final Region r, final String why, - int p, List> requests, User user) throws IOException { - return requestCompactionInternal(r, why, p, requests, true, user); - } - - private List requestCompactionInternal(final Region r, final String why, - int p, List> requests, boolean selectNow, User user) - throws IOException { - // not a special compaction request, so make our own list - List ret = null; - if (requests == null) { - ret = selectNow ? 
new ArrayList(r.getStores().size()) : null; - for (Store s : r.getStores()) { - CompactionRequest cr = requestCompactionInternal(r, s, why, p, null, selectNow, user); - if (selectNow) ret.add(cr); - } - } else { - Preconditions.checkArgument(selectNow); // only system requests have selectNow == false - ret = new ArrayList(requests.size()); - for (Pair pair : requests) { - ret.add(requestCompaction(r, pair.getSecond(), why, p, pair.getFirst(), user)); - } - } - return ret; - } - - public CompactionRequest requestCompaction(final Region r, final Store s, - final String why, int priority, CompactionRequest request, User user) throws IOException { - return requestCompactionInternal(r, s, why, priority, request, true, user); - } - - public synchronized void requestSystemCompaction( - final Region r, final String why) throws IOException { - requestCompactionInternal(r, why, Store.NO_PRIORITY, null, false, null); - } - - public void requestSystemCompaction( - final Region r, final Store s, final String why) throws IOException { - requestCompactionInternal(r, s, why, Store.NO_PRIORITY, null, false, null); - } - - /** - * @param r region store belongs to - * @param s Store to request compaction on - * @param why Why compaction requested -- used in debug messages - * @param priority override the default priority (NO_PRIORITY == decide) - * @param request custom compaction request. Can be null in which case a simple - * compaction will be used. - */ - private synchronized CompactionRequest requestCompactionInternal(final Region r, final Store s, - final String why, int priority, CompactionRequest request, boolean selectNow, User user) - throws IOException { - if (this.server.isStopped() - || (r.getTableDesc() != null && !r.getTableDesc().isCompactionEnabled())) { - return null; - } - - CompactionContext compaction = null; - if (selectNow) { - compaction = selectCompaction(r, s, priority, request, user); - if (compaction == null) return null; // message logged inside - } - - // We assume that most compactions are small. So, put system compactions into small - // pool; we will do selection there, and move to large pool if necessary. - ThreadPoolExecutor pool = (selectNow && s.throttleCompaction(compaction.getRequest().getSize())) - ? longCompactions : shortCompactions; - pool.execute(new CompactionRunner(s, r, compaction, pool, user)); - if (LOG.isDebugEnabled()) { - String type = (pool == shortCompactions) ? "Small " : "Large "; - LOG.debug(type + "Compaction requested: " + (selectNow ? compaction.toString() : "system") - + (why != null && !why.isEmpty() ? "; Because: " + why : "") + "; " + this); - } - return selectNow ? compaction.getRequest() : null; - } - - private CompactionContext selectCompaction(final Region r, final Store s, - int priority, CompactionRequest request, User user) throws IOException { - CompactionContext compaction = s.requestCompaction(priority, request, user); - if (compaction == null) { - if(LOG.isDebugEnabled() && r.getRegionInfo() != null) { - LOG.debug("Not compacting " + r.getRegionInfo().getRegionNameAsString() + - " because compaction request was cancelled"); - } - return null; - } - assert compaction.hasSelection(); - if (priority != Store.NO_PRIORITY) { - compaction.getRequest().setPriority(priority); - } - return compaction; - } - - /** - * Only interrupt once it's done with a run through the work loop. 
- */ - void interruptIfNecessary() { - splits.shutdown(); - mergePool.shutdown(); - longCompactions.shutdown(); - shortCompactions.shutdown(); - } - - private void waitFor(ThreadPoolExecutor t, String name) { - boolean done = false; - while (!done) { - try { - done = t.awaitTermination(60, TimeUnit.SECONDS); - LOG.info("Waiting for " + name + " to finish..."); - if (!done) { - t.shutdownNow(); - } - } catch (InterruptedException ie) { - LOG.warn("Interrupted waiting for " + name + " to finish..."); - } - } - } - - void join() { - waitFor(splits, "Split Thread"); - waitFor(mergePool, "Merge Thread"); - waitFor(longCompactions, "Large Compaction Thread"); - waitFor(shortCompactions, "Small Compaction Thread"); - } - - /** - * Returns the current size of the queue containing regions that are - * processed. - * - * @return The current size of the regions queue. - */ - public int getCompactionQueueSize() { - return longCompactions.getQueue().size() + shortCompactions.getQueue().size(); - } - - public int getLargeCompactionQueueSize() { - return longCompactions.getQueue().size(); - } - - - public int getSmallCompactionQueueSize() { - return shortCompactions.getQueue().size(); - } - - public int getSplitQueueSize() { - return splits.getQueue().size(); - } - - private boolean shouldSplitRegion() { - if(server.getNumberOfOnlineRegions() > 0.9*regionSplitLimit) { - LOG.warn("Total number of regions is approaching the upper limit " + regionSplitLimit + ". " - + "Please consider taking a look at http://hbase.apache.org/book.html#ops.regionmgt"); - } - return (regionSplitLimit > server.getNumberOfOnlineRegions()); - } - - /** - * @return the regionSplitLimit - */ - public int getRegionSplitLimit() { - return this.regionSplitLimit; - } - - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="EQ_COMPARETO_USE_OBJECT_EQUALS", - justification="Contrived use of compareTo") - private class CompactionRunner implements Runnable, Comparable { - private final Store store; - private final HRegion region; - private CompactionContext compaction; - private int queuedPriority; - private ThreadPoolExecutor parent; - private User user; - - public CompactionRunner(Store store, Region region, - CompactionContext compaction, ThreadPoolExecutor parent, User user) { - super(); - this.store = store; - this.region = (HRegion)region; - this.compaction = compaction; - this.queuedPriority = (this.compaction == null) - ? store.getCompactPriority() : compaction.getRequest().getPriority(); - this.parent = parent; - this.user = user; - } - - @Override - public String toString() { - return (this.compaction != null) ? ("Request = " + compaction.getRequest()) - : ("Store = " + store.toString() + ", pri = " + queuedPriority); - } - - private void doCompaction(User user) { - // Common case - system compaction without a file selection. Select now. - if (this.compaction == null) { - int oldPriority = this.queuedPriority; - this.queuedPriority = this.store.getCompactPriority(); - if (this.queuedPriority > oldPriority) { - // Store priority decreased while we were in queue (due to some other compaction?), - // requeue with new priority to avoid blocking potential higher priorities. 
- this.parent.execute(this); - return; - } - try { - this.compaction = selectCompaction(this.region, this.store, queuedPriority, null, user); - } catch (IOException ex) { - LOG.error("Compaction selection failed " + this, ex); - server.checkFileSystem(); - return; - } - if (this.compaction == null) return; // nothing to do - // Now see if we are in correct pool for the size; if not, go to the correct one. - // We might end up waiting for a while, so cancel the selection. - assert this.compaction.hasSelection(); - ThreadPoolExecutor pool = store.throttleCompaction( - compaction.getRequest().getSize()) ? longCompactions : shortCompactions; - - // Long compaction pool can process small job - // Short compaction pool should not process large job - if (this.parent == shortCompactions && pool == longCompactions) { - this.store.cancelRequestedCompaction(this.compaction); - this.compaction = null; - this.parent = pool; - this.parent.execute(this); - return; - } - } - // Finally we can compact something. - assert this.compaction != null; - - this.compaction.getRequest().beforeExecute(); - try { - // Note: please don't put single-compaction logic here; - // put it into region/store/etc. This is CST logic. - long start = EnvironmentEdgeManager.currentTime(); - boolean completed = - region.compact(compaction, store, compactionThroughputController, user); - long now = EnvironmentEdgeManager.currentTime(); - LOG.info(((completed) ? "Completed" : "Aborted") + " compaction: " + - this + "; duration=" + StringUtils.formatTimeDiff(now, start)); - if (completed) { - // degenerate case: blocked regions require recursive enqueues - if (store.getCompactPriority() <= 0) { - requestSystemCompaction(region, store, "Recursive enqueue"); - } else { - // see if the compaction has caused us to exceed max region size - requestSplit(region); - } - } - } catch (IOException ex) { - IOException remoteEx = - ex instanceof RemoteException ? ((RemoteException) ex).unwrapRemoteException() : ex; - LOG.error("Compaction failed " + this, remoteEx); - if (remoteEx != ex) { - LOG.info("Compaction failed at original callstack: " + formatStackTrace(ex)); - } - region.reportCompactionRequestFailure(); - server.checkFileSystem(); - } catch (Exception ex) { - LOG.error("Compaction failed " + this, ex); - region.reportCompactionRequestFailure(); - server.checkFileSystem(); - } finally { - LOG.debug("CompactSplitThread Status: " + CompactSplitThread.this); - } - this.compaction.getRequest().afterExecute(); - } - - @Override - public void run() { - Preconditions.checkNotNull(server); - if (server.isStopped() - || (region.getTableDesc() != null && !region.getTableDesc().isCompactionEnabled())) { - return; - } - doCompaction(user); - } - - private String formatStackTrace(Exception ex) { - StringWriter sw = new StringWriter(); - PrintWriter pw = new PrintWriter(sw); - ex.printStackTrace(pw); - pw.flush(); - return sw.toString(); - } - - @Override - public int compareTo(CompactionRunner o) { - // Only compare the underlying request (if any), for queue sorting purposes. - int compareVal = queuedPriority - o.queuedPriority; // compare priority - if (compareVal != 0) return compareVal; - CompactionContext tc = this.compaction, oc = o.compaction; - // Sort pre-selected (user?) compactions before system ones with equal priority. - return (tc == null) ? ((oc == null) ? 0 : 1) - : ((oc == null) ? -1 : tc.getRequest().compareTo(oc.getRequest())); - } - } - - /** - * Cleanup class to use when rejecting a compaction request from the queue. 
- */ - private static class Rejection implements RejectedExecutionHandler { - @Override - public void rejectedExecution(Runnable runnable, ThreadPoolExecutor pool) { - if (runnable instanceof CompactionRunner) { - CompactionRunner runner = (CompactionRunner)runnable; - LOG.debug("Compaction Rejected: " + runner); - runner.store.cancelRequestedCompaction(runner.compaction); - } - } - } - - /** - * {@inheritDoc} - */ - @Override - public void onConfigurationChange(Configuration newConf) { - // Check if number of large / small compaction threads has changed, and then - // adjust the core pool size of the thread pools, by using the - // setCorePoolSize() method. According to the javadocs, it is safe to - // change the core pool size on-the-fly. We need to reset the maximum - // pool size, as well. - int largeThreads = Math.max(1, newConf.getInt( - LARGE_COMPACTION_THREADS, - LARGE_COMPACTION_THREADS_DEFAULT)); - if (this.longCompactions.getCorePoolSize() != largeThreads) { - LOG.info("Changing the value of " + LARGE_COMPACTION_THREADS + - " from " + this.longCompactions.getCorePoolSize() + " to " + - largeThreads); - if(this.longCompactions.getCorePoolSize() < largeThreads) { - this.longCompactions.setMaximumPoolSize(largeThreads); - this.longCompactions.setCorePoolSize(largeThreads); - } else { - this.longCompactions.setCorePoolSize(largeThreads); - this.longCompactions.setMaximumPoolSize(largeThreads); - } - } - - int smallThreads = newConf.getInt(SMALL_COMPACTION_THREADS, - SMALL_COMPACTION_THREADS_DEFAULT); - if (this.shortCompactions.getCorePoolSize() != smallThreads) { - LOG.info("Changing the value of " + SMALL_COMPACTION_THREADS + - " from " + this.shortCompactions.getCorePoolSize() + " to " + - smallThreads); - if(this.shortCompactions.getCorePoolSize() < smallThreads) { - this.shortCompactions.setMaximumPoolSize(smallThreads); - this.shortCompactions.setCorePoolSize(smallThreads); - } else { - this.shortCompactions.setCorePoolSize(smallThreads); - this.shortCompactions.setMaximumPoolSize(smallThreads); - } - } - - int splitThreads = newConf.getInt(SPLIT_THREADS, - SPLIT_THREADS_DEFAULT); - if (this.splits.getCorePoolSize() != splitThreads) { - LOG.info("Changing the value of " + SPLIT_THREADS + - " from " + this.splits.getCorePoolSize() + " to " + - splitThreads); - if(this.splits.getCorePoolSize() < splitThreads) { - this.splits.setMaximumPoolSize(splitThreads); - this.splits.setCorePoolSize(splitThreads); - } else { - this.splits.setCorePoolSize(splitThreads); - this.splits.setMaximumPoolSize(splitThreads); - } - } - - int mergeThreads = newConf.getInt(MERGE_THREADS, - MERGE_THREADS_DEFAULT); - if (this.mergePool.getCorePoolSize() != mergeThreads) { - LOG.info("Changing the value of " + MERGE_THREADS + - " from " + this.mergePool.getCorePoolSize() + " to " + - mergeThreads); - if(this.mergePool.getCorePoolSize() < mergeThreads) { - this.mergePool.setMaximumPoolSize(mergeThreads); - this.mergePool.setCorePoolSize(mergeThreads); - } else { - this.mergePool.setCorePoolSize(mergeThreads); - this.mergePool.setMaximumPoolSize(mergeThreads); - } - } - - ThroughputController old = this.compactionThroughputController; - if (old != null) { - old.stop("configuration change"); - } - this.compactionThroughputController = - CompactionThroughputControllerFactory.create(server, newConf); - - // We change this atomically here instead of reloading the config in order that upstream - // would be the only one with the flexibility to reload the config. 
- this.conf.reloadConfiguration(); - } - - protected int getSmallCompactionThreadNum() { - return this.shortCompactions.getCorePoolSize(); - } - - protected int getLargeCompactionThreadNum() { - return this.longCompactions.getCorePoolSize(); - } - - protected int getSplitThreadNum() { - return this.splits.getCorePoolSize(); - } - - protected int getMergeThreadNum() { - return this.mergePool.getCorePoolSize(); - } - - /** - * {@inheritDoc} - */ - @Override - public void registerChildren(ConfigurationManager manager) { - // No children to register. - } - - /** - * {@inheritDoc} - */ - @Override - public void deregisterChildren(ConfigurationManager manager) { - // No children to register - } - - @VisibleForTesting - public ThroughputController getCompactionThroughputController() { - return compactionThroughputController; - } - - @VisibleForTesting - public long getCompletedMergeTaskCount() { - return mergePool.getCompletedTaskCount(); - } - - @VisibleForTesting - /** - * Shutdown the long compaction thread pool. - * Should only be used in unit test to prevent long compaction thread pool from stealing job - * from short compaction queue - */ - void shutdownLongCompactions(){ - this.longCompactions.shutdown(); - } -} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index 07807fd..a0c1206 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -274,7 +274,7 @@ public class HRegionServer extends HasThread implements protected ReplicationSinkService replicationSinkHandler; // Compactions - public CompactSplitThread compactSplitThread; + public CompactSplit compactSplitThread; /** * Map of regions currently being served by this region server. Key is the @@ -903,7 +903,7 @@ public class HRegionServer extends HasThread implements this.cacheFlusher = new MemStoreFlusher(conf, this); // Compaction thread - this.compactSplitThread = new CompactSplitThread(this); + this.compactSplitThread = new CompactSplit(this); // Background thread to check for compactions; needed if region has not gotten updates // in a while. It will take care of not checking too frequently on store-by-store basis. 
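The hunk above swaps the construction over to the renamed class; as the later hunk in this same file shows, the field and the getCompactSplitThread() accessor keep their old names, so callers only see a type change. A minimal sketch of the effect on calling code (illustrative only; regionServer and LOG are placeholder names, not identifiers from this patch):

    // Old callers held a CompactSplitThread; after this patch the same accessor
    // returns the renamed CompactSplit.
    CompactSplit compactSplit = regionServer.getCompactSplitThread();
    // toString() still reports the queue sizes, e.g.
    // "compaction_queue=(0:0), split_queue=0, merge_queue=0"
    LOG.debug("Compaction/split state: " + compactSplit);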
@@ -1684,7 +1684,7 @@ public class HRegionServer extends HasThread implements final static int RANGE_OF_DELAY = 5 * 60 * 1000; // 5 min in milliseconds final static int MIN_DELAY_TIME = 0; // millisec public PeriodicMemstoreFlusher(int cacheFlushInterval, final HRegionServer server) { - super(server.getServerName() + "-MemstoreFlusherChore", server, cacheFlushInterval); + super("MemstoreFlusherChore", server, cacheFlushInterval); this.server = server; } @@ -2106,6 +2106,7 @@ public class HRegionServer extends HasThread implements + " to " + code + ": " + response.getErrorMessage()); return false; } + LOG.info("TRANSITION REPORTED " + request); return true; } catch (ServiceException se) { IOException ioe = ProtobufUtil.getRemoteException(se); @@ -2115,6 +2116,7 @@ public class HRegionServer extends HasThread implements } } } + LOG.info("TRANSITION NOT REPORTED " + request); return false; } @@ -3414,9 +3416,9 @@ public class HRegionServer extends HasThread implements } /** - * @return the underlying {@link CompactSplitThread} for the servers + * @return the underlying {@link CompactSplit} for the servers */ - public CompactSplitThread getCompactSplitThread() { + public CompactSplit getCompactSplitThread() { return this.compactSplitThread; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java index e6c2a49..ec8e212 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java @@ -122,6 +122,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegion import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest; @@ -1417,7 +1419,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, } } requestCount.increment(); - LOG.info("Close and offline " + encodedRegionNameList + " regions."); + LOG.info("Close and offline for split/merge " + encodedRegionNameList); boolean closed = regionServer.closeAndOfflineRegionForSplitOrMerge(encodedRegionNameList); CloseRegionForSplitOrMergeResponse.Builder builder = CloseRegionForSplitOrMergeResponse.newBuilder().setClosed(closed); @@ -1741,8 +1743,11 @@ public class RSRpcServices implements HBaseRPCErrorHandler, // The region is already online. This should not happen any more. 
String error = "Received OPEN for the region:" + region.getRegionNameAsString() + ", which is already online"; - regionServer.abort(error); - throw new IOException(error); + LOG.warn(error); + //regionServer.abort(error); + //throw new IOException(error); + builder.addOpeningState(RegionOpeningState.OPENED); + continue; } LOG.info("Open " + region.getRegionNameAsString()); @@ -3182,4 +3187,20 @@ public class RSRpcServices implements HBaseRPCErrorHandler, return UpdateConfigurationResponse.getDefaultInstance(); } + @Override + public ExecuteProceduresResponse executeProcedures(RpcController controller, + ExecuteProceduresRequest request) throws ServiceException { + ExecuteProceduresResponse.Builder builder = ExecuteProceduresResponse.newBuilder(); + if (request.getOpenRegionCount() > 0) { + for (OpenRegionRequest req: request.getOpenRegionList()) { + builder.addOpenRegion(openRegion(controller, req)); + } + } + if (request.getCloseRegionCount() > 0) { + for (CloseRegionRequest req: request.getCloseRegionList()) { + builder.addCloseRegion(closeRegion(controller, req)); + } + } + return builder.build(); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java index 4d44187..aa8d8b7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java @@ -561,7 +561,7 @@ public class HBaseFsck extends Configured implements Closeable { errors.print("Number of requests: " + status.getRequestsCount()); errors.print("Number of regions: " + status.getRegionsCount()); - Set rits = status.getRegionsInTransition(); + List rits = status.getRegionsInTransition(); errors.print("Number of regions in transition: " + rits.size()); if (details) { for (RegionState state: rits) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java index f11d38b..9fc4823 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java @@ -41,7 +41,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.regionserver.HRegion; -import org.apache.hadoop.hbase.master.AssignmentManager; +import org.apache.hadoop.hbase.master.assignment.AssignmentManager; /** * Utility methods for interacting with the regions. 
@@ -225,7 +225,7 @@ public abstract class ModifyRegionUtils { static ThreadPoolExecutor getRegionOpenAndInitThreadPool(final Configuration conf, final String threadNamePrefix, int regionNumber) { int maxThreads = Math.min(regionNumber, conf.getInt( - "hbase.hregion.open.and.init.threads.max", 10)); + "hbase.hregion.open.and.init.threads.max", 16)); ThreadPoolExecutor regionOpenAndInitThreadPool = Threads .getBoundedCachedThreadPool(maxThreads, 30L, TimeUnit.SECONDS, new ThreadFactory() { @@ -238,24 +238,4 @@ public abstract class ModifyRegionUtils { }); return regionOpenAndInitThreadPool; } - - /** - * Triggers a bulk assignment of the specified regions - * - * @param assignmentManager the Assignment Manger - * @param regionInfos the list of regions to assign - * @throws IOException if an error occurred during the assignment - */ - public static void assignRegions(final AssignmentManager assignmentManager, - final List regionInfos) throws IOException { - try { - assignmentManager.getRegionStates().createRegionStates(regionInfos); - assignmentManager.assign(regionInfos); - } catch (InterruptedException e) { - LOG.error("Caught " + e + " during round-robin assignment"); - InterruptedIOException ie = new InterruptedIOException(e.getMessage()); - ie.initCause(e); - throw ie; - } - } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java index 2cf2c6b..fadd09b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java @@ -696,7 +696,8 @@ public class WALSplitter { */ public static long writeRegionSequenceIdFile(final FileSystem fs, final Path regiondir, long newSeqId, long saftyBumper) throws IOException { - + // TODO: Why are we using a method in here as part of our normal region open where + // there is no splitting involved? Fix. St.Ack 01/20/2017. 
Path editsdir = WALSplitter.getRegionDirRecoveredEditsDir(regiondir); long maxSeqId = 0; FileStatus[] files = null; @@ -733,7 +734,7 @@ public class WALSplitter { throw new IOException("Failed to create SeqId file:" + newSeqIdFile); } if (LOG.isDebugEnabled()) { - LOG.debug("Wrote region seqId=" + newSeqIdFile + " to file, newSeqId=" + newSeqId + LOG.debug("Wrote file=" + newSeqIdFile + ", newSeqId=" + newSeqId + ", maxSeqId=" + maxSeqId); } } catch (FileAlreadyExistsException ignored) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java index aaee994..b2990d4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java @@ -89,9 +89,9 @@ import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.ipc.RpcServerInterface; import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException; import org.apache.hadoop.hbase.mapreduce.MapreduceTestingShim; -import org.apache.hadoop.hbase.master.AssignmentManager; +import org.apache.hadoop.hbase.master.assignment.AssignmentManager; +import org.apache.hadoop.hbase.master.assignment.RegionStates; import org.apache.hadoop.hbase.master.HMaster; -import org.apache.hadoop.hbase.master.RegionStates; import org.apache.hadoop.hbase.master.ServerManager; import org.apache.hadoop.hbase.regionserver.BloomType; import org.apache.hadoop.hbase.regionserver.HRegion; @@ -3935,8 +3935,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { if (master == null) return false; AssignmentManager am = master.getAssignmentManager(); if (am == null) return false; - final RegionStates regionStates = am.getRegionStates(); - return !regionStates.isRegionsInTransition(); + return !am.hasRegionsInTransition(); } }; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java index 03c5524..ea1f4c8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java @@ -21,13 +21,18 @@ package org.apache.hadoop.hbase; import static org.junit.Assert.assertEquals; import static org.junit.Assert.fail; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.List; + import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.RegionLocator; -import org.apache.hadoop.hbase.master.RegionStates; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.testclassification.FlakeyTests; @@ -43,12 +48,6 @@ import org.junit.runner.RunWith; import org.junit.runners.Parameterized; import org.junit.runners.Parameterized.Parameters; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.List; - /** * Test whether region re-balancing works. 
(HBASE-71) */ diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java index f694210..0b5b868 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java @@ -42,23 +42,18 @@ import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.InvalidFamilyOperationException; -import org.apache.hadoop.hbase.MasterNotRunningException; import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotDisabledException; import org.apache.hadoop.hbase.TableNotEnabledException; import org.apache.hadoop.hbase.TableNotFoundException; -import org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.hadoop.hbase.exceptions.MergeRegionException; -import org.apache.hadoop.hbase.master.HMaster; -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter; -import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService; -import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.Store; import org.apache.hadoop.hbase.regionserver.StoreFile; +import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Bytes; @@ -103,7 +98,7 @@ public class TestAdmin1 { @Before public void setUp() throws Exception { - this.admin = TEST_UTIL.getHBaseAdmin(); + this.admin = TEST_UTIL.getAdmin(); } @After @@ -751,7 +746,7 @@ public class TestAdmin1 { desc = new HTableDescriptor(TABLE_2); desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY)); - admin = TEST_UTIL.getHBaseAdmin(); + admin = TEST_UTIL.getAdmin(); admin.createTable(desc, startKey, endKey, expectedRegions); try (RegionLocator l = TEST_UTIL.getConnection().getRegionLocator(TABLE_2)) { @@ -806,7 +801,7 @@ public class TestAdmin1 { desc = new HTableDescriptor(TABLE_3); desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY)); - admin = TEST_UTIL.getHBaseAdmin(); + admin = TEST_UTIL.getAdmin(); admin.createTable(desc, startKey, endKey, expectedRegions); @@ -992,7 +987,7 @@ public class TestAdmin1 { sb.append("_").append(Integer.toString(rowCounts[i])); } assertFalse(admin.tableExists(tableName)); - try(final Table table = TEST_UTIL.createTable(tableName, familyNames, + try (final Table table = TEST_UTIL.createTable(tableName, familyNames, numVersions, blockSize); final RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName)) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java index eb15d91..3898d96 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java @@ -54,7 +54,7 @@ import 
org.apache.hadoop.hbase.TableNotEnabledException; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.hadoop.hbase.constraint.ConstraintException; -import org.apache.hadoop.hbase.master.AssignmentManager; +import org.apache.hadoop.hbase.master.assignment.AssignmentManager; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.regionserver.Region; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java index 5ff7e77..ad4cb83 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java @@ -1036,7 +1036,7 @@ public class TestHCM { Assert.assertNotNull(curServer.getOnlineRegion(regionName)); Assert.assertNull(destServer.getOnlineRegion(regionName)); Assert.assertFalse(TEST_UTIL.getMiniHBaseCluster().getMaster(). - getAssignmentManager().getRegionStates().isRegionsInTransition()); + getAssignmentManager().hasRegionsInTransition()); // Moving. It's possible that we don't have all the regions online at this point, so // the test must depends only on the region we're looking at. @@ -1049,7 +1049,7 @@ public class TestHCM { while (destServer.getOnlineRegion(regionName) == null || destServer.getRegionsInTransitionInRS().containsKey(encodedRegionNameBytes) || curServer.getRegionsInTransitionInRS().containsKey(encodedRegionNameBytes) || - master.getAssignmentManager().getRegionStates().isRegionsInTransition()) { + master.getAssignmentManager().hasRegionsInTransition()) { // wait for the move to be finished Thread.sleep(1); } @@ -1108,7 +1108,7 @@ public class TestHCM { while (curServer.getOnlineRegion(regionName) == null || destServer.getRegionsInTransitionInRS().containsKey(encodedRegionNameBytes) || curServer.getRegionsInTransitionInRS().containsKey(encodedRegionNameBytes) || - master.getAssignmentManager().getRegionStates().isRegionsInTransition()) { + master.getAssignmentManager().hasRegionsInTransition()) { // wait for the move to be finished Thread.sleep(1); } @@ -1353,7 +1353,7 @@ public class TestHCM { Assert.assertNotNull(curServer.getOnlineRegion(regionName)); Assert.assertNull(destServer.getOnlineRegion(regionName)); Assert.assertFalse(TEST_UTIL.getMiniHBaseCluster().getMaster(). - getAssignmentManager().getRegionStates().isRegionsInTransition()); + getAssignmentManager().hasRegionsInTransition()); // Moving. It's possible that we don't have all the regions online at this point, so // the test must depends only on the region we're looking at. 
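The hunks above (and the one that follows) repeatedly replace the two-step getRegionStates().isRegionsInTransition() lookup with a direct hasRegionsInTransition() call on the AssignmentManager. A minimal sketch of the resulting wait loop, mirroring the pattern these tests use (illustrative only; assumed to run inside a test method that declares InterruptedException):

    // Poll until the master reports no regions in transition (same 1 ms poll the tests use).
    AssignmentManager am = TEST_UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager();
    while (am.hasRegionsInTransition()) {
      Thread.sleep(1);
    }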
@@ -1366,7 +1366,7 @@ public class TestHCM { while (destServer.getOnlineRegion(regionName) == null || destServer.getRegionsInTransitionInRS().containsKey(encodedRegionNameBytes) || curServer.getRegionsInTransitionInRS().containsKey(encodedRegionNameBytes) || - master.getAssignmentManager().getRegionStates().isRegionsInTransition()) { + master.getAssignmentManager().hasRegionsInTransition()) { // wait for the move to be finished Thread.sleep(1); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java index 527c910..63895c2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java @@ -449,9 +449,6 @@ public class TestMetaWithReplicas { // Create a meta replica (this will be the 4th one) and assign it HRegionInfo h = RegionReplicaUtil.getRegionInfoForReplica( HRegionInfo.FIRST_META_REGIONINFO, 3); - // create in-memory state otherwise master won't assign - TEST_UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager() - .getRegionStates().createRegionState(h); TEST_UTIL.assignRegion(h); HBaseFsckRepair.waitUntilAssigned(TEST_UTIL.getAdmin(), h); // check that problem exists diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java index 42fecfb..d12e3d4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java @@ -38,9 +38,6 @@ import org.apache.hadoop.hbase.MiniHBaseCluster; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.filter.ColumnPrefixFilter; import org.apache.hadoop.hbase.filter.ColumnRangeFilter; -import org.apache.hadoop.hbase.master.HMaster; -import org.apache.hadoop.hbase.master.RegionState.State; -import org.apache.hadoop.hbase.master.RegionStates; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.testclassification.ClientTests; @@ -628,11 +625,8 @@ public class TestScannersFromClientSide { } // Now open the region again. 
- HMaster master = cluster.getMaster(); - RegionStates states = master.getAssignmentManager().getRegionStates(); - states.regionOffline(hri); - states.updateRegionState(hri, State.OPENING); - ProtobufUtil.openRegion(null, rs.getRSRpcServices(), rs.getServerName(), hri); + cluster.getMaster().getAssignmentManager().reopen(hri); + startTime = EnvironmentEdgeManager.currentTime(); while (true) { if (rs.getOnlineRegion(regionName) != null) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableFavoredNodes.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableFavoredNodes.java index f587d20..46e8ac9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableFavoredNodes.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableFavoredNodes.java @@ -47,7 +47,7 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.Waiter; import org.apache.hadoop.hbase.master.LoadBalancer; -import org.apache.hadoop.hbase.master.RegionStates; +import org.apache.hadoop.hbase.master.assignment.RegionStates; import org.apache.hadoop.hbase.master.ServerManager; import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer; import org.apache.hadoop.hbase.regionserver.HRegionServer; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestChangingEncoding.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestChangingEncoding.java index a2cd50c..c9609dd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestChangingEncoding.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestChangingEncoding.java @@ -247,5 +247,4 @@ public class TestChangingEncoding { verifyAllData(); } } - -} +} \ No newline at end of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java index 3535d23..3e1eb25 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java @@ -50,12 +50,12 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.ipc.RpcServer.Call; import org.apache.hadoop.hbase.monitoring.MonitoredRPCHandlerImpl; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanRequest; +import org.apache.hadoop.hbase.security.User; +import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message; import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader; -import org.apache.hadoop.hbase.security.User; -import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message; import org.apache.hadoop.hbase.testclassification.RPCTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; @@ -76,11 +76,11 @@ import com.google.common.collect.ImmutableSet; import com.google.common.collect.Maps; @Category({RPCTests.class, SmallTests.class}) -public class TestSimpleRpcScheduler {/* +public class TestSimpleRpcScheduler { @Rule public final TestRule timeout = 
CategoryBasedTimeout.builder().withTimeout(this.getClass()). - withLookingForStuckThread(true).build();*/ + withLookingForStuckThread(true).build(); private static final Log LOG = LogFactory.getLog(TestSimpleRpcScheduler.class); @@ -457,7 +457,6 @@ public class TestSimpleRpcScheduler {/* for (int i = 0; i < 100; i++) { long time = System.currentTimeMillis(); envEdge.timeQ.put(time); - long now = System.currentTimeMillis(); CallRunner cr = getMockedCallRunner(time, 2); // LOG.info("" + i + " " + (System.currentTimeMillis() - now) + " cr=" + cr); scheduler.dispatch(cr); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java index 683e9b3..1bcc5a8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java @@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.TableDescriptors; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.executor.ExecutorService; +import org.apache.hadoop.hbase.master.assignment.AssignmentManager; import org.apache.hadoop.hbase.master.locking.LockManager; import org.apache.hadoop.hbase.favored.FavoredNodesManager; import org.apache.hadoop.hbase.master.normalizer.RegionNormalizer; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java index 467d4a5..57f72b0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java @@ -56,6 +56,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegion import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest; @@ -719,4 +721,10 @@ ClientProtos.ClientService.BlockingInterface, RegionServerServices { public SecureBulkLoadManager getSecureBulkLoadManager() { return null; } + + @Override + public ExecuteProceduresResponse executeProcedures(RpcController controller, + ExecuteProceduresRequest request) throws ServiceException { + return null; + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentListener.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentListener.java index 5100a2b..4ba5364 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentListener.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentListener.java @@ -40,6 +40,7 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; import 
org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.master.assignment.AssignmentManager; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.testclassification.MasterTests; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java deleted file mode 100644 index 97f7865..0000000 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java +++ /dev/null @@ -1,1404 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.master; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; - -import com.google.common.collect.Lists; -import com.google.common.collect.Maps; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.CoordinatedStateManager; -import org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.HColumnDescriptor; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.MetaTableAccessor; -import org.apache.hadoop.hbase.MiniHBaseCluster; -import org.apache.hadoop.hbase.MiniHBaseCluster.MiniHBaseClusterRegionServer; -import org.apache.hadoop.hbase.ServerLoad; -import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.UnknownRegionException; -import org.apache.hadoop.hbase.Waiter; -import org.apache.hadoop.hbase.client.Admin; -import org.apache.hadoop.hbase.client.Result; -import org.apache.hadoop.hbase.client.Table; -import org.apache.hadoop.hbase.coordination.ZkCoordinatedStateManager; -import org.apache.hadoop.hbase.client.TableState; -import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver; -import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; -import org.apache.hadoop.hbase.coprocessor.ObserverContext; -import 
org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; -import org.apache.hadoop.hbase.coprocessor.RegionObserver; -import org.apache.hadoop.hbase.master.balancer.StochasticLoadBalancer; -import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode; -import org.apache.hadoop.hbase.regionserver.HRegionServer; -import org.apache.hadoop.hbase.testclassification.MasterTests; -import org.apache.hadoop.hbase.testclassification.MediumTests; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -import org.apache.hadoop.hbase.util.FSUtils; -import org.apache.hadoop.hbase.util.JVMClusterUtil; -import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; -import org.apache.zookeeper.KeeperException; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.Rule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.junit.rules.TestName; - -/** - * This tests AssignmentManager with a testing cluster. - */ -@SuppressWarnings("deprecation") -@Category({MasterTests.class, MediumTests.class}) -public class TestAssignmentManagerOnCluster { - private final static byte[] FAMILY = Bytes.toBytes("FAMILY"); - private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); - final static Configuration conf = TEST_UTIL.getConfiguration(); - private static Admin admin; - - @Rule - public TestName name = new TestName(); - - @BeforeClass - public static void setUpBeforeClass() throws Exception { - // Using the our load balancer to control region plans - conf.setClass(HConstants.HBASE_MASTER_LOADBALANCER_CLASS, - MyLoadBalancer.class, LoadBalancer.class); - conf.setClass(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, - MyRegionObserver.class, RegionObserver.class); - // Reduce the maximum attempts to speed up the test - conf.setInt("hbase.assignment.maximum.attempts", 3); - conf.setInt("hbase.master.maximum.ping.server.attempts", 3); - conf.setInt("hbase.master.ping.server.retry.sleep.interval", 1); - - TEST_UTIL.startMiniCluster(1, 4, null, MyMaster.class, MyRegionServer.class); - admin = TEST_UTIL.getAdmin(); - } - - @AfterClass - public static void tearDownAfterClass() throws Exception { - TEST_UTIL.shutdownMiniCluster(); - } - - /** - * This tests restarting meta regionserver - */ - @Test (timeout=180000) - public void testRestartMetaRegionServer() throws Exception { - MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster(); - boolean stoppedARegionServer = false; - try { - HMaster master = cluster.getMaster(); - RegionStates regionStates = master.getAssignmentManager().getRegionStates(); - ServerName metaServerName = regionStates.getRegionServerOfRegion( - HRegionInfo.FIRST_META_REGIONINFO); - if (master.getServerName().equals(metaServerName)) { - // Move meta off master - metaServerName = cluster.getLiveRegionServerThreads() - .get(0).getRegionServer().getServerName(); - master.move(HRegionInfo.FIRST_META_REGIONINFO.getEncodedNameAsBytes(), - Bytes.toBytes(metaServerName.getServerName())); - TEST_UTIL.waitUntilNoRegionsInTransition(60000); - } - RegionState metaState = - MetaTableLocator.getMetaRegionState(master.getZooKeeper()); - assertEquals("Meta should be not in transition", - metaState.getState(), RegionState.State.OPEN); - assertNotEquals("Meta should be moved off master", - metaState.getServerName(), master.getServerName()); - assertEquals("Meta should be on the meta server", - metaState.getServerName(), 
metaServerName); - cluster.killRegionServer(metaServerName); - stoppedARegionServer = true; - cluster.waitForRegionServerToStop(metaServerName, 60000); - - // Wait for SSH to finish - final ServerName oldServerName = metaServerName; - final ServerManager serverManager = master.getServerManager(); - TEST_UTIL.waitFor(120000, 200, new Waiter.Predicate() { - @Override - public boolean evaluate() throws Exception { - return serverManager.isServerDead(oldServerName) - && !serverManager.areDeadServersInProgress(); - } - }); - - TEST_UTIL.waitUntilNoRegionsInTransition(60000); - // Now, make sure meta is assigned - assertTrue("Meta should be assigned", - regionStates.isRegionOnline(HRegionInfo.FIRST_META_REGIONINFO)); - // Now, make sure meta is registered in zk - metaState = MetaTableLocator.getMetaRegionState(master.getZooKeeper()); - assertEquals("Meta should be not in transition", - metaState.getState(), RegionState.State.OPEN); - assertEquals("Meta should be assigned", metaState.getServerName(), - regionStates.getRegionServerOfRegion(HRegionInfo.FIRST_META_REGIONINFO)); - assertNotEquals("Meta should be assigned on a different server", - metaState.getServerName(), metaServerName); - } finally { - if (stoppedARegionServer) { - cluster.startRegionServer(); - } - } - } - - /** - * This tests region assignment - */ - @Test (timeout=60000) - public void testAssignRegion() throws Exception { - final TableName tableName = TableName.valueOf(name.getMethodName()); - try { - HTableDescriptor desc = new HTableDescriptor(tableName); - desc.addFamily(new HColumnDescriptor(FAMILY)); - admin.createTable(desc); - - Table meta = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME); - HRegionInfo hri = new HRegionInfo( - desc.getTableName(), Bytes.toBytes("A"), Bytes.toBytes("Z")); - MetaTableAccessor.addRegionToMeta(meta, hri); - - HMaster master = TEST_UTIL.getHBaseCluster().getMaster(); - AssignmentManager am = master.getAssignmentManager(); - TEST_UTIL.assignRegion(hri); - - RegionStates regionStates = am.getRegionStates(); - ServerName serverName = regionStates.getRegionServerOfRegion(hri); - TEST_UTIL.assertRegionOnServer(hri, serverName, 200); - - // Region is assigned now. Let's assign it again. - // Master should not abort, and region should be assigned. 
- TEST_UTIL.getAdmin().assign(hri.getRegionName()); - master.getAssignmentManager().waitForAssignment(hri); - RegionState newState = regionStates.getRegionState(hri); - assertTrue(newState.isOpened()); - } finally { - TEST_UTIL.deleteTable(tableName); - } - } - - /** - * This tests region assignment on a simulated restarted server - */ - @Test (timeout=120000) - public void testAssignRegionOnRestartedServer() throws Exception { - final TableName tableName = TableName.valueOf(name.getMethodName()); - TEST_UTIL.getMiniHBaseCluster().getConf().setInt("hbase.assignment.maximum.attempts", 20); - TEST_UTIL.getMiniHBaseCluster().stopMaster(0); - //restart the master so that conf take into affect - TEST_UTIL.getMiniHBaseCluster().startMaster(); - - ServerName deadServer = null; - HMaster master = null; - try { - HTableDescriptor desc = new HTableDescriptor(tableName); - desc.addFamily(new HColumnDescriptor(FAMILY)); - admin.createTable(desc); - - Table meta = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME); - final HRegionInfo hri = new HRegionInfo( - desc.getTableName(), Bytes.toBytes("A"), Bytes.toBytes("Z")); - MetaTableAccessor.addRegionToMeta(meta, hri); - - master = TEST_UTIL.getHBaseCluster().getMaster(); - Set onlineServers = master.getServerManager().getOnlineServers().keySet(); - assertFalse("There should be some servers online", onlineServers.isEmpty()); - - // Use the first server as the destination server - ServerName destServer = onlineServers.iterator().next(); - - // Created faked dead server that is still online in master - deadServer = ServerName.valueOf(destServer.getHostname(), - destServer.getPort(), destServer.getStartcode() - 100L); - master.getServerManager().recordNewServerWithLock(deadServer, ServerLoad.EMPTY_SERVERLOAD); - - final AssignmentManager am = master.getAssignmentManager(); - RegionPlan plan = new RegionPlan(hri, null, deadServer); - am.addPlan(hri.getEncodedName(), plan); - TEST_UTIL.assignRegion(hri); - - TEST_UTIL.waitFor(60000, new Waiter.Predicate() { - @Override - public boolean evaluate() throws Exception { - return ! am.getRegionStates().isRegionInTransition(hri); - } - }); - - assertFalse("Region should be assigned", am.getRegionStates().isRegionInTransition(hri)); - } finally { - if (deadServer != null) { - master.getServerManager().expireServer(deadServer); - } - - TEST_UTIL.deleteTable(tableName); - - // reset the value for other tests - TEST_UTIL.getMiniHBaseCluster().getConf().setInt("hbase.assignment.maximum.attempts", 3); - ServerName masterServerName = TEST_UTIL.getMiniHBaseCluster().getMaster().getServerName(); - TEST_UTIL.getMiniHBaseCluster().stopMaster(masterServerName); - TEST_UTIL.getMiniHBaseCluster().startMaster(); - } - } - - /** - * This tests offlining a region - */ - @Test (timeout=60000) - public void testOfflineRegion() throws Exception { - final TableName tableName = TableName.valueOf(name.getMethodName()); - try { - HRegionInfo hri = createTableAndGetOneRegion(tableName); - - RegionStates regionStates = TEST_UTIL.getHBaseCluster(). 
- getMaster().getAssignmentManager().getRegionStates(); - ServerName serverName = regionStates.getRegionServerOfRegion(hri); - TEST_UTIL.assertRegionOnServer(hri, serverName, 200); - admin.offline(hri.getRegionName()); - - long timeoutTime = System.currentTimeMillis() + 800; - while (true) { - if (regionStates.getRegionByStateOfTable(tableName) - .get(RegionState.State.OFFLINE).contains(hri)) - break; - long now = System.currentTimeMillis(); - if (now > timeoutTime) { - fail("Failed to offline the region in time"); - break; - } - Thread.sleep(10); - } - RegionState regionState = regionStates.getRegionState(hri); - assertTrue(regionState.isOffline()); - } finally { - TEST_UTIL.deleteTable(tableName); - } - } - - /** - * This tests moving a region - */ - @Test (timeout=50000) - public void testMoveRegion() throws Exception { - final TableName tableName = TableName.valueOf(name.getMethodName()); - try { - HRegionInfo hri = createTableAndGetOneRegion(tableName); - - HMaster master = TEST_UTIL.getHBaseCluster().getMaster(); - RegionStates regionStates = master.getAssignmentManager().getRegionStates(); - ServerName serverName = regionStates.getRegionServerOfRegion(hri); - ServerManager serverManager = master.getServerManager(); - ServerName destServerName = null; - List regionServers = - TEST_UTIL.getHBaseCluster().getLiveRegionServerThreads(); - for (JVMClusterUtil.RegionServerThread regionServer: regionServers) { - HRegionServer destServer = regionServer.getRegionServer(); - destServerName = destServer.getServerName(); - if (!destServerName.equals(serverName) - && serverManager.isServerOnline(destServerName)) { - break; - } - } - assertTrue(destServerName != null - && !destServerName.equals(serverName)); - TEST_UTIL.getAdmin().move(hri.getEncodedNameAsBytes(), - Bytes.toBytes(destServerName.getServerName())); - - long timeoutTime = System.currentTimeMillis() + 30000; - while (true) { - ServerName sn = regionStates.getRegionServerOfRegion(hri); - if (sn != null && sn.equals(destServerName)) { - TEST_UTIL.assertRegionOnServer(hri, sn, 200); - break; - } - long now = System.currentTimeMillis(); - if (now > timeoutTime) { - fail("Failed to move the region in time: " - + regionStates.getRegionState(hri)); - } - regionStates.waitForUpdate(50); - } - - } finally { - TEST_UTIL.deleteTable(tableName); - } - } - - /** - * If a table is deleted, we should not be able to move it anymore. - * Otherwise, the region will be brought back. 
- * @throws Exception - */ - @Test (timeout=50000) - public void testMoveRegionOfDeletedTable() throws Exception { - final TableName tableName = TableName.valueOf(name.getMethodName()); - Admin admin = TEST_UTIL.getAdmin(); - try { - HRegionInfo hri = createTableAndGetOneRegion(tableName); - - HMaster master = TEST_UTIL.getHBaseCluster().getMaster(); - AssignmentManager am = master.getAssignmentManager(); - RegionStates regionStates = am.getRegionStates(); - ServerName serverName = regionStates.getRegionServerOfRegion(hri); - ServerName destServerName = null; - for (int i = 0; i < 3; i++) { - HRegionServer destServer = TEST_UTIL.getHBaseCluster().getRegionServer(i); - if (!destServer.getServerName().equals(serverName)) { - destServerName = destServer.getServerName(); - break; - } - } - assertTrue(destServerName != null - && !destServerName.equals(serverName)); - - TEST_UTIL.deleteTable(tableName); - - try { - admin.move(hri.getEncodedNameAsBytes(), - Bytes.toBytes(destServerName.getServerName())); - fail("We should not find the region"); - } catch (IOException ioe) { - assertTrue(ioe instanceof UnknownRegionException); - } - - am.balance(new RegionPlan(hri, serverName, destServerName)); - assertFalse("The region should not be in transition", - regionStates.isRegionInTransition(hri)); - } finally { - if (admin.tableExists(tableName)) { - TEST_UTIL.deleteTable(tableName); - } - } - } - - HRegionInfo createTableAndGetOneRegion( - final TableName tableName) throws IOException, InterruptedException { - HTableDescriptor desc = new HTableDescriptor(tableName); - desc.addFamily(new HColumnDescriptor(FAMILY)); - admin.createTable(desc, Bytes.toBytes("A"), Bytes.toBytes("Z"), 5); - - // wait till the table is assigned - HMaster master = TEST_UTIL.getHBaseCluster().getMaster(); - long timeoutTime = System.currentTimeMillis() + 1000; - while (true) { - List regions = master.getAssignmentManager(). - getRegionStates().getRegionsOfTable(tableName); - if (regions.size() > 3) { - return regions.get(2); - } - long now = System.currentTimeMillis(); - if (now > timeoutTime) { - fail("Could not find an online region"); - } - Thread.sleep(10); - } - } - - /** - * This tests assign a region while it's closing. - */ - @Test (timeout=60000) - public void testAssignWhileClosing() throws Exception { - final TableName tableName = TableName.valueOf(name.getMethodName()); - try { - HTableDescriptor desc = new HTableDescriptor(tableName); - desc.addFamily(new HColumnDescriptor(FAMILY)); - admin.createTable(desc); - - Table meta = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME); - HRegionInfo hri = new HRegionInfo( - desc.getTableName(), Bytes.toBytes("A"), Bytes.toBytes("Z")); - MetaTableAccessor.addRegionToMeta(meta, hri); - - HMaster master = TEST_UTIL.getHBaseCluster().getMaster(); - AssignmentManager am = master.getAssignmentManager(); - assertTrue(TEST_UTIL.assignRegion(hri)); - - ServerName sn = am.getRegionStates().getRegionServerOfRegion(hri); - TEST_UTIL.assertRegionOnServer(hri, sn, 6000); - MyRegionObserver.preCloseEnabled.set(true); - am.unassign(hri); - RegionState state = am.getRegionStates().getRegionState(hri); - assertEquals(RegionState.State.FAILED_CLOSE, state.getState()); - - MyRegionObserver.preCloseEnabled.set(false); - am.unassign(hri); - - // region is closing now, will be re-assigned automatically. - // now, let's forcefully assign it again. 
it should be - // assigned properly and no double-assignment - am.assign(hri, true); - - // let's check if it's assigned after it's out of transition - am.waitOnRegionToClearRegionsInTransition(hri); - assertTrue(am.waitForAssignment(hri)); - - ServerName serverName = master.getAssignmentManager(). - getRegionStates().getRegionServerOfRegion(hri); - TEST_UTIL.assertRegionOnlyOnServer(hri, serverName, 200); - } finally { - MyRegionObserver.preCloseEnabled.set(false); - TEST_UTIL.deleteTable(tableName); - } - } - - /** - * This tests region close failed - */ - @Test (timeout=60000) - public void testCloseFailed() throws Exception { - final TableName tableName = TableName.valueOf(name.getMethodName()); - try { - HTableDescriptor desc = new HTableDescriptor(tableName); - desc.addFamily(new HColumnDescriptor(FAMILY)); - admin.createTable(desc); - - Table meta = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME); - HRegionInfo hri = new HRegionInfo( - desc.getTableName(), Bytes.toBytes("A"), Bytes.toBytes("Z")); - MetaTableAccessor.addRegionToMeta(meta, hri); - - HMaster master = TEST_UTIL.getHBaseCluster().getMaster(); - AssignmentManager am = master.getAssignmentManager(); - assertTrue(TEST_UTIL.assignRegion(hri)); - ServerName sn = am.getRegionStates().getRegionServerOfRegion(hri); - TEST_UTIL.assertRegionOnServer(hri, sn, 6000); - - MyRegionObserver.preCloseEnabled.set(true); - am.unassign(hri); - RegionState state = am.getRegionStates().getRegionState(hri); - assertEquals(RegionState.State.FAILED_CLOSE, state.getState()); - - MyRegionObserver.preCloseEnabled.set(false); - am.unassign(hri); - - // region may still be assigned now since it's closing, - // let's check if it's assigned after it's out of transition - am.waitOnRegionToClearRegionsInTransition(hri); - - // region should be closed and re-assigned - assertTrue(am.waitForAssignment(hri)); - ServerName serverName = master.getAssignmentManager(). - getRegionStates().getRegionServerOfRegion(hri); - TEST_UTIL.assertRegionOnServer(hri, serverName, 200); - } finally { - MyRegionObserver.preCloseEnabled.set(false); - TEST_UTIL.deleteTable(tableName); - } - } - - /** - * This tests region open failed - */ - @Test (timeout=60000) - public void testOpenFailed() throws Exception { - final TableName tableName = TableName.valueOf(name.getMethodName()); - try { - HTableDescriptor desc = new HTableDescriptor(tableName); - desc.addFamily(new HColumnDescriptor(FAMILY)); - admin.createTable(desc); - - Table meta = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME); - HRegionInfo hri = new HRegionInfo( - desc.getTableName(), Bytes.toBytes("A"), Bytes.toBytes("Z")); - MetaTableAccessor.addRegionToMeta(meta, hri); - - MyLoadBalancer.controledRegion = hri; - - HMaster master = TEST_UTIL.getHBaseCluster().getMaster(); - AssignmentManager am = master.getAssignmentManager(); - assertFalse(TEST_UTIL.assignRegion(hri)); - - RegionState state = am.getRegionStates().getRegionState(hri); - assertEquals(RegionState.State.FAILED_OPEN, state.getState()); - // Failed to open since no plan, so it's on no server - assertNull(state.getServerName()); - - MyLoadBalancer.controledRegion = null; - assertTrue(TEST_UTIL.assignRegion(hri)); - - ServerName serverName = master.getAssignmentManager(). 
- getRegionStates().getRegionServerOfRegion(hri); - TEST_UTIL.assertRegionOnServer(hri, serverName, 200); - } finally { - MyLoadBalancer.controledRegion = null; - TEST_UTIL.deleteTable(tableName); - } - } - - /** - * This tests round-robin assignment failed due to no bulkplan - */ - @Test (timeout=60000) - public void testRoundRobinAssignmentFailed() throws Exception { - final TableName tableName = TableName.valueOf(name.getMethodName()); - try { - HTableDescriptor desc = new HTableDescriptor(tableName); - desc.addFamily(new HColumnDescriptor(FAMILY)); - admin.createTable(desc); - - Table meta = admin.getConnection().getTable(TableName.META_TABLE_NAME); - HRegionInfo hri = new HRegionInfo( - desc.getTableName(), Bytes.toBytes("A"), Bytes.toBytes("Z")); - MetaTableAccessor.addRegionToMeta(meta, hri); - - HMaster master = TEST_UTIL.getHBaseCluster().getMaster(); - AssignmentManager am = master.getAssignmentManager(); - - // round-robin assignment but balancer cannot find a plan - // assignment should fail - MyLoadBalancer.controledRegion = hri; - // if bulk assignment cannot update region state to online - // or failed_open this waits until timeout - assertFalse(TEST_UTIL.assignRegion(hri)); - RegionState state = am.getRegionStates().getRegionState(hri); - assertEquals(RegionState.State.FAILED_OPEN, state.getState()); - // Failed to open since no plan, so it's on no server - assertNull(state.getServerName()); - - // try again with valid plan - MyLoadBalancer.controledRegion = null; - assertTrue(TEST_UTIL.assignRegion(hri)); - - ServerName serverName = master.getAssignmentManager(). - getRegionStates().getRegionServerOfRegion(hri); - TEST_UTIL.assertRegionOnServer(hri, serverName, 200); - } finally { - MyLoadBalancer.controledRegion = null; - TEST_UTIL.deleteTable(tableName); - } - } - - /** - * This tests retain assignment failed due to no bulkplan - */ - @Test (timeout=60000) - public void testRetainAssignmentFailed() throws Exception { - final TableName tableName = TableName.valueOf(name.getMethodName()); - try { - HTableDescriptor desc = new HTableDescriptor(tableName); - desc.addFamily(new HColumnDescriptor(FAMILY)); - admin.createTable(desc); - - Table meta = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME); - HRegionInfo hri = new HRegionInfo( - desc.getTableName(), Bytes.toBytes("A"), Bytes.toBytes("Z")); - MetaTableAccessor.addRegionToMeta(meta, hri); - - MyLoadBalancer.controledRegion = hri; - - HMaster master = TEST_UTIL.getHBaseCluster().getMaster(); - AssignmentManager am = master.getAssignmentManager(); - - Map regions = new HashMap(); - ServerName dest = TEST_UTIL.getHBaseCluster().getRegionServer(0).getServerName(); - regions.put(hri, dest); - // retainAssignment but balancer cannot find a plan - // assignment should fail - am.assign(regions); - - // if retain assignment cannot update region state to online - // or failed_open this waits until timeout - assertFalse(am.waitForAssignment(hri)); - RegionState state = am.getRegionStates().getRegionState(hri); - assertEquals(RegionState.State.FAILED_OPEN, state.getState()); - // Failed to open since no plan, so it's on no server - assertNull(state.getServerName()); - - // try retainAssigment again with valid plan - MyLoadBalancer.controledRegion = null; - am.assign(regions); - assertTrue(am.waitForAssignment(hri)); - - ServerName serverName = master.getAssignmentManager(). 
- getRegionStates().getRegionServerOfRegion(hri); - TEST_UTIL.assertRegionOnServer(hri, serverName, 200); - - // it retains on same server as specified - assertEquals(serverName, dest); - } finally { - MyLoadBalancer.controledRegion = null; - TEST_UTIL.deleteTable(tableName); - } - } - - /** - * This tests region open failure which is not recoverable - */ - @Test (timeout=60000) - public void testOpenFailedUnrecoverable() throws Exception { - final TableName tableName = TableName.valueOf(name.getMethodName()); - try { - HTableDescriptor desc = new HTableDescriptor(tableName); - desc.addFamily(new HColumnDescriptor(FAMILY)); - admin.createTable(desc); - - Table meta = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME); - HRegionInfo hri = new HRegionInfo( - desc.getTableName(), Bytes.toBytes("A"), Bytes.toBytes("Z")); - MetaTableAccessor.addRegionToMeta(meta, hri); - - FileSystem fs = FileSystem.get(conf); - Path tableDir= FSUtils.getTableDir(FSUtils.getRootDir(conf), tableName); - Path regionDir = new Path(tableDir, hri.getEncodedName()); - // create a file named the same as the region dir to - // mess up with region opening - fs.create(regionDir, true); - - HMaster master = TEST_UTIL.getHBaseCluster().getMaster(); - AssignmentManager am = master.getAssignmentManager(); - assertFalse(TEST_UTIL.assignRegion(hri)); - - RegionState state = am.getRegionStates().getRegionState(hri); - assertEquals(RegionState.State.FAILED_OPEN, state.getState()); - // Failed to open due to file system issue. Region state should - // carry the opening region server so that we can force close it - // later on before opening it again. See HBASE-9092. - assertNotNull(state.getServerName()); - - // remove the blocking file, so that region can be opened - fs.delete(regionDir, true); - assertTrue(TEST_UTIL.assignRegion(hri)); - - ServerName serverName = master.getAssignmentManager(). 
- getRegionStates().getRegionServerOfRegion(hri); - TEST_UTIL.assertRegionOnServer(hri, serverName, 200); - } finally { - TEST_UTIL.deleteTable(tableName); - } - } - - @Test (timeout=60000) - public void testSSHWhenDisablingTableRegionsInOpeningOrPendingOpenState() throws Exception { - final TableName table = - TableName.valueOf - ("testSSHWhenDisablingTableRegionsInOpeningOrPendingOpenState"); - AssignmentManager am = TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager(); - HRegionInfo hri = null; - ServerName serverName = null; - try { - hri = createTableAndGetOneRegion(table); - serverName = am.getRegionStates().getRegionServerOfRegion(hri); - ServerName destServerName = null; - HRegionServer destServer = null; - for (int i = 0; i < 3; i++) { - destServer = TEST_UTIL.getHBaseCluster().getRegionServer(i); - if (!destServer.getServerName().equals(serverName)) { - destServerName = destServer.getServerName(); - break; - } - } - am.regionOffline(hri); - am.getRegionStates().updateRegionState(hri, RegionState.State.PENDING_OPEN, destServerName); - - am.getTableStateManager().setTableState(table, TableState.State.DISABLING); - List toAssignRegions = am.cleanOutCrashedServerReferences(destServerName); - assertTrue("Regions to be assigned should be empty.", toAssignRegions.isEmpty()); - assertTrue("Regions to be assigned should be empty.", am.getRegionStates() - .getRegionState(hri).isOffline()); - } finally { - if (hri != null && serverName != null) { - am.regionOnline(hri, serverName); - } - am.getTableStateManager().setTableState(table, TableState.State.ENABLED); - TEST_UTIL.getAdmin().disableTable(table); - TEST_UTIL.deleteTable(table); - } - } - - /** - * This tests region close hanging - */ - @Test (timeout=60000) - public void testCloseHang() throws Exception { - final TableName tableName = TableName.valueOf(name.getMethodName()); - try { - HTableDescriptor desc = new HTableDescriptor(tableName); - desc.addFamily(new HColumnDescriptor(FAMILY)); - admin.createTable(desc); - - Table meta = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME); - HRegionInfo hri = new HRegionInfo( - desc.getTableName(), Bytes.toBytes("A"), Bytes.toBytes("Z")); - MetaTableAccessor.addRegionToMeta(meta, hri); - - HMaster master = TEST_UTIL.getHBaseCluster().getMaster(); - AssignmentManager am = master.getAssignmentManager(); - assertTrue(TEST_UTIL.assignRegion(hri)); - ServerName sn = am.getRegionStates().getRegionServerOfRegion(hri); - TEST_UTIL.assertRegionOnServer(hri, sn, 6000); - - MyRegionObserver.postCloseEnabled.set(true); - am.unassign(hri); - - // Let region closing move ahead. The region should be closed - // properly and re-assigned automatically - MyRegionObserver.postCloseEnabled.set(false); - - // region may still be assigned now since it's closing, - // let's check if it's assigned after it's out of transition - am.waitOnRegionToClearRegionsInTransition(hri); - - // region should be closed and re-assigned - assertTrue(am.waitForAssignment(hri)); - ServerName serverName = master.getAssignmentManager(). 
- getRegionStates().getRegionServerOfRegion(hri); - TEST_UTIL.assertRegionOnServer(hri, serverName, 200); - } finally { - MyRegionObserver.postCloseEnabled.set(false); - TEST_UTIL.deleteTable(tableName); - } - } - - /** - * This tests region close racing with open - */ - @Test (timeout=60000) - public void testOpenCloseRacing() throws Exception { - final TableName tableName = TableName.valueOf(name.getMethodName()); - try { - HTableDescriptor desc = new HTableDescriptor(tableName); - desc.addFamily(new HColumnDescriptor(FAMILY)); - admin.createTable(desc); - - Table meta = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME); - HRegionInfo hri = new HRegionInfo( - desc.getTableName(), Bytes.toBytes("A"), Bytes.toBytes("Z")); - MetaTableAccessor.addRegionToMeta(meta, hri); - meta.close(); - - MyRegionObserver.postOpenEnabled.set(true); - MyRegionObserver.postOpenCalled = false; - HMaster master = TEST_UTIL.getHBaseCluster().getMaster(); - AssignmentManager am = master.getAssignmentManager(); - // Region will be opened, but it won't complete - am.assign(hri); - long end = EnvironmentEdgeManager.currentTime() + 20000; - // Wait till postOpen is called - while (!MyRegionObserver.postOpenCalled ) { - assertFalse("Timed out waiting for postOpen to be called", - EnvironmentEdgeManager.currentTime() > end); - Thread.sleep(300); - } - - // Now let's unassign it, it should do nothing - am.unassign(hri); - RegionState state = am.getRegionStates().getRegionState(hri); - ServerName oldServerName = state.getServerName(); - assertTrue(state.isOpening() && oldServerName != null); - - // Now the region is stuck in opening - // Let's forcefully re-assign it to trigger closing/opening - // racing. This test is to make sure this scenario - // is handled properly. - MyRegionObserver.postOpenEnabled.set(false); - ServerName destServerName = null; - int numRS = TEST_UTIL.getHBaseCluster().getLiveRegionServerThreads().size(); - for (int i = 0; i < numRS; i++) { - HRegionServer destServer = TEST_UTIL.getHBaseCluster().getRegionServer(i); - if (!destServer.getServerName().equals(oldServerName)) { - destServerName = destServer.getServerName(); - break; - } - } - assertNotNull(destServerName); - assertFalse("Region should be assigned on a new region server", - oldServerName.equals(destServerName)); - List regions = new ArrayList(); - regions.add(hri); - am.assign(destServerName, regions); - - // let's check if it's assigned after it's out of transition - am.waitOnRegionToClearRegionsInTransition(hri); - assertTrue(am.waitForAssignment(hri)); - - ServerName serverName = master.getAssignmentManager(). 
- getRegionStates().getRegionServerOfRegion(hri); - TEST_UTIL.assertRegionOnlyOnServer(hri, serverName, 6000); - } finally { - MyRegionObserver.postOpenEnabled.set(false); - TEST_UTIL.deleteTable(tableName); - } - } - - /** - * Test force unassign/assign a region hosted on a dead server - */ - @Test (timeout=60000) - public void testAssignRacingWithSSH() throws Exception { - final TableName tableName = TableName.valueOf(name.getMethodName()); - MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster(); - MyMaster master = null; - try { - HTableDescriptor desc = new HTableDescriptor(tableName); - desc.addFamily(new HColumnDescriptor(FAMILY)); - admin.createTable(desc); - - Table meta = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME); - HRegionInfo hri = new HRegionInfo( - desc.getTableName(), Bytes.toBytes("A"), Bytes.toBytes("Z")); - MetaTableAccessor.addRegionToMeta(meta, hri); - - // Assign the region - master = (MyMaster)cluster.getMaster(); - AssignmentManager am = master.getAssignmentManager(); - - am.assign(hri); - - // Hold SSH before killing the hosting server - master.enableSSH(false); - - - RegionStates regionStates = am.getRegionStates(); - ServerName metaServer = regionStates.getRegionServerOfRegion( - HRegionInfo.FIRST_META_REGIONINFO); - while (true) { - assertTrue(am.waitForAssignment(hri)); - RegionState state = regionStates.getRegionState(hri); - ServerName oldServerName = state.getServerName(); - if (!ServerName.isSameHostnameAndPort(oldServerName, metaServer)) { - // Kill the hosting server, which doesn't have meta on it. - cluster.killRegionServer(oldServerName); - cluster.waitForRegionServerToStop(oldServerName, -1); - break; - } - int i = cluster.getServerWithMeta(); - HRegionServer rs = cluster.getRegionServer(i == 0 ? 1 : 0); - oldServerName = rs.getServerName(); - master.move(hri.getEncodedNameAsBytes(), - Bytes.toBytes(oldServerName.getServerName())); - } - - // You can't assign a dead region before SSH - am.assign(hri, true); - RegionState state = regionStates.getRegionState(hri); - assertTrue(state.isFailedClose()); - - // You can't unassign a dead region before SSH either - am.unassign(hri); - state = regionStates.getRegionState(hri); - assertTrue(state.isFailedClose()); - - // Enable SSH so that log can be split - master.enableSSH(true); - - // let's check if it's assigned after it's out of transition. - // no need to assign it manually, SSH should do it - am.waitOnRegionToClearRegionsInTransition(hri); - assertTrue(am.waitForAssignment(hri)); - - ServerName serverName = master.getAssignmentManager(). 
- getRegionStates().getRegionServerOfRegion(hri); - TEST_UTIL.assertRegionOnlyOnServer(hri, serverName, 6000); - } finally { - if (master != null) { - master.enableSSH(true); - } - TEST_UTIL.deleteTable(tableName); - cluster.startRegionServer(); - } - } - - /** - * Test SSH waiting for extra region server for assignment - */ - @Test (timeout=300000) - public void testSSHWaitForServerToAssignRegion() throws Exception { - final TableName tableName = TableName.valueOf(name.getMethodName()); - MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster(); - boolean startAServer = false; - try { - HTableDescriptor desc = new HTableDescriptor(tableName); - desc.addFamily(new HColumnDescriptor(FAMILY)); - admin.createTable(desc); - - HMaster master = cluster.getMaster(); - final ServerManager serverManager = master.getServerManager(); - MyLoadBalancer.countRegionServers = Integer.valueOf( - serverManager.countOfRegionServers()); - HRegionServer rs = TEST_UTIL.getRSForFirstRegionInTable(tableName); - assertNotNull("First region should be assigned", rs); - final ServerName serverName = rs.getServerName(); - // Wait till SSH tried to assign regions a several times - int counter = MyLoadBalancer.counter.get() + 5; - cluster.killRegionServer(serverName); - startAServer = true; - cluster.waitForRegionServerToStop(serverName, -1); - while (counter > MyLoadBalancer.counter.get()) { - Thread.sleep(1000); - } - cluster.startRegionServer(); - startAServer = false; - // Wait till the dead server is processed by SSH - TEST_UTIL.waitFor(120000, 1000, new Waiter.Predicate() { - @Override - public boolean evaluate() throws Exception { - return serverManager.isServerDead(serverName) - && !serverManager.areDeadServersInProgress(); - } - }); - TEST_UTIL.waitUntilNoRegionsInTransition(300000); - - rs = TEST_UTIL.getRSForFirstRegionInTable(tableName); - assertTrue("First region should be re-assigned to a different server", - rs != null && !serverName.equals(rs.getServerName())); - } finally { - MyLoadBalancer.countRegionServers = null; - TEST_UTIL.deleteTable(tableName); - if (startAServer) { - cluster.startRegionServer(); - } - } - } - - /** - * Test force unassign/assign a region of a disabled table - */ - @Test (timeout=60000) - public void testAssignDisabledRegion() throws Exception { - final TableName tableName = TableName.valueOf(name.getMethodName()); - MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster(); - MyMaster master = null; - try { - HTableDescriptor desc = new HTableDescriptor(tableName); - desc.addFamily(new HColumnDescriptor(FAMILY)); - admin.createTable(desc); - - Table meta = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME); - HRegionInfo hri = new HRegionInfo( - desc.getTableName(), Bytes.toBytes("A"), Bytes.toBytes("Z")); - MetaTableAccessor.addRegionToMeta(meta, hri); - - // Assign the region - master = (MyMaster)cluster.getMaster(); - AssignmentManager am = master.getAssignmentManager(); - RegionStates regionStates = am.getRegionStates(); - assertTrue(TEST_UTIL.assignRegion(hri)); - - // Disable the table - admin.disableTable(tableName); - assertTrue(regionStates.isRegionOffline(hri)); - - // You can't assign a disabled region - am.assign(hri, true); - assertTrue(regionStates.isRegionOffline(hri)); - - // You can't unassign a disabled region either - am.unassign(hri); - assertTrue(regionStates.isRegionOffline(hri)); - } finally { - TEST_UTIL.deleteTable(tableName); - } - } - - /** - * Test offlined region is assigned by SSH - */ - @Test (timeout=60000) - public void 
testAssignOfflinedRegionBySSH() throws Exception { - final TableName tableName = TableName.valueOf(name.getMethodName()); - MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster(); - MyMaster master = null; - try { - HTableDescriptor desc = new HTableDescriptor(tableName); - desc.addFamily(new HColumnDescriptor(FAMILY)); - admin.createTable(desc); - - Table meta = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME); - HRegionInfo hri = new HRegionInfo( - desc.getTableName(), Bytes.toBytes("A"), Bytes.toBytes("Z")); - MetaTableAccessor.addRegionToMeta(meta, hri); - - // Assign the region - master = (MyMaster)cluster.getMaster(); - AssignmentManager am = master.getAssignmentManager(); - am.assign(hri); - - RegionStates regionStates = am.getRegionStates(); - ServerName metaServer = regionStates.getRegionServerOfRegion( - HRegionInfo.FIRST_META_REGIONINFO); - ServerName oldServerName = null; - while (true) { - assertTrue(am.waitForAssignment(hri)); - RegionState state = regionStates.getRegionState(hri); - oldServerName = state.getServerName(); - if (!ServerName.isSameHostnameAndPort(oldServerName, metaServer)) { - // Mark the hosting server aborted, but don't actually kill it. - // It doesn't have meta on it. - MyRegionServer.abortedServer = oldServerName; - break; - } - int i = cluster.getServerWithMeta(); - HRegionServer rs = cluster.getRegionServer(i == 0 ? 1 : 0); - oldServerName = rs.getServerName(); - master.move(hri.getEncodedNameAsBytes(), - Bytes.toBytes(oldServerName.getServerName())); - } - - // Make sure the region is assigned on the dead server - assertTrue(regionStates.isRegionOnline(hri)); - assertEquals(oldServerName, regionStates.getRegionServerOfRegion(hri)); - - // Kill the hosting server, which doesn't have meta on it. - cluster.killRegionServer(oldServerName); - cluster.waitForRegionServerToStop(oldServerName, -1); - - ServerManager serverManager = master.getServerManager(); - while (!serverManager.isServerDead(oldServerName) - || serverManager.getDeadServers().areDeadServersInProgress()) { - Thread.sleep(100); - } - - // Let's check if it's assigned after it's out of transition. - // no need to assign it manually, SSH should do it - am.waitOnRegionToClearRegionsInTransition(hri); - assertTrue(am.waitForAssignment(hri)); - - ServerName serverName = master.getAssignmentManager(). 
- getRegionStates().getRegionServerOfRegion(hri); - TEST_UTIL.assertRegionOnlyOnServer(hri, serverName, 200); - } finally { - MyRegionServer.abortedServer = null; - TEST_UTIL.deleteTable(tableName); - cluster.startRegionServer(); - } - } - - /** - * Test disabled region is ignored by SSH - */ - @Test (timeout=60000) - public void testAssignDisabledRegionBySSH() throws Exception { - final TableName tableName = TableName.valueOf(name.getMethodName()); - MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster(); - MyMaster master; - try { - HTableDescriptor desc = new HTableDescriptor(tableName); - desc.addFamily(new HColumnDescriptor(FAMILY)); - admin.createTable(desc); - - Table meta = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME); - HRegionInfo hri = new HRegionInfo( - desc.getTableName(), Bytes.toBytes("A"), Bytes.toBytes("Z")); - MetaTableAccessor.addRegionToMeta(meta, hri); - - // Assign the region - master = (MyMaster)cluster.getMaster(); - AssignmentManager am = master.getAssignmentManager(); - am.assign(hri); - - RegionStates regionStates = am.getRegionStates(); - ServerName metaServer = regionStates.getRegionServerOfRegion( - HRegionInfo.FIRST_META_REGIONINFO); - ServerName oldServerName = null; - while (true) { - assertTrue(am.waitForAssignment(hri)); - RegionState state = regionStates.getRegionState(hri); - oldServerName = state.getServerName(); - if (!ServerName.isSameHostnameAndPort(oldServerName, metaServer)) { - // Mark the hosting server aborted, but don't actually kill it. - // It doesn't have meta on it. - MyRegionServer.abortedServer = oldServerName; - break; - } - int i = cluster.getServerWithMeta(); - HRegionServer rs = cluster.getRegionServer(i == 0 ? 1 : 0); - oldServerName = rs.getServerName(); - master.move(hri.getEncodedNameAsBytes(), - Bytes.toBytes(oldServerName.getServerName())); - } - - // Make sure the region is assigned on the dead server - assertTrue(regionStates.isRegionOnline(hri)); - assertEquals(oldServerName, regionStates.getRegionServerOfRegion(hri)); - - // Disable the table now. - master.disableTable(hri.getTable(), HConstants.NO_NONCE, HConstants.NO_NONCE); - - // Kill the hosting server, which doesn't have meta on it. - cluster.killRegionServer(oldServerName); - cluster.waitForRegionServerToStop(oldServerName, -1); - - ServerManager serverManager = master.getServerManager(); - while (!serverManager.isServerDead(oldServerName) - || serverManager.getDeadServers().areDeadServersInProgress()) { - Thread.sleep(100); - } - - // Wait till no more RIT, the region should be offline. 
- TEST_UTIL.waitUntilNoRegionsInTransition(60000); - assertTrue(regionStates.isRegionOffline(hri)); - } finally { - MyRegionServer.abortedServer = null; - TEST_UTIL.deleteTable(tableName); - cluster.startRegionServer(); - } - } - - /** - * Test that region state transition call is idempotent - */ - @Test(timeout = 60000) - public void testReportRegionStateTransition() throws Exception { - final TableName tableName = TableName.valueOf(name.getMethodName()); - try { - MyRegionServer.simulateRetry = true; - HTableDescriptor desc = new HTableDescriptor(tableName); - desc.addFamily(new HColumnDescriptor(FAMILY)); - admin.createTable(desc); - Table meta = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME); - HRegionInfo hri = - new HRegionInfo(desc.getTableName(), Bytes.toBytes("A"), Bytes.toBytes("Z")); - MetaTableAccessor.addRegionToMeta(meta, hri); - HMaster master = TEST_UTIL.getHBaseCluster().getMaster(); - AssignmentManager am = master.getAssignmentManager(); - TEST_UTIL.assignRegion(hri); - RegionStates regionStates = am.getRegionStates(); - ServerName serverName = regionStates.getRegionServerOfRegion(hri); - // Assert the the region is actually open on the server - TEST_UTIL.assertRegionOnServer(hri, serverName, 200); - // Closing region should just work fine - admin.disableTable(tableName); - assertTrue(regionStates.isRegionOffline(hri)); - List regions = TEST_UTIL.getAdmin().getOnlineRegions(serverName); - assertTrue(!regions.contains(hri)); - } finally { - MyRegionServer.simulateRetry = false; - TEST_UTIL.deleteTable(tableName); - } - } - - /** - * Test concurrent updates to meta when meta is not on master - * @throws Exception - */ - @Test(timeout = 30000) - public void testUpdatesRemoteMeta() throws Exception { - conf.setInt("hbase.regionstatestore.meta.connection", 3); - final RegionStateStore rss = - new RegionStateStore(new MyMaster(conf, new ZkCoordinatedStateManager())); - rss.start(); - // Create 10 threads and make each do 10 puts related to region state update - Thread[] th = new Thread[10]; - List nameList = new ArrayList(); - List tableNameList = new ArrayList(); - for (int i = 0; i < th.length; i++) { - th[i] = new Thread() { - @Override - public void run() { - HRegionInfo[] hri = new HRegionInfo[10]; - ServerName serverName = ServerName.valueOf("dummyhost", 1000, 1234); - for (int i = 0; i < 10; i++) { - hri[i] = new HRegionInfo(TableName.valueOf(Thread.currentThread().getName() + "_" + i)); - RegionState newState = new RegionState(hri[i], RegionState.State.OPEN, serverName); - RegionState oldState = - new RegionState(hri[i], RegionState.State.PENDING_OPEN, serverName); - rss.updateRegionState(1, newState, oldState); - } - } - }; - th[i].start(); - nameList.add(th[i].getName()); - } - for (int i = 0; i < th.length; i++) { - th[i].join(); - } - // Add all the expected table names in meta to tableNameList - for (String name : nameList) { - for (int i = 0; i < 10; i++) { - tableNameList.add(TableName.valueOf(name + "_" + i)); - } - } - List metaRows = MetaTableAccessor.fullScanRegions(admin.getConnection()); - int count = 0; - // Check all 100 rows are in meta - for (Result result : metaRows) { - if (tableNameList.contains(HRegionInfo.getTable(result.getRow()))) { - count++; - if (count == 100) { - break; - } - } - } - assertTrue(count == 100); - rss.stop(); - } - - static class MyLoadBalancer extends StochasticLoadBalancer { - // For this region, if specified, always assign to nowhere - static volatile HRegionInfo controledRegion = null; - - static volatile 
Integer countRegionServers = null; - static AtomicInteger counter = new AtomicInteger(0); - - @Override - public ServerName randomAssignment(HRegionInfo regionInfo, - List servers) { - if (regionInfo.equals(controledRegion)) { - return null; - } - return super.randomAssignment(regionInfo, servers); - } - - @Override - public Map> roundRobinAssignment( - List regions, List servers) { - if (countRegionServers != null && services != null) { - int regionServers = services.getServerManager().countOfRegionServers(); - if (regionServers < countRegionServers.intValue()) { - // Let's wait till more region servers join in. - // Before that, fail region assignments. - counter.incrementAndGet(); - return null; - } - } - if (regions.get(0).equals(controledRegion)) { - Map> m = Maps.newHashMap(); - m.put(LoadBalancer.BOGUS_SERVER_NAME, regions); - return m; - } - return super.roundRobinAssignment(regions, servers); - } - - @Override - public Map> retainAssignment( - Map regions, List servers) { - for (HRegionInfo hri : regions.keySet()) { - if (hri.equals(controledRegion)) { - Map> m = Maps.newHashMap(); - m.put(LoadBalancer.BOGUS_SERVER_NAME, Lists.newArrayList(regions.keySet())); - return m; - } - } - return super.retainAssignment(regions, servers); - } - } - - public static class MyMaster extends HMaster { - AtomicBoolean enabled = new AtomicBoolean(true); - - public MyMaster(Configuration conf, CoordinatedStateManager cp) - throws IOException, KeeperException, - InterruptedException { - super(conf, cp); - } - - @Override - public boolean isServerCrashProcessingEnabled() { - return enabled.get() && super.isServerCrashProcessingEnabled(); - } - - public void enableSSH(boolean enabled) { - this.enabled.set(enabled); - if (enabled) { - getServerManager().processQueuedDeadServers(); - } - } - } - - public static class MyRegionServer extends MiniHBaseClusterRegionServer { - static volatile ServerName abortedServer = null; - static volatile boolean simulateRetry = false; - - public MyRegionServer(Configuration conf, CoordinatedStateManager cp) - throws IOException, KeeperException, - InterruptedException { - super(conf, cp); - } - - @Override - public boolean reportRegionStateTransition(TransitionCode code, long openSeqNum, - HRegionInfo... 
hris) { - if (simulateRetry) { - // Simulate retry by calling the method twice - super.reportRegionStateTransition(code, openSeqNum, hris); - return super.reportRegionStateTransition(code, openSeqNum, hris); - } - return super.reportRegionStateTransition(code, openSeqNum, hris); - } - - @Override - public boolean isAborted() { - return getServerName().equals(abortedServer) || super.isAborted(); - } - } - - public static class MyRegionObserver extends BaseRegionObserver { - // If enabled, fail all preClose calls - static AtomicBoolean preCloseEnabled = new AtomicBoolean(false); - - // If enabled, stall postClose calls - static AtomicBoolean postCloseEnabled = new AtomicBoolean(false); - - // If enabled, stall postOpen calls - static AtomicBoolean postOpenEnabled = new AtomicBoolean(false); - - // A flag to track if postOpen is called - static volatile boolean postOpenCalled = false; - - @Override - public void preClose(ObserverContext c, - boolean abortRequested) throws IOException { - if (preCloseEnabled.get()) throw new IOException("fail preClose from coprocessor"); - } - - @Override - public void postClose(ObserverContext c, - boolean abortRequested) { - stallOnFlag(postCloseEnabled); - } - - @Override - public void postOpen(ObserverContext c) { - postOpenCalled = true; - stallOnFlag(postOpenEnabled); - } - - private void stallOnFlag(final AtomicBoolean flag) { - try { - // If enabled, stall - while (flag.get()) { - Thread.sleep(1000); - } - } catch (InterruptedException ie) { - Thread.currentThread().interrupt(); - } - } - } -} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java index 52b58f1..f9a3e70 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java @@ -53,6 +53,7 @@ import org.apache.hadoop.hbase.coordination.BaseCoordinatedStateManager; import org.apache.hadoop.hbase.coordination.SplitLogManagerCoordination; import org.apache.hadoop.hbase.coordination.SplitLogManagerCoordination.SplitLogManagerDetails; import org.apache.hadoop.hbase.io.Reference; +import org.apache.hadoop.hbase.master.assignment.AssignmentManager; import org.apache.hadoop.hbase.master.CatalogJanitor.SplitParentFirstComparator; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java index d0b8494..fe9e619 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java @@ -85,6 +85,7 @@ import org.apache.hadoop.hbase.coordination.ZKSplitLogManagerCoordination; import org.apache.hadoop.hbase.exceptions.OperationConflictException; import org.apache.hadoop.hbase.exceptions.RegionInRecoveryException; import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException; +import org.apache.hadoop.hbase.master.assignment.RegionStates; import org.apache.hadoop.hbase.master.SplitLogManager.TaskBatch; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionServer; diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java
index eb4ce99..d6210b9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.hbase.UnknownRegionException;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.client.TableState;
+import org.apache.hadoop.hbase.master.assignment.RegionStates;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterBalanceThrottling.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterBalanceThrottling.java
index 74f2c91..c020064 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterBalanceThrottling.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterBalanceThrottling.java
@@ -120,8 +120,9 @@ public class TestMasterBalanceThrottling {
       @Override
       public void run() {
         while (!stop.get()) {
-          maxCount.set(Math.max(maxCount.get(), master.getAssignmentManager().getRegionStates()
-            .getRegionsInTransitionCount()));
+          maxCount.set(Math.max(maxCount.get(),
+            master.getAssignmentManager().getRegionStates()
+            .getRegionsInTransition().size()));
           try {
             Thread.sleep(10);
           } catch (InterruptedException e) {
@@ -136,7 +137,7 @@ public class TestMasterBalanceThrottling {
   }
 
   private void unbalance(HMaster master, TableName tableName) throws Exception {
-    while (master.getAssignmentManager().getRegionStates().getRegionsInTransitionCount() > 0) {
+    while (master.getAssignmentManager().getRegionStates().getRegionsInTransition().size() > 0) {
       Thread.sleep(100);
     }
     HRegionServer biasedServer = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0);
@@ -144,7 +145,7 @@
       master.move(regionInfo.getEncodedNameAsBytes(),
         Bytes.toBytes(biasedServer.getServerName().getServerName()));
     }
-    while (master.getAssignmentManager().getRegionStates().getRegionsInTransitionCount() > 0) {
+    while (master.getAssignmentManager().getRegionStates().getRegionsInTransition().size() > 0) {
       Thread.sleep(100);
     }
   }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java
index f57d6b9..986e8d2 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java
@@ -43,6 +43,8 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.master.assignment.RegionStates;
+import org.apache.hadoop.hbase.master.assignment.RegionStateStore;
 import org.apache.hadoop.hbase.master.RegionState.State;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
@@ -252,21 +254,20 @@ public class TestMasterFailover {
     // Put the online region in pending_close. It is actually already opened.
     // This is to simulate that the region close RPC is not sent out before failover
     RegionState oldState = regionStates.getRegionState(hriOnline);
-    RegionState newState = new RegionState(
-      hriOnline, State.PENDING_CLOSE, oldState.getServerName());
+    RegionState newState = new RegionState(hriOnline, State.CLOSING, oldState.getServerName());
     stateStore.updateRegionState(HConstants.NO_SEQNUM, newState, oldState);
 
     // Put the offline region in pending_open. It is actually not opened yet.
     // This is to simulate that the region open RPC is not sent out before failover
     oldState = new RegionState(hriOffline, State.OFFLINE);
-    newState = new RegionState(hriOffline, State.PENDING_OPEN, newState.getServerName());
+    newState = new RegionState(hriOffline, State.OPENING, newState.getServerName());
     stateStore.updateRegionState(HConstants.NO_SEQNUM, newState, oldState);
 
     HRegionInfo failedClose = new HRegionInfo(offlineTable.getTableName(), null, null);
     createRegion(failedClose, rootdir, conf, offlineTable);
     MetaTableAccessor.addRegionToMeta(master.getConnection(), failedClose);
-    oldState = new RegionState(failedClose, State.PENDING_CLOSE);
+    oldState = new RegionState(failedClose, State.CLOSING);
     newState = new RegionState(failedClose, State.FAILED_CLOSE, newState.getServerName());
     stateStore.updateRegionState(HConstants.NO_SEQNUM, newState, oldState);
 
@@ -276,7 +277,7 @@
     // Simulate a region transitioning to failed open when the region server reports the
     // transition as FAILED_OPEN
-    oldState = new RegionState(failedOpen, State.PENDING_OPEN);
+    oldState = new RegionState(failedOpen, State.OPENING);
     newState = new RegionState(failedOpen, State.FAILED_OPEN, newState.getServerName());
     stateStore.updateRegionState(HConstants.NO_SEQNUM, newState, oldState);
 
@@ -378,12 +379,12 @@
     assertEquals("hbase:meta should be onlined on RS",
       metaState.getState(), State.OPEN);
 
-    // Update meta state as PENDING_OPEN, then kill master
+    // Update meta state as OPENING, then kill master
     // that simulates, that RS successfully deployed, but
     // RPC was lost right before failure.
     // region server should expire (how it can be verified?)
     MetaTableLocator.setMetaLocation(activeMaster.getZooKeeper(),
-      rs.getServerName(), State.PENDING_OPEN);
+      rs.getServerName(), State.OPENING);
     Region meta = rs.getFromOnlineRegions(HRegionInfo.FIRST_META_REGIONINFO.getEncodedName());
     rs.removeFromOnlineRegions(meta, null);
     ((HRegion)meta).close();
@@ -410,12 +411,12 @@
     assertEquals("hbase:meta should be onlined on RS",
       metaState.getState(), State.OPEN);
 
-    // Update meta state as PENDING_CLOSE, then kill master
+    // Update meta state as CLOSING, then kill master
    // that simulates, that RS successfully deployed, but
    // RPC was lost right before failure.
    // region server should expire (how it can be verified?)
     MetaTableLocator.setMetaLocation(activeMaster.getZooKeeper(),
-      rs.getServerName(), State.PENDING_CLOSE);
+      rs.getServerName(), State.CLOSING);
 
     log("Aborting master");
     activeMaster.abort("test-kill");
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterStatusServlet.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterStatusServlet.java
index af54ffc..9676607 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterStatusServlet.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterStatusServlet.java
@@ -34,6 +34,8 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
+import org.apache.hadoop.hbase.master.assignment.RegionStates;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
@@ -165,7 +167,7 @@ public class TestMasterStatusServlet {
     // Add 100 regions as in-transition
     TreeSet regionsInTransition = new TreeSet(
-        RegionStates.REGION_STATE_COMPARATOR);
+        RegionStates.REGION_STATE_STAMP_COMPARATOR);
     for (byte i = 0; i < 100; i++) {
       HRegionInfo hri = new HRegionInfo(FAKE_TABLE.getTableName(),
           new byte[]{i}, new byte[]{(byte) (i+1)});
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaShutdownHandler.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaShutdownHandler.java
index a845a73..68160df 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaShutdownHandler.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaShutdownHandler.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.MiniHBaseCluster.MiniHBaseClusterRegionServer;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.Waiter;
+import org.apache.hadoop.hbase.master.assignment.RegionStates;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionState.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionState.java
index daf6d43..fe5883b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionState.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionState.java
@@ -35,14 +35,19 @@ public class TestRegionState {
   public TestName name = new TestName();
 
   @Test
-  public void test() {
-    RegionState state1 = new RegionState(
-        new HRegionInfo(TableName.valueOf(name.getMethodName())), RegionState.State.OPENING);
+  public void testSerializeDeserialize() {
+    final TableName tableName = TableName.valueOf("testtb");
+    for (RegionState.State state: RegionState.State.values()) {
+      testSerializeDeserialize(tableName, state);
+    }
+  }
+
+  private void testSerializeDeserialize(final TableName tableName, final RegionState.State state) {
+    RegionState state1 = new RegionState(new HRegionInfo(tableName), state);
     ClusterStatusProtos.RegionState protobuf1 = state1.convert();
     RegionState state2 = RegionState.convert(protobuf1);
     ClusterStatusProtos.RegionState protobuf2 = state1.convert();
-
-    assertEquals(state1, state2);
-
assertEquals(protobuf1, protobuf2); + assertEquals("RegionState does not match " + state, state1, state2); + assertEquals("Protobuf does not match " + state, protobuf1, protobuf2); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionStates.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionStates.java deleted file mode 100644 index 17004ec..0000000 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionStates.java +++ /dev/null @@ -1,144 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.master; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.ClusterConnection; -import org.apache.hadoop.hbase.client.Connection; -import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.Result; -import org.apache.hadoop.hbase.client.Table; -import org.apache.hadoop.hbase.master.RegionState.State; -import org.apache.hadoop.hbase.testclassification.MasterTests; -import org.apache.hadoop.hbase.testclassification.SmallTests; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -import java.util.Arrays; -import java.util.List; -import java.util.Map; -import java.util.UUID; -import java.util.concurrent.BrokenBarrierException; -import java.util.concurrent.CyclicBarrier; - -import org.mockito.Mockito; -import org.mockito.invocation.InvocationOnMock; -import org.mockito.stubbing.Answer; - -import java.io.IOException; -import static org.junit.Assert.assertTrue; -import static junit.framework.Assert.assertFalse; -import static org.mockito.Matchers.isA; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -@Category({MasterTests.class, SmallTests.class}) -public class TestRegionStates { - @Test (timeout=10000) - public void testCanMakeProgressThoughMetaIsDown() - throws IOException, InterruptedException, BrokenBarrierException { - MasterServices server = mock(MasterServices.class); - when(server.getServerName()).thenReturn(ServerName.valueOf("master,1,1")); - Connection connection = mock(ClusterConnection.class); - // Set up a table that gets 'stuck' when we try to fetch a row from the meta table. - // It is stuck on a CyclicBarrier latch. We use CyclicBarrier because it will tell us when - // thread is waiting on latch. 
- Table metaTable = Mockito.mock(Table.class); - final CyclicBarrier latch = new CyclicBarrier(2); - when(metaTable.get((Get)Mockito.any())).thenAnswer(new Answer() { - @Override - public Result answer(InvocationOnMock invocation) throws Throwable { - latch.await(); - throw new java.net.ConnectException("Connection refused"); - } - }); - when(connection.getTable(TableName.META_TABLE_NAME)).thenReturn(metaTable); - when(server.getConnection()).thenReturn((ClusterConnection)connection); - Configuration configuration = mock(Configuration.class); - when(server.getConfiguration()).thenReturn(configuration); - TableStateManager tsm = mock(TableStateManager.class); - ServerManager sm = mock(ServerManager.class); - when(sm.isServerOnline(isA(ServerName.class))).thenReturn(true); - - RegionStateStore rss = mock(RegionStateStore.class); - final RegionStates regionStates = new RegionStates(server, tsm, sm, rss); - final ServerName sn = mockServer("one", 1); - regionStates.updateRegionState(HRegionInfo.FIRST_META_REGIONINFO, State.SPLITTING_NEW, sn); - Thread backgroundThread = new Thread("Get stuck setting server offline") { - @Override - public void run() { - regionStates.serverOffline(sn); - } - }; - assertTrue(latch.getNumberWaiting() == 0); - backgroundThread.start(); - while (latch.getNumberWaiting() == 0); - // Verify I can do stuff with synchronized RegionStates methods, that I am not locked out. - // Below is a call that is synchronized. Can I do it and not block? - regionStates.getRegionServerOfRegion(HRegionInfo.FIRST_META_REGIONINFO); - // Done. Trip the barrier on the background thread. - latch.await(); - } - - @Test - public void testWeDontReturnDrainingServersForOurBalancePlans() throws Exception { - MasterServices server = mock(MasterServices.class); - when(server.getServerName()).thenReturn(ServerName.valueOf("master,1,1")); - Configuration configuration = mock(Configuration.class); - when(server.getConfiguration()).thenReturn(configuration); - TableStateManager tsm = mock(TableStateManager.class); - ServerManager sm = mock(ServerManager.class); - when(sm.isServerOnline(isA(ServerName.class))).thenReturn(true); - - RegionStateStore rss = mock(RegionStateStore.class); - RegionStates regionStates = new RegionStates(server, tsm, sm, rss); - - ServerName one = mockServer("one", 1); - ServerName two = mockServer("two", 1); - ServerName three = mockServer("three", 1); - - when(sm.getDrainingServersList()).thenReturn(Arrays.asList(three)); - - regionStates.regionOnline(createFakeRegion(), one); - regionStates.regionOnline(createFakeRegion(), two); - regionStates.regionOnline(createFakeRegion(), three); - - - Map>> result = - regionStates.getAssignmentsByTable(); - for (Map> map : result.values()) { - assertFalse(map.keySet().contains(three)); - } - } - - private HRegionInfo createFakeRegion() { - HRegionInfo info = mock(HRegionInfo.class); - when(info.getEncodedName()).thenReturn(UUID.randomUUID().toString()); - return info; - } - - private ServerName mockServer(String fakeHost, int fakePort) { - ServerName serverName = mock(ServerName.class); - when(serverName.getHostname()).thenReturn(fakeHost); - when(serverName.getPort()).thenReturn(fakePort); - return serverName; - } -} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java index 7c41c0f..3e3af27 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java @@ -35,6 +35,7 @@ import org.apache.hadoop.hbase.MiniHBaseCluster; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableExistsException; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.master.assignment.RegionStates; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.util.Bytes; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/AssignmentTestingUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/AssignmentTestingUtil.java new file mode 100644 index 0000000..07b989b --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/AssignmentTestingUtil.java @@ -0,0 +1,125 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.master.assignment; + +import java.util.Set; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.master.HMaster; +import org.apache.hadoop.hbase.Waiter.ExplainingPredicate; +import org.apache.hadoop.hbase.util.Threads; + +import static org.junit.Assert.assertEquals; + +@InterfaceAudience.Private +@InterfaceStability.Evolving +public abstract class AssignmentTestingUtil { + private static final Log LOG = LogFactory.getLog(AssignmentTestingUtil.class); + + private AssignmentTestingUtil() {} + + public static void waitForRegionToBeInTransition(final HBaseTestingUtility util, + final HRegionInfo hri) throws Exception { + while (!getMaster(util).getAssignmentManager().getRegionStates().isRegionInTransition(hri)) { + Threads.sleep(10); + } + } + + public static void waitForRsToBeDead(final HBaseTestingUtility util, + final ServerName serverName) throws Exception { + util.waitFor(60000, new ExplainingPredicate() { + @Override + public boolean evaluate() { + return getMaster(util).getServerManager().isServerDead(serverName); + } + + @Override + public String explainFailure() { + return "Server " + serverName + " is not dead"; + } + }); + } + + public static void stopRs(final HBaseTestingUtility util, final ServerName serverName) + throws Exception { + LOG.info("STOP REGION SERVER " + serverName); + util.getMiniHBaseCluster().stopRegionServer(serverName); + waitForRsToBeDead(util, serverName); + } + + public static void killRs(final 
HBaseTestingUtility util, final ServerName serverName) + throws Exception { + LOG.info("KILL REGION SERVER " + serverName); + util.getMiniHBaseCluster().killRegionServer(serverName); + waitForRsToBeDead(util, serverName); + } + + public static void crashRs(final HBaseTestingUtility util, final ServerName serverName, + final boolean kill) throws Exception { + if (kill) { + killRs(util, serverName); + } else { + stopRs(util, serverName); + } + } + + public static ServerName crashRsWithRegion(final HBaseTestingUtility util, + final HRegionInfo hri, final boolean kill) throws Exception { + ServerName serverName = getServerHoldingRegion(util, hri); + crashRs(util, serverName, kill); + return serverName; + } + + public static ServerName getServerHoldingRegion(final HBaseTestingUtility util, + final HRegionInfo hri) throws Exception { + ServerName serverName = util.getMiniHBaseCluster().getServerHoldingRegion( + hri.getTable(), hri.getRegionName()); + ServerName amServerName = getMaster(util).getAssignmentManager().getRegionStates() + .getRegionServerOfRegion(hri); + + // Make sure AM and MiniCluster agrees on the Server holding the region + // and that the server is online. + assertEquals(amServerName, serverName); + assertEquals(true, getMaster(util).getServerManager().isServerOnline(serverName)); + return serverName; + } + + public static boolean isServerHoldingMeta(final HBaseTestingUtility util, + final ServerName serverName) throws Exception { + for (HRegionInfo hri: getMetaRegions(util)) { + if (serverName.equals(getServerHoldingRegion(util, hri))) { + return true; + } + } + return false; + } + + public static Set getMetaRegions(final HBaseTestingUtility util) { + return getMaster(util).getAssignmentManager().getMetaRegionSet(); + } + + private static HMaster getMaster(final HBaseTestingUtility util) { + return util.getMiniHBaseCluster().getMaster(); + } +} \ No newline at end of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java new file mode 100644 index 0000000..fdee307 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java @@ -0,0 +1,201 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.master.assignment; + +import java.io.IOException; +import java.util.HashSet; +import java.util.NavigableMap; +import java.util.SortedSet; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.ServerLoad; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.master.LoadBalancer; +import org.apache.hadoop.hbase.master.MasterFileSystem; +import org.apache.hadoop.hbase.master.MasterServices; +import org.apache.hadoop.hbase.master.MasterWalManager; +import org.apache.hadoop.hbase.master.MockNoopMasterServices; +import org.apache.hadoop.hbase.master.RegionState.State; +import org.apache.hadoop.hbase.master.ServerManager; +import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory; +import org.apache.hadoop.hbase.master.procedure.MasterProcedureConstants; +import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; +import org.apache.hadoop.hbase.master.procedure.RSProcedureDispatcher; +import org.apache.hadoop.hbase.procedure2.Procedure; +import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; +import org.apache.hadoop.hbase.procedure2.store.NoopProcedureStore; +import org.apache.hadoop.hbase.procedure2.store.ProcedureStore; +import org.apache.hadoop.hbase.security.Superusers; + +public class MockMasterServices extends MockNoopMasterServices { + private final MasterFileSystem fileSystemManager; + private final MasterWalManager walManager; + private final AssignmentManager assignmentManager; + + private MasterProcedureEnv procedureEnv; + private ProcedureExecutor procedureExecutor; + private ProcedureStore procedureStore; + + private LoadBalancer balancer; + private ServerManager serverManager; + // Set of regions on a 'server'. Populated externally. Used in below faking 'cluster'. + private final NavigableMap> regionsToRegionServers; + + public MockMasterServices(Configuration conf, + NavigableMap> regionsToRegionServers) + throws IOException { + super(conf); + this.regionsToRegionServers = regionsToRegionServers; + Superusers.initialize(conf); + this.fileSystemManager = new MasterFileSystem(this); + this.walManager = new MasterWalManager(this); + this.assignmentManager = new AssignmentManager(this, new MockRegionStateStore(this)) { + public boolean isTableEnabled(final TableName tableName) { + return true; + } + + public boolean isTableDisabled(final TableName tableName) { + return false; + } + + @Override + protected boolean waitServerReportEvent(ServerName serverName, Procedure proc) { + // Make a report with current state of the server 'serverName' before we call wait.. + SortedSet regions = regionsToRegionServers.get(serverName); + getAssignmentManager().reportOnlineRegions(serverName, 0, + regions == null? 
new HashSet(): regions); + return super.waitServerReportEvent(serverName, proc); + } + }; + this.balancer = LoadBalancerFactory.getLoadBalancer(conf); + this.serverManager = new ServerManager(this); + } + + public void start(final int numServes, final RSProcedureDispatcher remoteDispatcher) + throws IOException { + startProcedureExecutor(remoteDispatcher); + assignmentManager.start(); + for (int i = 0; i < numServes; ++i) { + serverManager.regionServerReport( + ServerName.valueOf("localhost", 100 + i, 1), ServerLoad.EMPTY_SERVERLOAD); + } + } + + @Override + public void stop(String why) { + stopProcedureExecutor(); + this.assignmentManager.stop(); + } + + private void startProcedureExecutor(final RSProcedureDispatcher remoteDispatcher) + throws IOException { + final Configuration conf = getConfiguration(); + final Path logDir = new Path(fileSystemManager.getRootDir(), + MasterProcedureConstants.MASTER_PROCEDURE_LOGDIR); + + //procedureStore = new WALProcedureStore(conf, fileSystemManager.getFileSystem(), logDir, + // new MasterProcedureEnv.WALStoreLeaseRecovery(this)); + procedureStore = new NoopProcedureStore(); + procedureStore.registerListener(new MasterProcedureEnv.MasterProcedureStoreListener(this)); + + procedureEnv = new MasterProcedureEnv(this, + remoteDispatcher != null ? remoteDispatcher : new RSProcedureDispatcher(this)); + + procedureExecutor = new ProcedureExecutor(conf, procedureEnv, procedureStore, + procedureEnv.getProcedureScheduler()); + + final int numThreads = conf.getInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, + Math.max(Runtime.getRuntime().availableProcessors(), + MasterProcedureConstants.DEFAULT_MIN_MASTER_PROCEDURE_THREADS)); + final boolean abortOnCorruption = conf.getBoolean( + MasterProcedureConstants.EXECUTOR_ABORT_ON_CORRUPTION, + MasterProcedureConstants.DEFAULT_EXECUTOR_ABORT_ON_CORRUPTION); + procedureStore.start(numThreads); + procedureExecutor.start(numThreads, abortOnCorruption); + procedureEnv.getRemoteDispatcher().start(); + } + + private void stopProcedureExecutor() { + if (procedureEnv != null) { + procedureEnv.getRemoteDispatcher().stop(); + } + + if (procedureExecutor != null) { + procedureExecutor.stop(); + } + + if (procedureStore != null) { + procedureStore.stop(isAborted()); + } + } + + @Override + public boolean isInitialized() { + return true; + } + + @Override + public MasterFileSystem getMasterFileSystem() { + return fileSystemManager; + } + + @Override + public MasterWalManager getMasterWalManager() { + return walManager; + } + + @Override + public ProcedureExecutor getMasterProcedureExecutor() { + return procedureExecutor; + } + + @Override + public LoadBalancer getLoadBalancer() { + return balancer; + } + + @Override + public ServerManager getServerManager() { + return serverManager; + } + + @Override + public AssignmentManager getAssignmentManager() { + return assignmentManager; + } + + private static class MockRegionStateStore extends RegionStateStore { + public MockRegionStateStore(final MasterServices master) { + super(master); + } + + public void start() throws IOException { + } + + public void stop() { + } + + public void updateRegionLocation(final HRegionInfo regionInfo, final State state, + final ServerName regionLocation, final ServerName lastHost, final long openSeqNum) + throws IOException { + } + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.java new file 
mode 100644 index 0000000..69e7443 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.java @@ -0,0 +1,568 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.master.assignment; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.IOException; +import java.net.SocketTimeoutException; +import java.util.NavigableMap; +import java.util.Random; +import java.util.Set; +import java.util.SortedSet; +import java.util.concurrent.ConcurrentSkipListMap; +import java.util.concurrent.ConcurrentSkipListSet; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.RetriesExhaustedException; +import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException; +import org.apache.hadoop.hbase.master.MasterServices; +import org.apache.hadoop.hbase.master.RegionState.State; +import org.apache.hadoop.hbase.master.procedure.MasterProcedureConstants; +import org.apache.hadoop.hbase.master.procedure.MasterProcedureScheduler; +import org.apache.hadoop.hbase.master.procedure.ProcedureSyncWait; +import org.apache.hadoop.hbase.master.procedure.RSProcedureDispatcher; +import org.apache.hadoop.hbase.procedure2.Procedure; +import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; +import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore; +import org.apache.hadoop.hbase.procedure2.util.StringUtils; +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse; +import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse.RegionOpeningState; +import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest; +import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.FSUtils; +import org.apache.hadoop.ipc.RemoteException; +import org.apache.log4j.Level; +import org.apache.log4j.Logger; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category({MasterTests.class, MediumTests.class}) +public class TestAssignmentManager { + private static final Log LOG = LogFactory.getLog(TestAssignmentManager.class); + + static { + Logger.getLogger(MasterProcedureScheduler.class).setLevel(Level.TRACE); + } + + private static final int PROC_NTHREADS = 3; // 64; + private static final int NREGIONS = 1 * 1; // 1000; + private static final int NSERVERS = Math.max(1, NREGIONS / 200); + + private HBaseTestingUtility UTIL; + private MockRSProcedureDispatcher rsDispatcher; + private MockMasterServices master; + private AssignmentManager am; + private NavigableMap> regionsToRegionServers = + new ConcurrentSkipListMap>(); + + private void setupConfiguration(Configuration conf) throws Exception { + FSUtils.setRootDir(conf, UTIL.getDataTestDir()); + conf.setBoolean(WALProcedureStore.USE_HSYNC_CONF_KEY, false); + conf.setInt(WALProcedureStore.SYNC_WAIT_MSEC_CONF_KEY, 10); + conf.setInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, PROC_NTHREADS); + conf.setInt(RSProcedureDispatcher.RS_RPC_STARTUP_WAIT_TIME_CONF_KEY, 1000); + conf.setInt(AssignmentManager.ASSIGN_MAX_ATTEMPTS, 5); + } + + @Before + public void setUp() throws Exception { + UTIL = new HBaseTestingUtility(); + setupConfiguration(UTIL.getConfiguration()); + master = new MockMasterServices(UTIL.getConfiguration(), this.regionsToRegionServers); + rsDispatcher = new MockRSProcedureDispatcher(master); + master.start(NSERVERS, rsDispatcher); + am = master.getAssignmentManager(); + setUpMeta(); + } + + private void setUpMeta() throws Exception { + rsDispatcher.setMockRsExecutor(new GoodRsExecutor()); + am.assignMeta(HRegionInfo.FIRST_META_REGIONINFO); + am.wakeMetaLoadedEvent(); + am.setFailoverCleanupDone(true); + } + + @After + public void tearDown() throws Exception { + master.stop("tearDown"); + } + + @Test //(timeout=6000) + public void testAssignWithGoodExec() throws Exception { + testAssign(new GoodRsExecutor()); + } + + @Test(timeout=60000) + public void testAssignWithRandExec() throws Exception { + final TableName tableName = TableName.valueOf("testAssignWithRandExec"); + final HRegionInfo hri = createRegionInfo(tableName, 1); + + rsDispatcher.setMockRsExecutor(new RandRsExecutor()); + + AssignProcedure proc = am.createAssignProcedure(hri, false); + //waitOnFuture(submitProcedure(am.createAssignProcedure(hri, false, false))); + // TODO + } + + @Test(timeout=60000) + public void testSocketTimeout() throws Exception { + final TableName tableName = TableName.valueOf("testSocketTimeout"); + final HRegionInfo hri = 
createRegionInfo(tableName, 1); + + rsDispatcher.setMockRsExecutor(new SocketTimeoutRsExecutor(20, 3)); + waitOnFuture(submitProcedure(am.createAssignProcedure(hri, false))); + + rsDispatcher.setMockRsExecutor(new SocketTimeoutRsExecutor(20, 3)); + waitOnFuture(submitProcedure(am.createUnassignProcedure(hri, null, false))); + } + + @Test(timeout=60000) + public void testServerNotYetRunning() throws Exception { + testRetriesExhaustedFailure(TableName.valueOf("testServerNotYetRunning"), + new ServerNotYetRunningRsExecutor()); + } + + private void testRetriesExhaustedFailure(final TableName tableName, + final MockRSExecutor executor) throws Exception { + final HRegionInfo hri = createRegionInfo(tableName, 1); + + // Test Assign operation failure + rsDispatcher.setMockRsExecutor(executor); + try { + waitOnFuture(submitProcedure(am.createAssignProcedure(hri, false))); + fail("unexpected assign completion"); + } catch (RetriesExhaustedException e) { + // expected exception + LOG.info("expected exception from assign operation: " + e.getMessage(), e); + } + + // Assign the region (without problems) + rsDispatcher.setMockRsExecutor(new GoodRsExecutor()); + waitOnFuture(submitProcedure(am.createAssignProcedure(hri, false))); + + // Test Unassign operation failure + rsDispatcher.setMockRsExecutor(executor); + waitOnFuture(submitProcedure(am.createUnassignProcedure(hri, null, false))); + } + + + @Test(timeout=60000) + public void testIOExceptionOnAssignment() throws Exception { + testFailedOpen(TableName.valueOf("testExceptionOnAssignment"), + new FaultyRsExecutor(new IOException("test fault"))); + } + + @Test(timeout=60000) + public void testDoNotRetryExceptionOnAssignment() throws Exception { + testFailedOpen(TableName.valueOf("testDoNotRetryExceptionOnAssignment"), + new FaultyRsExecutor(new DoNotRetryIOException("test do not retry fault"))); + } + + private void testFailedOpen(final TableName tableName, + final MockRSExecutor executor) throws Exception { + final HRegionInfo hri = createRegionInfo(tableName, 1); + + // Test Assign operation failure + rsDispatcher.setMockRsExecutor(executor); + try { + waitOnFuture(submitProcedure(am.createAssignProcedure(hri, false))); + fail("unexpected assign completion"); + } catch (RetriesExhaustedException e) { + // expected exception + LOG.info("REGION STATE " + am.getRegionStates().getRegionNode(hri)); + LOG.info("expected exception from assign operation: " + e.getMessage(), e); + assertEquals(true, am.getRegionStates().getRegionState(hri).isFailedOpen()); + } + } + + private void testAssign(final MockRSExecutor executor) throws Exception { + testAssign(executor, NREGIONS); + } + + private void testAssign(final MockRSExecutor executor, final int nregions) throws Exception { + rsDispatcher.setMockRsExecutor(executor); + + AssignProcedure[] assignments = new AssignProcedure[nregions]; + + long st = System.currentTimeMillis(); + bulkSubmit(assignments); + + for (int i = 0; i < assignments.length; ++i) { + ProcedureTestingUtility.waitProcedure( + master.getMasterProcedureExecutor(), assignments[i]); + assertTrue(assignments[i].toString(), assignments[i].isSuccess()); + } + long et = System.currentTimeMillis(); + float sec = ((et - st) / 1000.0f); + LOG.info(String.format("[T] Assigning %dprocs in %s (%.2fproc/sec)", + assignments.length, StringUtils.humanTimeDiff(et - st), assignments.length / sec)); + } + + @Test + public void testAssignAnAssignedRegion() throws Exception { + final TableName tableName = TableName.valueOf("testAssignAnAssignedRegion"); + final 
HRegionInfo hri = createRegionInfo(tableName, 1); + + rsDispatcher.setMockRsExecutor(new GoodRsExecutor()); + + final Future futureA = submitProcedure(am.createAssignProcedure(hri, false)); + final Future futureB = submitProcedure(am.createAssignProcedure(hri, false)); + + // wait first assign + waitOnFuture(futureA); + am.getRegionStates().isRegionInState(hri, State.OPEN); + + // wait second assign + try { + waitOnFuture(futureB); + fail("expected RegionAlreadyAssignedException"); + } catch (RegionAlreadyAssignedException e) { + LOG.info("got expected exception: " + e.getMessage()); + } + + am.getRegionStates().isRegionInState(hri, State.OPEN); + } + + @Test + public void testUnassignAnUnassignedRegion() throws Exception { + final TableName tableName = TableName.valueOf("testUnassignAnUnassignedRegion"); + final HRegionInfo hri = createRegionInfo(tableName, 1); + + rsDispatcher.setMockRsExecutor(new GoodRsExecutor()); + + // assign the region first + waitOnFuture(submitProcedure(am.createAssignProcedure(hri, false))); + + final Future futureA = submitProcedure(am.createUnassignProcedure(hri, null, false)); + final Future futureB = submitProcedure(am.createUnassignProcedure(hri, null, false)); + + // wait first unassign + waitOnFuture(futureA); + am.getRegionStates().isRegionInState(hri, State.CLOSED); + + // wait second unassign + try { + waitOnFuture(futureB); + fail("expected RegionNotAssignedException"); + } catch (RegionNotAssignedException e) { + LOG.info("got expected exception: " + e.getMessage()); + } + + am.getRegionStates().isRegionInState(hri, State.CLOSED); + } + + private Future submitProcedure(final Procedure proc) { + return ProcedureSyncWait.submitProcedure(master.getMasterProcedureExecutor(), proc); + } + + private byte[] waitOnFuture(final Future future) throws Exception { + try { + return future.get(); + } catch (ExecutionException e) { + throw (Exception)e.getCause(); + } + } + + // ============================================================================================ + // Helpers + // ============================================================================================ + private void bulkSubmit(final AssignProcedure[] procs) throws Exception { + final Thread[] threads = new Thread[PROC_NTHREADS]; + for (int i = 0; i < threads.length; ++i) { + final int threadId = i; + threads[i] = new Thread() { + @Override + public void run() { + TableName tableName = TableName.valueOf("table-" + threadId); + int n = (procs.length / threads.length); + int start = threadId * n; + int stop = start + n; + for (int j = start; j < stop; ++j) { + procs[j] = createAndSubmitAssign(tableName, j); + } + } + }; + threads[i].start(); + } + for (int i = 0; i < threads.length; ++i) { + threads[i].join(); + } + for (int i = procs.length - 1; i >= 0 && procs[i] == null; --i) { + procs[i] = createAndSubmitAssign(TableName.valueOf("table-sync"), i); + } + } + + private AssignProcedure createAndSubmitAssign(TableName tableName, int regionId) { + HRegionInfo hri = createRegionInfo(tableName, regionId); + AssignProcedure proc = am.createAssignProcedure(hri, false); + master.getMasterProcedureExecutor().submitProcedure(proc); + return proc; + } + + private UnassignProcedure createAndSubmitUnassign(TableName tableName, int regionId) { + HRegionInfo hri = createRegionInfo(tableName, regionId); + UnassignProcedure proc = am.createUnassignProcedure(hri, null, false); + master.getMasterProcedureExecutor().submitProcedure(proc); + return proc; + } + + private HRegionInfo createRegionInfo(final 
TableName tableName, final long regionId) { + return new HRegionInfo(tableName, + Bytes.toBytes(regionId), Bytes.toBytes(regionId + 1), false, 0); + } + + private void sendTransitionReport(final ServerName serverName, + final RegionInfo regionInfo, final TransitionCode state) throws IOException { + ReportRegionStateTransitionRequest.Builder req = + ReportRegionStateTransitionRequest.newBuilder(); + req.setServer(ProtobufUtil.toServerName(serverName)); + req.addTransition(RegionStateTransition.newBuilder() + .addRegionInfo(regionInfo) + .setTransitionCode(state) + .setOpenSeqNum(1) + .build()); + am.reportRegionStateTransition(req.build()); + } + + private class NoopRsExecutor implements MockRSExecutor { + public ExecuteProceduresResponse sendRequest(ServerName server, + ExecuteProceduresRequest request) throws IOException { + ExecuteProceduresResponse.Builder builder = ExecuteProceduresResponse.newBuilder(); + if (request.getOpenRegionCount() > 0) { + for (OpenRegionRequest req: request.getOpenRegionList()) { + OpenRegionResponse.Builder resp = OpenRegionResponse.newBuilder(); + for (RegionOpenInfo openReq: req.getOpenInfoList()) { + RegionOpeningState state = execOpenRegion(server, openReq); + if (state != null) { + resp.addOpeningState(state); + } + } + builder.addOpenRegion(resp.build()); + } + } + if (request.getCloseRegionCount() > 0) { + for (CloseRegionRequest req: request.getCloseRegionList()) { + CloseRegionResponse resp = execCloseRegion(server, + req.getRegion().getValue().toByteArray()); + if (resp != null) { + builder.addCloseRegion(resp); + } + } + } + return ExecuteProceduresResponse.newBuilder().build(); + } + + protected RegionOpeningState execOpenRegion(ServerName server, RegionOpenInfo regionInfo) + throws IOException { + return null; + } + + protected CloseRegionResponse execCloseRegion(ServerName server, byte[] regionName) + throws IOException { + return null; + } + } + + private class GoodRsExecutor extends NoopRsExecutor { + @Override + protected RegionOpeningState execOpenRegion(ServerName server, RegionOpenInfo openReq) + throws IOException { + sendTransitionReport(server, openReq.getRegion(), TransitionCode.OPENED); + // Concurrency? + // Now update the state of our cluster in regionsToRegionServers. 
+ SortedSet regions = regionsToRegionServers.get(server); + if (regions == null) { + regions = new ConcurrentSkipListSet(Bytes.BYTES_COMPARATOR); + regionsToRegionServers.put(server, regions); + } + HRegionInfo hri = HRegionInfo.convert(openReq.getRegion()); + if (regions.contains(hri.getRegionName())) { + throw new UnsupportedOperationException(hri.getRegionNameAsString()); + } + regions.add(hri.getRegionName()); + return RegionOpeningState.OPENED; + } + + @Override + protected CloseRegionResponse execCloseRegion(ServerName server, byte[] regionName) + throws IOException { + HRegionInfo hri = am.getRegionInfo(regionName); + sendTransitionReport(server, HRegionInfo.convert(hri), TransitionCode.CLOSED); + return CloseRegionResponse.newBuilder().setClosed(true).build(); + } + } + + private static class ServerNotYetRunningRsExecutor implements MockRSExecutor { + public ExecuteProceduresResponse sendRequest(ServerName server, ExecuteProceduresRequest req) + throws IOException { + throw new ServerNotRunningYetException("wait on server startup"); + } + } + + private static class FaultyRsExecutor implements MockRSExecutor { + private final IOException exception; + + public FaultyRsExecutor(final IOException exception) { + this.exception = exception; + } + + public ExecuteProceduresResponse sendRequest(ServerName server, ExecuteProceduresRequest req) + throws IOException { + throw exception; + } + } + + private class SocketTimeoutRsExecutor extends GoodRsExecutor { + private final int maxSocketTimeoutRetries; + private final int maxServerRetries; + + private ServerName lastServer; + private int sockTimeoutRetries; + private int serverRetries; + + public SocketTimeoutRsExecutor(int maxSocketTimeoutRetries, int maxServerRetries) { + this.maxServerRetries = maxServerRetries; + this.maxSocketTimeoutRetries = maxSocketTimeoutRetries; + } + + public ExecuteProceduresResponse sendRequest(ServerName server, ExecuteProceduresRequest req) + throws IOException { + // SocketTimeoutException should be a temporary problem + // unless the server will be declared dead. + if (sockTimeoutRetries++ < maxSocketTimeoutRetries) { + if (sockTimeoutRetries == 1) assertNotEquals(lastServer, server); + lastServer = server; + LOG.debug("socket timeout for server=" + server + " retries=" + sockTimeoutRetries); + throw new SocketTimeoutException("simulate socket timeout"); + } else if (serverRetries++ < maxServerRetries) { + LOG.info("mark server=" + server + " as dead. 
serverRetries=" + serverRetries); + master.getServerManager().moveFromOnelineToDeadServers(server); + sockTimeoutRetries = 0; + throw new SocketTimeoutException("simulate socket timeout"); + } else { + return super.sendRequest(server, req); + } + } + } + + private class RandRsExecutor extends NoopRsExecutor { + private final Random rand = new Random(); + + public ExecuteProceduresResponse sendRequest(ServerName server, ExecuteProceduresRequest req) + throws IOException { + switch (rand.nextInt(5)) { + case 0: throw new ServerNotRunningYetException("wait on server startup"); + case 1: throw new SocketTimeoutException("simulate socket timeout"); + case 2: throw new RemoteException("java.io.IOException", "unexpected exception"); + } + return super.sendRequest(server, req); + } + + @Override + protected RegionOpeningState execOpenRegion(ServerName server, RegionOpenInfo openReq) + throws IOException { + switch (rand.nextInt(6)) { + case 0: + return OpenRegionResponse.RegionOpeningState.OPENED; + case 1: + sendTransitionReport(server, openReq.getRegion(), TransitionCode.OPENED); + return OpenRegionResponse.RegionOpeningState.ALREADY_OPENED; + case 2: + sendTransitionReport(server, openReq.getRegion(), TransitionCode.FAILED_OPEN); + return OpenRegionResponse.RegionOpeningState.FAILED_OPENING; + } + return null; + } + + @Override + protected CloseRegionResponse execCloseRegion(ServerName server, byte[] regionName) + throws IOException { + CloseRegionResponse.Builder resp = CloseRegionResponse.newBuilder(); + boolean closed = rand.nextBoolean(); + if (closed) { + HRegionInfo hri = am.getRegionInfo(regionName); + sendTransitionReport(server, HRegionInfo.convert(hri), TransitionCode.CLOSED); + } + resp.setClosed(closed); + return resp.build(); + } + } + + private interface MockRSExecutor { + ExecuteProceduresResponse sendRequest(ServerName server, ExecuteProceduresRequest req) + throws IOException; + } + + private class MockRSProcedureDispatcher extends RSProcedureDispatcher { + private MockRSExecutor mockRsExec; + + public MockRSProcedureDispatcher(final MasterServices master) { + super(master); + } + + public void setMockRsExecutor(final MockRSExecutor mockRsExec) { + this.mockRsExec = mockRsExec; + } + + @Override + protected void remoteDispatch(ServerName serverName, Set operations) { + submitTask(new MockRemoteCall(serverName, operations)); + } + + private class MockRemoteCall extends ExecuteProceduresRemoteCall { + public MockRemoteCall(final ServerName serverName, + final Set operations) { + super(serverName, operations); + } + + @Override + protected ExecuteProceduresResponse sendRequest(final ServerName serverName, + final ExecuteProceduresRequest request) throws IOException { + return mockRsExec.sendRequest(serverName, request); + } + } + } +} \ No newline at end of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentOnRSCrash.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentOnRSCrash.java new file mode 100644 index 0000000..e4cec45 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentOnRSCrash.java @@ -0,0 +1,185 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.master.assignment; + +import java.io.IOException; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.hbase.master.procedure.MasterProcedureConstants; +import org.apache.hadoop.hbase.util.Bytes; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotEquals; +import static org.junit.Assert.assertTrue; + +@Category({MasterTests.class, LargeTests.class}) +public class TestAssignmentOnRSCrash { + private static final Log LOG = LogFactory.getLog(TestAssignmentOnRSCrash.class); + + private static final TableName TEST_TABLE = TableName.valueOf("testb"); + private static final String FAMILY_STR = "f"; + private static final byte[] FAMILY = Bytes.toBytes(FAMILY_STR); + private static final int NUM_RS = 3; + + private HBaseTestingUtility UTIL; + + private static void setupConf(Configuration conf) { + conf.setInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, 1); + conf.set("hbase.balancer.tablesOnMaster", "none"); + } + + @Before + public void setup() throws Exception { + UTIL = new HBaseTestingUtility(); + + setupConf(UTIL.getConfiguration()); + UTIL.startMiniCluster(NUM_RS); + + UTIL.createTable(TEST_TABLE, new byte[][] { FAMILY }, new byte[][] { + Bytes.toBytes("B"), Bytes.toBytes("D"), Bytes.toBytes("F"), Bytes.toBytes("L") + }); + } + + @After + public void tearDown() throws Exception { + UTIL.shutdownMiniCluster(); + } + + @Test(timeout=30000) + public void testKillRsWithUserRegionWithData() throws Exception { + testCrashRsWithUserRegion(true, true); + } + + @Test(timeout=30000) + public void testKillRsWithUserRegionWithoutData() throws Exception { + testCrashRsWithUserRegion(true, false); + } + + @Test(timeout=30000) + public void testStopRsWithUserRegionWithData() throws Exception { + testCrashRsWithUserRegion(false, true); + } + + @Test(timeout=30000) + public void testStopRsWithUserRegionWithoutData() throws Exception { + testCrashRsWithUserRegion(false, false); + } + + private void testCrashRsWithUserRegion(final boolean kill, final boolean withData) + throws Exception { + final int NROWS = 100; + int nkilled = 0; + for (HRegionInfo hri: UTIL.getHBaseAdmin().getTableRegions(TEST_TABLE)) { + ServerName serverName = 
AssignmentTestingUtil.getServerHoldingRegion(UTIL, hri); + if (AssignmentTestingUtil.isServerHoldingMeta(UTIL, serverName)) continue; + + if (withData) { + testInsert(hri, NROWS); + } + + // wait for regions to enter in transition and then to get out of transition + AssignmentTestingUtil.crashRs(UTIL, serverName, kill); + AssignmentTestingUtil.waitForRegionToBeInTransition(UTIL, hri); + UTIL.waitUntilNoRegionsInTransition(); + + if (withData) { + assertEquals(NROWS, testGet(hri, NROWS)); + } + + // region should be moved to another RS + assertNotEquals(serverName, AssignmentTestingUtil.getServerHoldingRegion(UTIL, hri)); + + if (++nkilled == (NUM_RS - 1)) { + break; + } + } + assertTrue("expected RSs to be killed", nkilled > 0); + } + + @Test(timeout=60000) + public void testKillRsWithMetaRegion() throws Exception { + testCrashRsWithMetaRegion(true); + } + + @Test(timeout=60000) + public void testStopRsWithMetaRegion() throws Exception { + testCrashRsWithMetaRegion(false); + } + + private void testCrashRsWithMetaRegion(final boolean kill) throws Exception { + int nkilled = 0; + for (HRegionInfo hri: AssignmentTestingUtil.getMetaRegions(UTIL)) { + ServerName serverName = AssignmentTestingUtil.crashRsWithRegion(UTIL, hri, kill); + + // wait for region to enter in transition and then to get out of transition + AssignmentTestingUtil.waitForRegionToBeInTransition(UTIL, hri); + UTIL.waitUntilNoRegionsInTransition(); + testGet(hri, 10); + + // region should be moved to another RS + assertNotEquals(serverName, AssignmentTestingUtil.getServerHoldingRegion(UTIL, hri)); + + if (++nkilled == (NUM_RS - 1)) { + break; + } + } + assertTrue("expected RSs to be killed", nkilled > 0); + } + + private void testInsert(final HRegionInfo hri, final int nrows) throws IOException { + final Table table = UTIL.getConnection().getTable(hri.getTable()); + for (int i = 0; i < nrows; ++i) { + final byte[] row = Bytes.add(hri.getStartKey(), Bytes.toBytes(i)); + final Put put = new Put(row); + put.addColumn(FAMILY, null, row); + table.put(put); + } + } + + public int testGet(final HRegionInfo hri, final int nrows) throws IOException { + int nresults = 0; + final Table table = UTIL.getConnection().getTable(hri.getTable()); + for (int i = 0; i < nrows; ++i) { + final byte[] row = Bytes.add(hri.getStartKey(), Bytes.toBytes(i)); + final Result result = table.get(new Get(row)); + if (result != null && !result.isEmpty() && + Bytes.equals(row, result.getValue(FAMILY, null))) { + nresults++; + } + } + return nresults; + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStates.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStates.java new file mode 100644 index 0000000..1d2481d --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStates.java @@ -0,0 +1,226 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.master.assignment; + +import java.lang.Thread.UncaughtExceptionHandler; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutorCompletionService; +import java.util.concurrent.Future; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; +import java.util.List; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.master.assignment.RegionStates.RegionStateNode; +import org.apache.hadoop.hbase.procedure2.util.StringUtils; +import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.Threads; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +@Category({MasterTests.class, MediumTests.class}) +public class TestRegionStates { + private static final Log LOG = LogFactory.getLog(TestRegionStates.class); + + protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility(); + + private static ThreadPoolExecutor threadPool; + private static ExecutorCompletionService<Object> executorService; + + @BeforeClass + public static void setUp() throws Exception { + threadPool = Threads.getBoundedCachedThreadPool(32, 60L, TimeUnit.SECONDS, + Threads.newDaemonThreadFactory("ProcedureDispatcher", + new UncaughtExceptionHandler() { + @Override + public void uncaughtException(Thread t, Throwable e) { + LOG.warn("Failed thread " + t.getName(), e); + } + })); + executorService = new ExecutorCompletionService<Object>(threadPool); + } + + @AfterClass + public static void tearDown() throws Exception { + threadPool.shutdown(); + } + + @Before + public void testSetup() { + } + + @After + public void testTearDown() throws Exception { + while (true) { + Future<Object> f = executorService.poll(); + if (f == null) break; + f.get(); + } + } + + private static void waitExecutorService(final int count) throws Exception { + for (int i = 0; i < count; ++i) { + executorService.take().get(); + } + } + + // ========================================================================== + // Regions related + // ========================================================================== + + @Test + public void testRegionDoubleCreation() throws Exception { + // NOTE: HRegionInfo sorts by table first, so we are relying on that + final TableName TABLE_NAME_A = TableName.valueOf("testOrderedByTableA"); + final TableName TABLE_NAME_B = TableName.valueOf("testOrderedByTableB"); + final TableName TABLE_NAME_C = TableName.valueOf("testOrderedByTableC"); + final RegionStates stateMap = new RegionStates(); + final int NRUNS = 1000; + final int NSMALL_RUNS = 3; + + // add some
regions for table B + for (int i = 0; i < NRUNS; ++i) { + addRegionNode(stateMap, TABLE_NAME_B, i); + } + // re-add the regions for table B + for (int i = 0; i < NRUNS; ++i) { + addRegionNode(stateMap, TABLE_NAME_B, i); + } + waitExecutorService(NRUNS * 2); + + // add two other tables A and C that will be placed before and after table B (sort order) + for (int i = 0; i < NSMALL_RUNS; ++i) { + addRegionNode(stateMap, TABLE_NAME_A, i); + addRegionNode(stateMap, TABLE_NAME_C, i); + } + waitExecutorService(NSMALL_RUNS * 2); + // check for the list of regions of the 3 tables + checkTableRegions(stateMap, TABLE_NAME_A, NSMALL_RUNS); + checkTableRegions(stateMap, TABLE_NAME_B, NRUNS); + checkTableRegions(stateMap, TABLE_NAME_C, NSMALL_RUNS); + } + + private void checkTableRegions(final RegionStates stateMap, + final TableName tableName, final int nregions) { + List<HRegionInfo> hris = stateMap.getRegionsOfTable(tableName); + assertEquals(nregions, hris.size()); + for (int i = 1; i < hris.size(); ++i) { + long a = Bytes.toLong(hris.get(i - 1).getStartKey()); + long b = Bytes.toLong(hris.get(i).getStartKey()); + assertEquals(b, a + 1); + } + } + + private void addRegionNode(final RegionStates stateMap, + final TableName tableName, final long regionId) { + executorService.submit(new Callable<Object>() { + @Override + public Object call() { + HRegionInfo hri = new HRegionInfo(tableName, + Bytes.toBytes(regionId), Bytes.toBytes(regionId + 1), false, 0); + return stateMap.getOrCreateRegionNode(hri); + } + }); + } + + private Object createRegionNode(final RegionStates stateMap, + final TableName tableName, final long regionId) { + return stateMap.getOrCreateRegionNode(createRegionInfo(tableName, regionId)); + } + + private HRegionInfo createRegionInfo(final TableName tableName, final long regionId) { + return new HRegionInfo(tableName, + Bytes.toBytes(regionId), Bytes.toBytes(regionId + 1), false, 0); + } + + @Test + public void testPerf() throws Exception { + final TableName TABLE_NAME = TableName.valueOf("testPerf"); + final int NRUNS = 1000000; // 1M + final RegionStates stateMap = new RegionStates(); + + long st = System.currentTimeMillis(); + for (int i = 0; i < NRUNS; ++i) { + final int regionId = i; + executorService.submit(new Callable<Object>() { + @Override + public Object call() { + HRegionInfo hri = createRegionInfo(TABLE_NAME, regionId); + return stateMap.getOrCreateRegionNode(hri); + } + }); + } + waitExecutorService(NRUNS); + long et = System.currentTimeMillis(); + LOG.info(String.format("PERF STATEMAP INSERT: %s %s/sec", + StringUtils.humanTimeDiff(et - st), + StringUtils.humanSize(NRUNS / ((et - st) / 1000.0f)))); + + st = System.currentTimeMillis(); + for (int i = 0; i < NRUNS; ++i) { + final int regionId = i; + executorService.submit(new Callable<Object>() { + @Override + public Object call() { + HRegionInfo hri = createRegionInfo(TABLE_NAME, regionId); + return stateMap.getRegionState(hri); + } + }); + } + + waitExecutorService(NRUNS); + et = System.currentTimeMillis(); + LOG.info(String.format("PERF STATEMAP GET: %s %s/sec", + StringUtils.humanTimeDiff(et - st), + StringUtils.humanSize(NRUNS / ((et - st) / 1000.0f)))); + } + + @Test + public void testPerfSingleThread() { + final TableName TABLE_NAME = TableName.valueOf("testPerf"); + final int NRUNS = 1 * 1000000; // 1M + + final RegionStates stateMap = new RegionStates(); + long st = System.currentTimeMillis(); + for (int i = 0; i < NRUNS; ++i) { + stateMap.createRegionNode(createRegionInfo(TABLE_NAME, i)); + } + long et = System.currentTimeMillis(); + LOG.info(String.format("PERF SingleThread: %s
%s/sec", + StringUtils.humanTimeDiff(et - st), + StringUtils.humanSize(NRUNS / ((et - st) / 1000.0f)))); + } + + // ========================================================================== + // Server related + // ========================================================================== +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java index 7e6691d..48b5a34 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java @@ -25,6 +25,7 @@ import static org.junit.Assert.assertTrue; import java.io.IOException; import java.util.List; import java.util.TreeSet; +import java.util.concurrent.Callable; import java.util.concurrent.atomic.AtomicInteger; import org.apache.commons.logging.Log; @@ -48,7 +49,11 @@ import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.master.HMaster; +import org.apache.hadoop.hbase.master.MasterMetaBootstrap; +import org.apache.hadoop.hbase.master.RegionState; import org.apache.hadoop.hbase.master.TableStateManager; +import org.apache.hadoop.hbase.master.assignment.AssignmentManager; +import org.apache.hadoop.hbase.monitoring.TaskMonitor; import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; import org.apache.hadoop.hbase.util.Bytes; @@ -59,7 +64,45 @@ import org.apache.hadoop.hbase.util.ModifyRegionUtils; public class MasterProcedureTestingUtility { private static final Log LOG = LogFactory.getLog(MasterProcedureTestingUtility.class); - private MasterProcedureTestingUtility() { + private MasterProcedureTestingUtility() { } + + public static void restartMasterProcedureExecutor(ProcedureExecutor procExec) + throws Exception { + final MasterProcedureEnv env = procExec.getEnvironment(); + final HMaster master = (HMaster)env.getMasterServices(); + ProcedureTestingUtility.restart(procExec, true, true, + // stop services + new Callable() { + @Override + public Void call() throws Exception { + final AssignmentManager am = env.getAssignmentManager(); + // try to simulate a master restart by removing the ServerManager states about seqIDs + for (RegionState regionState: am.getRegionStates().getRegionStates()) { + env.getMasterServices().getServerManager().removeRegion(regionState.getRegion()); + } + am.stop(); + master.setInitialized(false); + return null; + } + }, + // restart services + new Callable() { + @Override + public Void call() throws Exception { + final AssignmentManager am = env.getAssignmentManager(); + am.start(); + if (true) { + MasterMetaBootstrap metaBootstrap = new MasterMetaBootstrap(master, + TaskMonitor.get().createStatus("meta")); + metaBootstrap.splitMetaLogsBeforeAssignment(); + metaBootstrap.assignMeta(); + metaBootstrap.processDeadServers(); + } + am.joinCluster(); + master.setInitialized(true); + return null; + } + }); } // ========================================================================== @@ -295,6 +338,9 @@ public class MasterProcedureTestingUtility { return put; } + // ========================================================================== + // Procedure Helpers + // 
========================================================================== public static long generateNonceGroup(final HMaster master) { return master.getClusterConnection().getNonceGenerator().getNonceGroup(); } @@ -318,13 +364,6 @@ public class MasterProcedureTestingUtility { * finish. * @see #testRecoveryAndDoubleExecution(ProcedureExecutor, long) */ - public static void testRecoveryAndDoubleExecution( - final ProcedureExecutor procExec, final long procId, - final int numSteps) throws Exception { - testRecoveryAndDoubleExecution(procExec, procId, numSteps, true); - ProcedureTestingUtility.assertProcNotFailed(procExec, procId); - } - private static void testRecoveryAndDoubleExecution( final ProcedureExecutor procExec, final long procId, final int numSteps, final boolean expectExecRunning) throws Exception { @@ -336,9 +375,9 @@ public class MasterProcedureTestingUtility { // restart executor/store // execute step N - save on store for (int i = 0; i < numSteps; ++i) { - LOG.info("Restart " + i + " exec state: " + procExec.getProcedure(procId)); + LOG.info("Restart " + i + " exec state=" + procExec.getProcedure(procId)); ProcedureTestingUtility.assertProcNotYetCompleted(procExec, procId); - ProcedureTestingUtility.restart(procExec); + restartMasterProcedureExecutor(procExec); ProcedureTestingUtility.waitProcedure(procExec, procId); } @@ -366,8 +405,8 @@ public class MasterProcedureTestingUtility { ProcedureTestingUtility.waitProcedure(procExec, procId); assertEquals(false, procExec.isRunning()); for (int i = 0; !procExec.isFinished(procId); ++i) { - LOG.info("Restart " + i + " exec state: " + procExec.getProcedure(procId)); - ProcedureTestingUtility.restart(procExec); + LOG.info("Restart " + i + " exec state=" + procExec.getProcedure(procId)); + restartMasterProcedureExecutor(procExec); ProcedureTestingUtility.waitProcedure(procExec, procId); } assertEquals(true, procExec.isRunning()); @@ -399,7 +438,7 @@ public class MasterProcedureTestingUtility { for (int i = 0; !procExec.isFinished(procId); ++i) { LOG.info("Restart " + i + " rollback state: " + procExec.getProcedure(procId)); ProcedureTestingUtility.assertProcNotYetCompleted(procExec, procId); - ProcedureTestingUtility.restart(procExec); + restartMasterProcedureExecutor(procExec); ProcedureTestingUtility.waitProcedure(procExec, procId); } } finally { @@ -444,7 +483,7 @@ public class MasterProcedureTestingUtility { try { ProcedureTestingUtility.assertProcNotYetCompleted(procExec, procId); LOG.info("Restart and rollback procId=" + procId); - ProcedureTestingUtility.restart(procExec); + restartMasterProcedureExecutor(procExec); ProcedureTestingUtility.waitProcedure(procExec, procId); } finally { assertTrue(procExec.unregisterListener(abortListener)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestAddColumnFamilyProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestAddColumnFamilyProcedure.java index 31eedfc..506e537 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestAddColumnFamilyProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestAddColumnFamilyProcedure.java @@ -22,26 +22,28 @@ import static org.junit.Assert.assertTrue; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.CategoryBasedTimeout; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.InvalidFamilyOperationException; import 
org.apache.hadoop.hbase.ProcedureInfo; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; -import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.AddColumnFamilyState; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.junit.Rule; import org.junit.Test; import org.junit.experimental.categories.Category; import org.junit.rules.TestName; +import org.junit.rules.TestRule; @Category({MasterTests.class, MediumTests.class}) public class TestAddColumnFamilyProcedure extends TestTableDDLProcedureBase { private static final Log LOG = LogFactory.getLog(TestAddColumnFamilyProcedure.class); + @Rule public final TestRule timeout = CategoryBasedTimeout.builder().withTimeout(this.getClass()). + withLookingForStuckThread(true).build(); - @Rule - public TestName name = new TestName(); + @Rule public TestName name = new TestName(); @Test(timeout = 60000) public void testAddColumnFamily() throws Exception { @@ -61,8 +63,7 @@ public class TestAddColumnFamilyProcedure extends TestTableDDLProcedureBase { ProcedureTestingUtility.waitProcedure(procExec, procId1); ProcedureTestingUtility.assertProcNotFailed(procExec, procId1); - MasterProcedureTestingUtility.validateColumnFamilyAddition(UTIL.getHBaseCluster().getMaster(), - tableName, cf1); + MasterProcedureTestingUtility.validateColumnFamilyAddition(getMaster(), tableName, cf1); // Test 2: Add a column family offline UTIL.getAdmin().disableTable(tableName); @@ -71,8 +72,7 @@ public class TestAddColumnFamilyProcedure extends TestTableDDLProcedureBase { // Wait the completion ProcedureTestingUtility.waitProcedure(procExec, procId2); ProcedureTestingUtility.assertProcNotFailed(procExec, procId2); - MasterProcedureTestingUtility.validateColumnFamilyAddition(UTIL.getHBaseCluster().getMaster(), - tableName, cf2); + MasterProcedureTestingUtility.validateColumnFamilyAddition(getMaster(), tableName, cf2); } @Test(timeout=60000) @@ -91,8 +91,7 @@ public class TestAddColumnFamilyProcedure extends TestTableDDLProcedureBase { // Wait the completion ProcedureTestingUtility.waitProcedure(procExec, procId1); ProcedureTestingUtility.assertProcNotFailed(procExec, procId1); - MasterProcedureTestingUtility.validateColumnFamilyAddition(UTIL.getHBaseCluster().getMaster(), - tableName, cf2); + MasterProcedureTestingUtility.validateColumnFamilyAddition(getMaster(), tableName, cf2); // add the column family that exists long procId2 = procExec.submitProcedure( @@ -140,11 +139,9 @@ public class TestAddColumnFamilyProcedure extends TestTableDDLProcedureBase { new AddColumnFamilyProcedure(procExec.getEnvironment(), tableName, columnDescriptor)); // Restart the executor and execute the step twice - int numberOfSteps = AddColumnFamilyState.values().length; - MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId, numberOfSteps); + MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId); - MasterProcedureTestingUtility.validateColumnFamilyAddition(UTIL.getHBaseCluster().getMaster(), - tableName, cf4); + MasterProcedureTestingUtility.validateColumnFamilyAddition(getMaster(), tableName, cf4); } @Test(timeout = 60000) @@ -164,11 +161,9 @@ public class TestAddColumnFamilyProcedure extends TestTableDDLProcedureBase { new AddColumnFamilyProcedure(procExec.getEnvironment(), tableName, columnDescriptor)); // Restart the executor and 
execute the step twice - int numberOfSteps = AddColumnFamilyState.values().length; - MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId, numberOfSteps); + MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId); - MasterProcedureTestingUtility.validateColumnFamilyAddition(UTIL.getHBaseCluster().getMaster(), - tableName, cf5); + MasterProcedureTestingUtility.validateColumnFamilyAddition(getMaster(), tableName, cf5); } @Test(timeout = 60000) @@ -187,10 +182,9 @@ public class TestAddColumnFamilyProcedure extends TestTableDDLProcedureBase { long procId = procExec.submitProcedure( new AddColumnFamilyProcedure(procExec.getEnvironment(), tableName, columnDescriptor)); - int numberOfSteps = 1; // failing at "pre operations" + int numberOfSteps = 0; // failing at "pre operations" MasterProcedureTestingUtility.testRollbackAndDoubleExecution(procExec, procId, numberOfSteps); - MasterProcedureTestingUtility.validateColumnFamilyDeletion(UTIL.getHBaseCluster().getMaster(), - tableName, cf6); + MasterProcedureTestingUtility.validateColumnFamilyDeletion(getMaster(), tableName, cf6); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCloneSnapshotProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCloneSnapshotProcedure.java index d8221bb..8f7686a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCloneSnapshotProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCloneSnapshotProcedure.java @@ -18,6 +18,8 @@ package org.apache.hadoop.hbase.master.procedure; +import static org.junit.Assert.assertTrue; + import java.util.List; import org.apache.commons.logging.Log; @@ -28,12 +30,11 @@ import org.apache.hadoop.hbase.ProcedureInfo; import org.apache.hadoop.hbase.TableExistsException; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.SnapshotDescription; import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; -import org.apache.hadoop.hbase.client.SnapshotDescription; -import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.CloneSnapshotState; import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.MediumTests; @@ -42,8 +43,6 @@ import org.junit.After; import org.junit.Test; import org.junit.experimental.categories.Category; -import static org.junit.Assert.assertTrue; - @Category({MasterTests.class, MediumTests.class}) public class TestCloneSnapshotProcedure extends TestTableDDLProcedureBase { private static final Log LOG = LogFactory.getLog(TestCloneSnapshotProcedure.class); @@ -146,8 +145,7 @@ public class TestCloneSnapshotProcedure extends TestTableDDLProcedureBase { new CloneSnapshotProcedure(procExec.getEnvironment(), htd, snapshotDesc)); // Restart the executor and execute the step twice - int numberOfSteps = CloneSnapshotState.values().length; - MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId, numberOfSteps); + MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId); MasterProcedureTestingUtility.validateTableIsEnabled( 
UTIL.getHBaseCluster().getMaster(), diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateNamespaceProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateNamespaceProcedure.java index 995d98d..147ee27 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateNamespaceProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateNamespaceProcedure.java @@ -35,7 +35,6 @@ import org.apache.hadoop.hbase.ProcedureInfo; import org.apache.hadoop.hbase.constraint.ConstraintException; import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; -import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.CreateNamespaceState; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.junit.After; @@ -191,8 +190,7 @@ public class TestCreateNamespaceProcedure { new CreateNamespaceProcedure(procExec.getEnvironment(), nsd)); // Restart the executor and execute the step twice - int numberOfSteps = CreateNamespaceState.values().length; - MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId, numberOfSteps); + MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId); // Validate the creation of namespace ProcedureTestingUtility.assertProcNotFailed(procExec, procId); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java index c09016c..6bd88c7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java @@ -18,6 +18,8 @@ package org.apache.hadoop.hbase.master.procedure; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; @@ -40,8 +42,12 @@ import static org.junit.Assert.assertTrue; @Category({MasterTests.class, MediumTests.class}) public class TestCreateTableProcedure extends TestTableDDLProcedureBase { - @Rule - public TestName name = new TestName(); + private static final Log LOG = LogFactory.getLog(TestCreateTableProcedure.class); + + private static final String F1 = "f1"; + private static final String F2 = "f2"; + + @Rule public TestName name = new TestName(); @Test(timeout=60000) public void testSimpleCreate() throws Exception { @@ -61,9 +67,8 @@ public class TestCreateTableProcedure extends TestTableDDLProcedureBase { private void testSimpleCreate(final TableName tableName, byte[][] splitKeys) throws Exception { HRegionInfo[] regions = MasterProcedureTestingUtility.createTable( - getMasterProcedureExecutor(), tableName, splitKeys, "f1", "f2"); - MasterProcedureTestingUtility.validateTableCreation( - UTIL.getHBaseCluster().getMaster(), tableName, regions, "f1", "f2"); + getMasterProcedureExecutor(), tableName, splitKeys, F1, F2); + MasterProcedureTestingUtility.validateTableCreation(getMaster(), tableName, regions, F1, F2); } @Test(timeout=60000) @@ -126,25 +131,21 @@ public class TestCreateTableProcedure extends TestTableDDLProcedureBase { new 
CreateTableProcedure(procExec.getEnvironment(), htd, regions)); // Restart the executor and execute the step twice - // NOTE: the 6 (number of CreateTableState steps) is hardcoded, - // so you have to look at this test at least once when you add a new step. - MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId, 6); - - MasterProcedureTestingUtility.validateTableCreation( - UTIL.getHBaseCluster().getMaster(), tableName, regions, "f1", "f2"); + MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId); + MasterProcedureTestingUtility.validateTableCreation(getMaster(), tableName, regions, F1, F2); } @Test(timeout=90000) public void testRollbackAndDoubleExecution() throws Exception { final TableName tableName = TableName.valueOf(name.getMethodName()); - testRollbackAndDoubleExecution(MasterProcedureTestingUtility.createHTD(tableName, "f1", "f2")); + testRollbackAndDoubleExecution(MasterProcedureTestingUtility.createHTD(tableName, F1, F2)); } @Test(timeout=90000) public void testRollbackAndDoubleExecutionOnMobTable() throws Exception { final TableName tableName = TableName.valueOf(name.getMethodName()); - HTableDescriptor htd = MasterProcedureTestingUtility.createHTD(tableName, "f1", "f2"); - htd.getFamily(Bytes.toBytes("f1")).setMobEnabled(true); + HTableDescriptor htd = MasterProcedureTestingUtility.createHTD(tableName, F1, F2); + htd.getFamily(Bytes.toBytes(F1)).setMobEnabled(true); testRollbackAndDoubleExecution(htd); } @@ -166,11 +167,24 @@ public class TestCreateTableProcedure extends TestTableDDLProcedureBase { MasterProcedureTestingUtility.testRollbackAndDoubleExecution(procExec, procId, numberOfSteps); TableName tableName = htd.getTableName(); - MasterProcedureTestingUtility.validateTableDeletion( - UTIL.getHBaseCluster().getMaster(), tableName); + MasterProcedureTestingUtility.validateTableDeletion(getMaster(), tableName); // are we able to create the table after a rollback? 
resetProcExecutorTestingKillFlag(); testSimpleCreate(tableName, splitKeys); } + + @Test + public void testMRegions() throws Exception { + final byte[][] splitKeys = new byte[500][]; + for (int i = 0; i < splitKeys.length; ++i) { + splitKeys[i] = Bytes.toBytes(String.format("%08d", i)); + } + + final HTableDescriptor htd = MasterProcedureTestingUtility.createHTD( + TableName.valueOf("TestMRegions"), F1, F2); + UTIL.getHBaseAdmin().createTableAsync(htd, splitKeys) + .get(10, java.util.concurrent.TimeUnit.HOURS); + LOG.info("TABLE CREATED"); + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteColumnFamilyProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteColumnFamilyProcedure.java index c4bdc18..6096755 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteColumnFamilyProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteColumnFamilyProcedure.java @@ -22,26 +22,27 @@ import static org.junit.Assert.assertTrue; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.CategoryBasedTimeout; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.InvalidFamilyOperationException; import org.apache.hadoop.hbase.ProcedureInfo; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; -import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.DeleteColumnFamilyState; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.junit.Rule; import org.junit.Test; import org.junit.experimental.categories.Category; import org.junit.rules.TestName; +import org.junit.rules.TestRule; @Category({MasterTests.class, MediumTests.class}) public class TestDeleteColumnFamilyProcedure extends TestTableDDLProcedureBase { private static final Log LOG = LogFactory.getLog(TestDeleteColumnFamilyProcedure.class); - - @Rule - public TestName name = new TestName(); + @Rule public final TestRule timeout = CategoryBasedTimeout.builder().withTimeout(this.getClass()). 
+ withLookingForStuckThread(true).build(); + @Rule public TestName name = new TestName(); @Test(timeout = 60000) public void testDeleteColumnFamily() throws Exception { @@ -59,8 +60,7 @@ public class TestDeleteColumnFamilyProcedure extends TestTableDDLProcedureBase { ProcedureTestingUtility.waitProcedure(procExec, procId1); ProcedureTestingUtility.assertProcNotFailed(procExec, procId1); - MasterProcedureTestingUtility.validateColumnFamilyDeletion(UTIL.getHBaseCluster().getMaster(), - tableName, cf1); + MasterProcedureTestingUtility.validateColumnFamilyDeletion(getMaster(), tableName, cf1); // Test 2: delete the column family that exists offline UTIL.getAdmin().disableTable(tableName); @@ -88,8 +88,7 @@ public class TestDeleteColumnFamilyProcedure extends TestTableDDLProcedureBase { // First delete should succeed ProcedureTestingUtility.assertProcNotFailed(procExec, procId1); - MasterProcedureTestingUtility.validateColumnFamilyDeletion(UTIL.getHBaseCluster().getMaster(), - tableName, cf2); + MasterProcedureTestingUtility.validateColumnFamilyDeletion(getMaster(), tableName, cf2); // delete the column family that does not exist long procId2 = procExec.submitProcedure( @@ -159,11 +158,9 @@ public class TestDeleteColumnFamilyProcedure extends TestTableDDLProcedureBase { new DeleteColumnFamilyProcedure(procExec.getEnvironment(), tableName, cf4.getBytes())); // Restart the executor and execute the step twice - int numberOfSteps = DeleteColumnFamilyState.values().length; - MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId, numberOfSteps); + MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId); - MasterProcedureTestingUtility.validateColumnFamilyDeletion(UTIL.getHBaseCluster().getMaster(), - tableName, cf4); + MasterProcedureTestingUtility.validateColumnFamilyDeletion(getMaster(), tableName, cf4); } @Test(timeout = 60000) @@ -183,11 +180,9 @@ public class TestDeleteColumnFamilyProcedure extends TestTableDDLProcedureBase { new DeleteColumnFamilyProcedure(procExec.getEnvironment(), tableName, cf5.getBytes())); // Restart the executor and execute the step twice - int numberOfSteps = DeleteColumnFamilyState.values().length; - MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId, numberOfSteps); + MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId); - MasterProcedureTestingUtility.validateColumnFamilyDeletion(UTIL.getHBaseCluster().getMaster(), - tableName, cf5); + MasterProcedureTestingUtility.validateColumnFamilyDeletion(getMaster(), tableName, cf5); } @Test(timeout = 60000) @@ -207,10 +202,10 @@ public class TestDeleteColumnFamilyProcedure extends TestTableDDLProcedureBase { long procId = procExec.submitProcedure( new DeleteColumnFamilyProcedure(procExec.getEnvironment(), tableName, cf5.getBytes())); - int numberOfSteps = 1; // failing at pre operation + int numberOfSteps = 0; // failing at pre operation MasterProcedureTestingUtility.testRollbackAndDoubleExecution(procExec, procId, numberOfSteps); MasterProcedureTestingUtility.validateTableCreation( - UTIL.getHBaseCluster().getMaster(), tableName, regions, "f1", "f2", "f3", cf5); + getMaster(), tableName, regions, "f1", "f2", "f3", cf5); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteNamespaceProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteNamespaceProcedure.java index acedf1d..5ecacb6 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteNamespaceProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteNamespaceProcedure.java @@ -36,7 +36,6 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.constraint.ConstraintException; import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; -import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.DeleteNamespaceState; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.junit.After; @@ -175,8 +174,7 @@ public class TestDeleteNamespaceProcedure { new DeleteNamespaceProcedure(procExec.getEnvironment(), namespaceName)); // Restart the executor and execute the step twice - int numberOfSteps = DeleteNamespaceState.values().length; - MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId, numberOfSteps); + MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId); // Validate the deletion of namespace ProcedureTestingUtility.assertProcNotFailed(procExec, procId); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteTableProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteTableProcedure.java index 431e3e4..2a11544 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteTableProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteTableProcedure.java @@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.master.procedure; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.CategoryBasedTimeout; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.ProcedureInfo; import org.apache.hadoop.hbase.TableName; @@ -34,15 +35,16 @@ import org.junit.Rule; import org.junit.Test; import org.junit.experimental.categories.Category; import org.junit.rules.TestName; +import org.junit.rules.TestRule; import static org.junit.Assert.assertTrue; @Category({MasterTests.class, MediumTests.class}) public class TestDeleteTableProcedure extends TestTableDDLProcedureBase { private static final Log LOG = LogFactory.getLog(TestDeleteTableProcedure.class); - - @Rule - public TestName name = new TestName(); + @Rule public final TestRule timeout = CategoryBasedTimeout.builder().withTimeout(this.getClass()). 
+ withLookingForStuckThread(true).build(); + @Rule public TestName name = new TestName(); @Test(timeout=60000, expected=TableNotFoundException.class) public void testDeleteNotExistentTable() throws Exception { @@ -90,8 +92,7 @@ public class TestDeleteTableProcedure extends TestTableDDLProcedureBase { // First delete should succeed ProcedureTestingUtility.assertProcNotFailed(procExec, procId1); - MasterProcedureTestingUtility.validateTableDeletion( - UTIL.getHBaseCluster().getMaster(), tableName); + MasterProcedureTestingUtility.validateTableDeletion(getMaster(), tableName); // Second delete should fail with TableNotFound ProcedureInfo result = procExec.getResult(procId2); @@ -126,8 +127,7 @@ public class TestDeleteTableProcedure extends TestTableDDLProcedureBase { long procId = ProcedureTestingUtility.submitAndWait(procExec, new DeleteTableProcedure(procExec.getEnvironment(), tableName)); ProcedureTestingUtility.assertProcNotFailed(procExec, procId); - MasterProcedureTestingUtility.validateTableDeletion( - UTIL.getHBaseCluster().getMaster(), tableName); + MasterProcedureTestingUtility.validateTableDeletion(getMaster(), tableName); } @Test(timeout=60000) @@ -149,11 +149,8 @@ public class TestDeleteTableProcedure extends TestTableDDLProcedureBase { new DeleteTableProcedure(procExec.getEnvironment(), tableName)); // Restart the executor and execute the step twice - // NOTE: the 6 (number of DeleteTableState steps) is hardcoded, - // so you have to look at this test at least once when you add a new step. - MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId, 6); + MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId); - MasterProcedureTestingUtility.validateTableDeletion( - UTIL.getHBaseCluster().getMaster(), tableName); + MasterProcedureTestingUtility.validateTableDeletion(getMaster(), tableName); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDisableTableProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDisableTableProcedure.java index d3fccbe..11c4e2a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDisableTableProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDisableTableProcedure.java @@ -22,12 +22,12 @@ import static org.junit.Assert.assertTrue; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.CategoryBasedTimeout; import org.apache.hadoop.hbase.ProcedureInfo; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotEnabledException; import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; -import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.DisableTableState; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; @@ -36,13 +36,15 @@ import org.junit.Rule; import org.junit.Test; import org.junit.experimental.categories.Category; import org.junit.rules.TestName; +import org.junit.rules.TestRule; @Category({MasterTests.class, MediumTests.class}) public class TestDisableTableProcedure extends TestTableDDLProcedureBase { private static final Log LOG = LogFactory.getLog(TestDisableTableProcedure.class); + @Rule public final TestRule timeout = 
CategoryBasedTimeout.builder().withTimeout(this.getClass()). + withLookingForStuckThread(true).build(); - @Rule - public TestName name = new TestName(); + @Rule public TestName name = new TestName(); @Test(timeout = 60000) public void testDisableTable() throws Exception { @@ -57,8 +59,7 @@ public class TestDisableTableProcedure extends TestTableDDLProcedureBase { // Wait the completion ProcedureTestingUtility.waitProcedure(procExec, procId); ProcedureTestingUtility.assertProcNotFailed(procExec, procId); - MasterProcedureTestingUtility.validateTableIsDisabled(UTIL.getHBaseCluster().getMaster(), - tableName); + MasterProcedureTestingUtility.validateTableIsDisabled(getMaster(), tableName); } @Test(timeout = 60000) @@ -74,8 +75,7 @@ public class TestDisableTableProcedure extends TestTableDDLProcedureBase { // Wait the completion ProcedureTestingUtility.waitProcedure(procExec, procId1); ProcedureTestingUtility.assertProcNotFailed(procExec, procId1); - MasterProcedureTestingUtility.validateTableIsDisabled(UTIL.getHBaseCluster().getMaster(), - tableName); + MasterProcedureTestingUtility.validateTableIsDisabled(getMaster(), tableName); // Disable the table again - expect failure long procId2 = procExec.submitProcedure(new DisableTableProcedure( @@ -107,8 +107,7 @@ public class TestDisableTableProcedure extends TestTableDDLProcedureBase { // Wait the completion ProcedureTestingUtility.waitProcedure(procExec, procId4); ProcedureTestingUtility.assertProcNotFailed(procExec, procId4); - MasterProcedureTestingUtility.validateTableIsDisabled(UTIL.getHBaseCluster().getMaster(), - tableName); + MasterProcedureTestingUtility.validateTableIsDisabled(getMaster(), tableName); } @Test(timeout=60000) @@ -128,9 +127,8 @@ public class TestDisableTableProcedure extends TestTableDDLProcedureBase { new DisableTableProcedure(procExec.getEnvironment(), tableName, false)); // Restart the executor and execute the step twice - int numberOfSteps = DisableTableState.values().length; - MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId, numberOfSteps); - MasterProcedureTestingUtility.validateTableIsDisabled(UTIL.getHBaseCluster().getMaster(), - tableName); + MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId); + + MasterProcedureTestingUtility.validateTableIsDisabled(getMaster(), tableName); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestEnableTableProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestEnableTableProcedure.java index 81f2576..01f9ed8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestEnableTableProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestEnableTableProcedure.java @@ -22,12 +22,12 @@ import static org.junit.Assert.assertTrue; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.CategoryBasedTimeout; import org.apache.hadoop.hbase.ProcedureInfo; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotDisabledException; import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; -import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.EnableTableState; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; @@ 
-36,13 +36,14 @@ import org.junit.Rule; import org.junit.Test; import org.junit.experimental.categories.Category; import org.junit.rules.TestName; +import org.junit.rules.TestRule; @Category({MasterTests.class, MediumTests.class}) public class TestEnableTableProcedure extends TestTableDDLProcedureBase { private static final Log LOG = LogFactory.getLog(TestEnableTableProcedure.class); - - @Rule - public TestName name = new TestName(); + @Rule public final TestRule timeout = CategoryBasedTimeout.builder().withTimeout(this.getClass()). + withLookingForStuckThread(true).build(); + @Rule public TestName name = new TestName(); @Test(timeout = 60000) public void testEnableTable() throws Exception { @@ -58,8 +59,7 @@ public class TestEnableTableProcedure extends TestTableDDLProcedureBase { // Wait the completion ProcedureTestingUtility.waitProcedure(procExec, procId); ProcedureTestingUtility.assertProcNotFailed(procExec, procId); - MasterProcedureTestingUtility.validateTableIsEnabled(UTIL.getHBaseCluster().getMaster(), - tableName); + MasterProcedureTestingUtility.validateTableIsEnabled(getMaster(), tableName); } @Test(timeout=60000, expected=TableNotDisabledException.class) @@ -113,10 +113,9 @@ public class TestEnableTableProcedure extends TestTableDDLProcedureBase { new EnableTableProcedure(procExec.getEnvironment(), tableName, false)); // Restart the executor and execute the step twice - int numberOfSteps = EnableTableState.values().length; - MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId, numberOfSteps); - MasterProcedureTestingUtility.validateTableIsEnabled(UTIL.getHBaseCluster().getMaster(), - tableName); + MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId); + + MasterProcedureTestingUtility.validateTableIsEnabled(getMaster(), tableName); } @Test(timeout = 60000) @@ -136,9 +135,8 @@ public class TestEnableTableProcedure extends TestTableDDLProcedureBase { long procId = procExec.submitProcedure( new EnableTableProcedure(procExec.getEnvironment(), tableName, false)); - int numberOfSteps = 1; // failing at pre operation + int numberOfSteps = 0; // failing at pre operation MasterProcedureTestingUtility.testRollbackAndDoubleExecution(procExec, procId, numberOfSteps); - MasterProcedureTestingUtility.validateTableIsDisabled(UTIL.getHBaseCluster().getMaster(), - tableName); + MasterProcedureTestingUtility.validateTableIsDisabled(getMaster(), tableName); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java index af48302..d2df2bf 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java @@ -19,8 +19,6 @@ package org.apache.hadoop.hbase.master.procedure; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -31,6 +29,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.procedure2.Procedure; import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; import 
org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore; @@ -55,6 +54,7 @@ import org.junit.rules.TestRule; public class TestMasterFailoverWithProcedures { private static final Log LOG = LogFactory.getLog(TestMasterFailoverWithProcedures.class); + @ClassRule public static final TestRule timeout = CategoryBasedTimeout.forClass(TestMasterFailoverWithProcedures.class); @@ -116,7 +116,7 @@ public class TestMasterFailoverWithProcedures { HRegionInfo[] regions = ModifyRegionUtils.createHRegionInfos(htd, splitKeys); long procId = procExec.submitProcedure( new CreateTableProcedure(procExec.getEnvironment(), htd, regions)); - testRecoveryAndDoubleExecution(UTIL, procId, step, CreateTableState.values()); + testRecoveryAndDoubleExecution(UTIL, procId, step); MasterProcedureTestingUtility.validateTableCreation( UTIL.getHBaseCluster().getMaster(), tableName, regions, "f1", "f2"); @@ -154,7 +154,7 @@ public class TestMasterFailoverWithProcedures { // Start the Delete procedure && kill the executor long procId = procExec.submitProcedure( new DeleteTableProcedure(procExec.getEnvironment(), tableName)); - testRecoveryAndDoubleExecution(UTIL, procId, step, DeleteTableState.values()); + testRecoveryAndDoubleExecution(UTIL, procId, step); MasterProcedureTestingUtility.validateTableDeletion( UTIL.getHBaseCluster().getMaster(), tableName); @@ -197,7 +197,7 @@ public class TestMasterFailoverWithProcedures { // Start the Truncate procedure && kill the executor long procId = procExec.submitProcedure( new TruncateTableProcedure(procExec.getEnvironment(), tableName, preserveSplits)); - testRecoveryAndDoubleExecution(UTIL, procId, step, TruncateTableState.values()); + testRecoveryAndDoubleExecution(UTIL, procId, step); ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, false); UTIL.waitUntilAllRegionsAssigned(tableName); @@ -251,7 +251,7 @@ public class TestMasterFailoverWithProcedures { // Start the Delete procedure && kill the executor long procId = procExec.submitProcedure( new DisableTableProcedure(procExec.getEnvironment(), tableName, false)); - testRecoveryAndDoubleExecution(UTIL, procId, step, DisableTableState.values()); + testRecoveryAndDoubleExecution(UTIL, procId, step); MasterProcedureTestingUtility.validateTableIsDisabled( UTIL.getHBaseCluster().getMaster(), tableName); @@ -288,7 +288,7 @@ public class TestMasterFailoverWithProcedures { // Start the Delete procedure && kill the executor long procId = procExec.submitProcedure( new EnableTableProcedure(procExec.getEnvironment(), tableName, false)); - testRecoveryAndDoubleExecution(UTIL, procId, step, EnableTableState.values()); + testRecoveryAndDoubleExecution(UTIL, procId, step); MasterProcedureTestingUtility.validateTableIsEnabled( UTIL.getHBaseCluster().getMaster(), tableName); @@ -297,16 +297,17 @@ public class TestMasterFailoverWithProcedures { // ========================================================================== // Test Helpers // ========================================================================== - public static void testRecoveryAndDoubleExecution(final HBaseTestingUtility testUtil, - final long procId, final int lastStepBeforeFailover, TState[] states) throws Exception { + public static void testRecoveryAndDoubleExecution(final HBaseTestingUtility testUtil, + final long procId, final int lastStepBeforeFailover) throws Exception { ProcedureExecutor procExec = testUtil.getHBaseCluster().getMaster().getMasterProcedureExecutor(); 
ProcedureTestingUtility.waitProcedure(procExec, procId); + final Procedure proc = procExec.getProcedure(procId); for (int i = 0; i < lastStepBeforeFailover; ++i) { - LOG.info("Restart "+ i +" exec state: " + states[i]); + LOG.info("Restart "+ i +" exec state: " + proc); ProcedureTestingUtility.assertProcNotYetCompleted(procExec, procId); - ProcedureTestingUtility.restart(procExec); + MasterProcedureTestingUtility.restartMasterProcedureExecutor(procExec); ProcedureTestingUtility.waitProcedure(procExec, procId); } ProcedureTestingUtility.assertProcNotYetCompleted(procExec, procId); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureEvents.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureEvents.java index fc85de2..1ed8cf7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureEvents.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureEvents.java @@ -109,7 +109,7 @@ public class TestMasterProcedureEvents { ProcedureExecutor procExec = master.getMasterProcedureExecutor(); while (!master.isServerCrashProcessingEnabled() || !master.isInitialized() || - master.getAssignmentManager().getRegionStates().isRegionsInTransition()) { + master.getAssignmentManager().getRegionStates().hasRegionsInTransition()) { Thread.sleep(25); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyColumnFamilyProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyColumnFamilyProcedure.java index 184150b..f1667ed 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyColumnFamilyProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyColumnFamilyProcedure.java @@ -28,7 +28,6 @@ import org.apache.hadoop.hbase.ProcedureInfo; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; -import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyState; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.junit.Rule; @@ -125,8 +124,7 @@ public class TestModifyColumnFamilyProcedure extends TestTableDDLProcedureBase { new ModifyColumnFamilyProcedure(procExec.getEnvironment(), tableName, columnDescriptor)); // Restart the executor and execute the step twice - int numberOfSteps = ModifyColumnFamilyState.values().length; - MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId, numberOfSteps); + MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId); MasterProcedureTestingUtility.validateColumnFamilyModification(UTIL.getHBaseCluster() .getMaster(), tableName, cf3, columnDescriptor); @@ -153,8 +151,7 @@ public class TestModifyColumnFamilyProcedure extends TestTableDDLProcedureBase { new ModifyColumnFamilyProcedure(procExec.getEnvironment(), tableName, columnDescriptor)); // Restart the executor and execute the step twice - int numberOfSteps = ModifyColumnFamilyState.values().length; - MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId, numberOfSteps); + MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId); 
MasterProcedureTestingUtility.validateColumnFamilyModification(UTIL.getHBaseCluster() .getMaster(), tableName, cf4, columnDescriptor); @@ -180,7 +177,7 @@ public class TestModifyColumnFamilyProcedure extends TestTableDDLProcedureBase { long procId = procExec.submitProcedure( new ModifyColumnFamilyProcedure(procExec.getEnvironment(), tableName, columnDescriptor)); - int numberOfSteps = 1; // failing at pre operation + int numberOfSteps = 0; // failing at pre operation MasterProcedureTestingUtility.testRollbackAndDoubleExecution(procExec, procId, numberOfSteps); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyNamespaceProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyNamespaceProcedure.java index 1b53d23..5cb117b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyNamespaceProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyNamespaceProcedure.java @@ -32,7 +32,6 @@ import org.apache.hadoop.hbase.ProcedureInfo; import org.apache.hadoop.hbase.constraint.ConstraintException; import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; -import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.ModifyNamespaceState; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.junit.After; @@ -212,8 +211,7 @@ public class TestModifyNamespaceProcedure { new ModifyNamespaceProcedure(procExec.getEnvironment(), nsd)); // Restart the executor and execute the step twice - int numberOfSteps = ModifyNamespaceState.values().length; - MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId, numberOfSteps); + MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId); ProcedureTestingUtility.assertProcNotFailed(procExec, procId); // Validate diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyTableProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyTableProcedure.java index c5c6484..8872c63 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyTableProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyTableProcedure.java @@ -22,6 +22,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; +import org.apache.hadoop.hbase.CategoryBasedTimeout; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HRegionInfo; @@ -30,18 +31,19 @@ import org.apache.hadoop.hbase.ProcedureInfo; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; -import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.ModifyTableState; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.junit.Rule; import org.junit.Test; import org.junit.experimental.categories.Category; import org.junit.rules.TestName; +import org.junit.rules.TestRule; @Category({MasterTests.class, MediumTests.class}) public class TestModifyTableProcedure 
extends TestTableDDLProcedureBase { - @Rule - public TestName name = new TestName(); + @Rule public final TestRule timeout = CategoryBasedTimeout.builder().withTimeout(this.getClass()). + withLookingForStuckThread(true).build(); + @Rule public TestName name = new TestName(); @Test(timeout=60000) public void testModifyTable() throws Exception { @@ -208,8 +210,7 @@ public class TestModifyTableProcedure extends TestTableDDLProcedureBase { new ModifyTableProcedure(procExec.getEnvironment(), htd)); // Restart the executor and execute the step twice - int numberOfSteps = ModifyTableState.values().length; - MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId, numberOfSteps); + MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId); // Validate descriptor HTableDescriptor currentHtd = UTIL.getAdmin().getTableDescriptor(tableName); @@ -246,8 +247,7 @@ public class TestModifyTableProcedure extends TestTableDDLProcedureBase { new ModifyTableProcedure(procExec.getEnvironment(), htd)); // Restart the executor and execute the step twice - int numberOfSteps = ModifyTableState.values().length; - MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId, numberOfSteps); + MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId); // Validate descriptor HTableDescriptor currentHtd = UTIL.getAdmin().getTableDescriptor(tableName); @@ -282,7 +282,7 @@ public class TestModifyTableProcedure extends TestTableDDLProcedureBase { long procId = procExec.submitProcedure( new ModifyTableProcedure(procExec.getEnvironment(), htd)); - int numberOfSteps = 1; // failing at pre operation + int numberOfSteps = 0; // failing at pre operation MasterProcedureTestingUtility.testRollbackAndDoubleExecution(procExec, procId, numberOfSteps); // cf2 should not be present @@ -315,7 +315,7 @@ public class TestModifyTableProcedure extends TestTableDDLProcedureBase { new ModifyTableProcedure(procExec.getEnvironment(), htd)); // Restart the executor and rollback the step twice - int numberOfSteps = 1; // failing at pre operation + int numberOfSteps = 0; // failing at pre operation MasterProcedureTestingUtility.testRollbackAndDoubleExecution(procExec, procId, numberOfSteps); // cf2 should not be present diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestProcedureAdmin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestProcedureAdmin.java index f21fe0b..94c0fd8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestProcedureAdmin.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestProcedureAdmin.java @@ -18,14 +18,17 @@ package org.apache.hadoop.hbase.master.procedure; -import java.util.Random; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + import java.util.List; +import java.util.Random; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.CategoryBasedTimeout; import org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.ProcedureInfo; @@ -43,17 +46,19 @@ import org.junit.Rule; import org.junit.Test; import org.junit.experimental.categories.Category; import org.junit.rules.TestName; +import org.junit.rules.TestRule; import 
static org.junit.Assert.*; @Category({MasterTests.class, MediumTests.class}) public class TestProcedureAdmin { private static final Log LOG = LogFactory.getLog(TestProcedureAdmin.class); + @Rule public final TestRule timeout = CategoryBasedTimeout.builder().withTimeout(this.getClass()). + withLookingForStuckThread(true).build(); + @Rule public TestName name = new TestName(); protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility(); - @Rule - public TestName name = new TestName(); private static void setupConf(Configuration conf) { conf.setInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, 1); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestRestoreSnapshotProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestRestoreSnapshotProcedure.java index 479b206..2201763 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestRestoreSnapshotProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestRestoreSnapshotProcedure.java @@ -18,11 +18,17 @@ package org.apache.hadoop.hbase.master.procedure; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotEquals; +import static org.junit.Assert.assertTrue; + import java.io.IOException; import java.util.List; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.CategoryBasedTimeout; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.ProcedureInfo; @@ -35,7 +41,6 @@ import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; import org.apache.hadoop.hbase.client.SnapshotDescription; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; -import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RestoreSnapshotState; import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.MediumTests; @@ -46,6 +51,7 @@ import org.junit.Rule; import org.junit.Test; import org.junit.experimental.categories.Category; import org.junit.rules.TestName; +import org.junit.rules.TestRule; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; @@ -55,6 +61,8 @@ import static org.junit.Assert.assertTrue; @Category({MasterTests.class, MediumTests.class}) public class TestRestoreSnapshotProcedure extends TestTableDDLProcedureBase { private static final Log LOG = LogFactory.getLog(TestRestoreSnapshotProcedure.class); + @Rule public final TestRule timeout = CategoryBasedTimeout.builder().withTimeout(this.getClass()). 
+ withLookingForStuckThread(true).build(); protected final TableName snapshotTableName = TableName.valueOf("testRestoreSnapshot"); protected final byte[] CF1 = Bytes.toBytes("cf1"); @@ -201,8 +209,7 @@ public class TestRestoreSnapshotProcedure extends TestTableDDLProcedureBase { new RestoreSnapshotProcedure(procExec.getEnvironment(), snapshotHTD, snapshot)); // Restart the executor and execute the step twice - int numberOfSteps = RestoreSnapshotState.values().length; - MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId, numberOfSteps); + MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId); resetProcExecutorTestingKillFlag(); validateSnapshotRestore(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestServerCrashProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestServerCrashProcedure.java index 089872b..0dcaf45 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestServerCrashProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestServerCrashProcedure.java @@ -19,48 +19,58 @@ package org.apache.hadoop.hbase.master.procedure; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; import java.io.IOException; import java.util.Arrays; import java.util.Collection; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.MiniHBaseCluster; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.ResultScanner; -import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.master.assignment.AssignmentTestingUtil; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; -import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.util.TestTableName; import org.apache.hadoop.hbase.util.Threads; import org.junit.After; import org.junit.Before; +import org.junit.Rule; import org.junit.Test; import org.junit.experimental.categories.Category; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; import org.junit.runners.Parameterized.Parameters; -/** - * It used to first run with DLS and then DLR but HBASE-12751 broke DLR so we disabled it here. - */ -@Category(LargeTests.class) -@RunWith(Parameterized.class) +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.ResultScanner; + +@Category({MasterTests.class, LargeTests.class}) public class TestServerCrashProcedure { - // Ugly junit parameterization. I just want to pass false and then true but seems like needs - // to return sequences of two-element arrays. 
- @Parameters(name = "{index}: setting={0}") - public static Collection data() { - return Arrays.asList(new Object[] [] {{Boolean.FALSE, -1}}); - } + private static final Log LOG = LogFactory.getLog(TestServerCrashProcedure.class); + + private HBaseTestingUtility util; - private final HBaseTestingUtility util = new HBaseTestingUtility(); + private void setupConf(Configuration conf) { + conf.setInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, 1); + conf.set("hbase.balancer.tablesOnMaster", "none"); + conf.setInt("hbase.client.retries.number", 3); + } @Before public void setup() throws Exception { + this.util = new HBaseTestingUtility(); + setupConf(this.util.getConfiguration()); this.util.startMiniCluster(3); ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate( this.util.getHBaseCluster().getMaster().getMasterProcedureExecutor(), false); @@ -71,15 +81,25 @@ public class TestServerCrashProcedure { MiniHBaseCluster cluster = this.util.getHBaseCluster(); HMaster master = cluster == null? null: cluster.getMaster(); if (master != null && master.getMasterProcedureExecutor() != null) { - ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(master.getMasterProcedureExecutor(), - false); + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate( + master.getMasterProcedureExecutor(), false); } this.util.shutdownMiniCluster(); } - public TestServerCrashProcedure(final Boolean b, final int ignore) { - this.util.getConfiguration().setBoolean("hbase.master.distributed.log.replay", b); - this.util.getConfiguration().setInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, 1); + + @Test(timeout=60000) + public void testCrashTargetRs() throws Exception { + } + + @Test(timeout=60000) + public void testRecoveryAndDoubleExecutionOnRsWithMeta() throws Exception { + testRecoveryAndDoubleExecution(true); + } + + @Test(timeout=60000) + public void testRecoveryAndDoubleExecutionOnRsWithoutMeta() throws Exception { + testRecoveryAndDoubleExecution(false); } /** @@ -87,43 +107,57 @@ public class TestServerCrashProcedure { * needed state. * @throws Exception */ - @Test(timeout = 300000) - public void testRecoveryAndDoubleExecutionOnline() throws Exception { - final TableName tableName = TableName.valueOf("testRecoveryAndDoubleExecutionOnline"); - this.util.createTable(tableName, HBaseTestingUtility.COLUMNS, - HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE); - try (Table t = this.util.getConnection().getTable(tableName)) { + private void testRecoveryAndDoubleExecution(final boolean carryingMeta) throws Exception { + final TableName tableName = TableName.valueOf( + "testRecoveryAndDoubleExecution-carryingMeta-" + carryingMeta); + final Table t = this.util.createTable(tableName, HBaseTestingUtility.COLUMNS, + HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE); + try { // Load the table with a bit of data so some logs to split and some edits in each region. this.util.loadTable(t, HBaseTestingUtility.COLUMNS[0]); - int count = util.countRows(t); + + final int count = util.countRows(t); + assertTrue("expected some rows", count > 0); + final String checksum = util.checksumRows(t); + // Run the procedure executor outside the master so we can mess with it. Need to disable // Master's running of the server crash processing. - HMaster master = this.util.getHBaseCluster().getMaster(); + final HMaster master = this.util.getHBaseCluster().getMaster(); final ProcedureExecutor procExec = master.getMasterProcedureExecutor(); master.setServerCrashProcessingEnabled(false); - // Kill a server. 
Master will notice but do nothing other than add it to list of dead servers. - HRegionServer hrs = this.util.getHBaseCluster().getRegionServer(0); - boolean carryingMeta = master.getAssignmentManager().isCarryingMeta(hrs.getServerName()); - this.util.getHBaseCluster().killRegionServer(hrs.getServerName()); - hrs.join(); - // Wait until the expiration of the server has arrived at the master. We won't process it - // by queuing a ServerCrashProcedure because we have disabled crash processing... but wait - // here so ServerManager gets notice and adds expired server to appropriate queues. - while (!master.getServerManager().isServerDead(hrs.getServerName())) Threads.sleep(10); + + // find the first server that matches the request and execute the test + ServerName rsToKill = null; + for (HRegionInfo hri: util.getHBaseAdmin().getTableRegions(tableName)) { + final ServerName serverName = AssignmentTestingUtil.getServerHoldingRegion(util, hri); + if (AssignmentTestingUtil.isServerHoldingMeta(util, serverName) == carryingMeta) { + rsToKill = serverName; + break; + } + } + // kill the RS + AssignmentTestingUtil.killRs(util, rsToKill); + // Now, re-enable processing or else we can't get a lock on the ServerCrashProcedure. master.setServerCrashProcessingEnabled(true); + // Do some of the master processing of dead servers so when SCP runs, it has expected 'state'. - master.getServerManager().moveFromOnelineToDeadServers(hrs.getServerName()); + master.getServerManager().moveFromOnelineToDeadServers(rsToKill); + // Enable test flags and then queue the crash procedure. ProcedureTestingUtility.waitNoProcedureRunning(procExec); ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true); - long procId = - procExec.submitProcedure(new ServerCrashProcedure( - procExec.getEnvironment(), hrs.getServerName(), true, carryingMeta)); + long procId = procExec.submitProcedure(new ServerCrashProcedure( + procExec.getEnvironment(), rsToKill, true, carryingMeta)); + // Now run through the procedure twice crashing the executor on each step... MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId); + // Assert all data came back.
assertEquals(count, util.countRows(t)); + assertEquals(checksum, util.checksumRows(t)); + } finally { + t.close(); } } } \ No newline at end of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSplitTableRegionProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSplitTableRegionProcedure.java index c3b910e..643cc99 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSplitTableRegionProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestSplitTableRegionProcedure.java @@ -28,6 +28,7 @@ import java.util.List; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.CategoryBasedTimeout; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.DoNotRetryIOException; @@ -46,7 +47,6 @@ import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; import org.apache.hadoop.hbase.regionserver.HRegion; -import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionState; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; @@ -58,10 +58,13 @@ import org.junit.Rule; import org.junit.Test; import org.junit.experimental.categories.Category; import org.junit.rules.TestName; +import org.junit.rules.TestRule; @Category({MasterTests.class, MediumTests.class}) public class TestSplitTableRegionProcedure { private static final Log LOG = LogFactory.getLog(TestSplitTableRegionProcedure.class); + @Rule public final TestRule timeout = CategoryBasedTimeout.builder().withTimeout(this.getClass()). 
+ withLookingForStuckThread(true).build(); protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility(); @@ -330,8 +333,7 @@ public class TestSplitTableRegionProcedure { new SplitTableRegionProcedure(procExec.getEnvironment(), regions[0], splitKey)); // Restart the executor and execute the step twice - int numberOfSteps = SplitTableRegionState.values().length; - MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId, numberOfSteps); + MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId); ProcedureTestingUtility.assertProcNotFailed(procExec, procId); verify(tableName, splitRowNum); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTableDDLProcedureBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTableDDLProcedureBase.java index f453a67..f7b4100 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTableDDLProcedureBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTableDDLProcedureBase.java @@ -23,6 +23,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; import org.junit.After; @@ -75,6 +76,10 @@ public abstract class TestTableDDLProcedureBase { } protected ProcedureExecutor getMasterProcedureExecutor() { - return UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor(); + return getMaster().getMasterProcedureExecutor(); + } + + protected HMaster getMaster() { + return UTIL.getHBaseCluster().getMaster(); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTruncateTableProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTruncateTableProcedure.java index 6d9475f..22583d3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTruncateTableProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTruncateTableProcedure.java @@ -18,8 +18,12 @@ package org.apache.hadoop.hbase.master.procedure; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.CategoryBasedTimeout; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.ProcedureInfo; import org.apache.hadoop.hbase.TableName; @@ -34,6 +38,7 @@ import org.junit.Rule; import org.junit.Test; import org.junit.experimental.categories.Category; import org.junit.rules.TestName; +import org.junit.rules.TestRule; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; @@ -41,6 +46,8 @@ import static org.junit.Assert.assertTrue; @Category({MasterTests.class, MediumTests.class}) public class TestTruncateTableProcedure extends TestTableDDLProcedureBase { private static final Log LOG = LogFactory.getLog(TestTruncateTableProcedure.class); + @Rule public final TestRule timeout = CategoryBasedTimeout.builder().withTimeout(this.getClass()). 
+ withLookingForStuckThread(true).build(); @Rule public TestName name = new TestName(); @@ -171,9 +178,7 @@ public class TestTruncateTableProcedure extends TestTableDDLProcedureBase { new TruncateTableProcedure(procExec.getEnvironment(), tableName, preserveSplits)); // Restart the executor and execute the step twice - // NOTE: the 7 (number of TruncateTableState steps) is hardcoded, - // so you have to look at this test at least once when you add a new step. - MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId, 7); + MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId); ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, false); UTIL.waitUntilAllRegionsAssigned(tableName); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/namespace/TestNamespaceAuditor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/namespace/TestNamespaceAuditor.java index 0190b8f..d410781 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/namespace/TestNamespaceAuditor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/namespace/TestNamespaceAuditor.java @@ -68,7 +68,7 @@ import org.apache.hadoop.hbase.mapreduce.TableInputFormatBase; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.MasterCoprocessorHost; import org.apache.hadoop.hbase.master.RegionState; -import org.apache.hadoop.hbase.master.RegionStates; +import org.apache.hadoop.hbase.master.assignment.RegionStates; import org.apache.hadoop.hbase.master.TableNamespaceManager; import org.apache.hadoop.hbase.quotas.MasterQuotaManager; import org.apache.hadoop.hbase.quotas.QuotaExceededException; @@ -730,7 +730,7 @@ public class TestNamespaceAuditor { ADMIN.createTable(tableDescOne); ADMIN.createTable(tableDescTwo, Bytes.toBytes("AAA"), Bytes.toBytes("ZZZ"), 4); } - + @Test(expected = QuotaExceededException.class) public void testCloneSnapshotQuotaExceed() throws Exception { String nsp = prefix + "_testTableQuotaExceedWithCloneSnapshot"; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleMasterProcedureManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleMasterProcedureManager.java index 91279b6..1f4f68a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleMasterProcedureManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleMasterProcedureManager.java @@ -35,7 +35,7 @@ import org.apache.zookeeper.KeeperException; public class SimpleMasterProcedureManager extends MasterProcedureManager { - public static final String SIMPLE_SIGNATURE = "simle_test"; + public static final String SIMPLE_SIGNATURE = "simple_test"; public static final String SIMPLE_DATA = "simple_test_data"; private static final Log LOG = LogFactory.getLog(SimpleMasterProcedureManager.class); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactSplitThread.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactSplitThread.java index c7b6c7c..9f3ae8b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactSplitThread.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactSplitThread.java @@ -77,10 +77,10 @@ public class TestCompactSplitThread { // block writes if we get to blockingStoreFiles store files conf.setInt("hbase.hstore.blockingStoreFiles", blockingStoreFiles); // Ensure no extra cleaners on by default (e.g. 
TimeToLiveHFileCleaner) - conf.setInt(CompactSplitThread.LARGE_COMPACTION_THREADS, 3); - conf.setInt(CompactSplitThread.SMALL_COMPACTION_THREADS, 4); - conf.setInt(CompactSplitThread.SPLIT_THREADS, 5); - conf.setInt(CompactSplitThread.MERGE_THREADS, 6); + conf.setInt(CompactSplit.LARGE_COMPACTION_THREADS, 3); + conf.setInt(CompactSplit.SMALL_COMPACTION_THREADS, 4); + conf.setInt(CompactSplit.SPLIT_THREADS, 5); + conf.setInt(CompactSplit.MERGE_THREADS, 6); } @After @@ -116,10 +116,10 @@ public class TestCompactSplitThread { assertEquals(6, regionServer.compactSplitThread.getMergeThreadNum()); // change bigger configurations and do online update - conf.setInt(CompactSplitThread.LARGE_COMPACTION_THREADS, 4); - conf.setInt(CompactSplitThread.SMALL_COMPACTION_THREADS, 5); - conf.setInt(CompactSplitThread.SPLIT_THREADS, 6); - conf.setInt(CompactSplitThread.MERGE_THREADS, 7); + conf.setInt(CompactSplit.LARGE_COMPACTION_THREADS, 4); + conf.setInt(CompactSplit.SMALL_COMPACTION_THREADS, 5); + conf.setInt(CompactSplit.SPLIT_THREADS, 6); + conf.setInt(CompactSplit.MERGE_THREADS, 7); try { regionServer.compactSplitThread.onConfigurationChange(conf); } catch (IllegalArgumentException iae) { @@ -133,10 +133,10 @@ public class TestCompactSplitThread { assertEquals(7, regionServer.compactSplitThread.getMergeThreadNum()); // change smaller configurations and do online update - conf.setInt(CompactSplitThread.LARGE_COMPACTION_THREADS, 2); - conf.setInt(CompactSplitThread.SMALL_COMPACTION_THREADS, 3); - conf.setInt(CompactSplitThread.SPLIT_THREADS, 4); - conf.setInt(CompactSplitThread.MERGE_THREADS, 5); + conf.setInt(CompactSplit.LARGE_COMPACTION_THREADS, 2); + conf.setInt(CompactSplit.SMALL_COMPACTION_THREADS, 3); + conf.setInt(CompactSplit.SPLIT_THREADS, 4); + conf.setInt(CompactSplit.MERGE_THREADS, 5); try { regionServer.compactSplitThread.onConfigurationChange(conf); } catch (IllegalArgumentException iae) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java index bc51c41..4f6b9e4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java @@ -294,7 +294,7 @@ public class TestCompaction { // setup a compact/split thread on a mock server HRegionServer mockServer = Mockito.mock(HRegionServer.class); Mockito.when(mockServer.getConfiguration()).thenReturn(r.getBaseConf()); - CompactSplitThread thread = new CompactSplitThread(mockServer); + CompactSplit thread = new CompactSplit(mockServer); Mockito.when(mockServer.getCompactSplitThread()).thenReturn(thread); // setup a region/store with some files @@ -318,7 +318,7 @@ public class TestCompaction { // setup a compact/split thread on a mock server HRegionServer mockServer = Mockito.mock(HRegionServer.class); Mockito.when(mockServer.getConfiguration()).thenReturn(r.getBaseConf()); - CompactSplitThread thread = new CompactSplitThread(mockServer); + CompactSplit thread = new CompactSplit(mockServer); Mockito.when(mockServer.getCompactSplitThread()).thenReturn(thread); // setup a region/store with some files @@ -357,7 +357,7 @@ public class TestCompaction { /** * HBASE-7947: Regression test to ensure adding to the correct list in the - * {@link CompactSplitThread} + * {@link CompactSplit} * @throws Exception on failure */ @Test @@ -365,7 +365,7 @@ public class TestCompaction { // setup a compact/split thread on a 
mock server HRegionServer mockServer = Mockito.mock(HRegionServer.class); Mockito.when(mockServer.getConfiguration()).thenReturn(r.getBaseConf()); - CompactSplitThread thread = new CompactSplitThread(mockServer); + CompactSplit thread = new CompactSplit(mockServer); Mockito.when(mockServer.getCompactSplitThread()).thenReturn(thread); // setup a region/store with some files @@ -550,7 +550,7 @@ public class TestCompaction { when(mockServer.isStopped()).thenReturn(false); when(mockServer.getConfiguration()).thenReturn(conf); when(mockServer.getChoreService()).thenReturn(new ChoreService("test")); - CompactSplitThread cst = new CompactSplitThread(mockServer); + CompactSplit cst = new CompactSplit(mockServer); when(mockServer.getCompactSplitThread()).thenReturn(cst); //prevent large compaction thread pool stealing job from small compaction queue. cst.shutdownLongCompactions(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionFileSystem.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionFileSystem.java index 88bbffb..0aa39f6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionFileSystem.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionFileSystem.java @@ -107,8 +107,8 @@ public class TestHRegionFileSystem { // alter through setting HStore#BLOCK_STORAGE_POLICY_KEY in HColumnDescriptor hcdA.setValue(HStore.BLOCK_STORAGE_POLICY_KEY, "ONE_SSD"); admin.modifyColumnFamily(TABLE_NAME, hcdA); - while (TEST_UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStates() - .isRegionsInTransition()) { + while (TEST_UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager(). + getRegionStates().hasRegionsInTransition()) { Thread.sleep(200); LOG.debug("Waiting on table to finish schema altering"); } @@ -117,7 +117,7 @@ public class TestHRegionFileSystem { hcdB.setStoragePolicy("ALL_SSD"); admin.modifyColumnFamily(TABLE_NAME, hcdB); while (TEST_UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStates() - .isRegionsInTransition()) { + .hasRegionsInTransition()) { Thread.sleep(200); LOG.debug("Waiting on table to finish schema altering"); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java index 3e6d180..de56a40 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java @@ -56,12 +56,12 @@ import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.exceptions.MergeRegionException; -import org.apache.hadoop.hbase.master.AssignmentManager; +import org.apache.hadoop.hbase.master.assignment.AssignmentManager; +import org.apache.hadoop.hbase.master.assignment.RegionStates; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.MasterRpcServices; import org.apache.hadoop.hbase.master.RegionState; import org.apache.hadoop.hbase.master.RegionState.State; -import org.apache.hadoop.hbase.master.RegionStates; import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode; import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse; @@ -174,7 +174,7 @@ public class TestRegionMergeTransactionOnCluster { assertTrue(regionStates.isRegionInState(hri, State.MERGED)); // We should not be able to unassign it either - am.unassign(hri, null); + am.unassign(hri); assertFalse("Merged region can't be unassigned", regionStates.isRegionInTransition(hri)); assertTrue(regionStates.isRegionInState(hri, State.MERGED)); @@ -326,12 +326,12 @@ public class TestRegionMergeTransactionOnCluster { try { // Create table and load data. Table table = createTableAndLoadData(MASTER, tableName); - RegionStates regionStates = MASTER.getAssignmentManager().getRegionStates(); - List regions = regionStates.getRegionsOfTable(tableName); + AssignmentManager am = MASTER.getAssignmentManager(); + List regions = am.getRegionStates().getRegionsOfTable(tableName); // Fake offline one region HRegionInfo a = regions.get(0); HRegionInfo b = regions.get(1); - regionStates.regionOffline(a); + am.offlineRegion(a); try { // Merge offline region. Region a is offline here admin.mergeRegionsAsync(a.getEncodedNameAsBytes(), b.getEncodedNameAsBytes(), false) @@ -552,7 +552,7 @@ public class TestRegionMergeTransactionOnCluster { if (enabled.get() && req.getTransition(0).getTransitionCode() == TransitionCode.READY_TO_MERGE && !resp.hasErrorMessage()) { RegionStates regionStates = myMaster.getAssignmentManager().getRegionStates(); - for (RegionState regionState: regionStates.getRegionsInTransition()) { + for (RegionState regionState: regionStates.getRegionsStateInTransition()) { // Find the merging_new region and remove it if (regionState.isMergingNew()) { regionStates.deleteRegion(regionState.getRegion()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java index 966d794..3ebc3f0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java @@ -39,7 +39,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.Abortable; +import org.apache.hadoop.hbase.CategoryBasedTimeout; import org.apache.hadoop.hbase.CoordinatedStateManager; import org.apache.hadoop.hbase.Coprocessor; import org.apache.hadoop.hbase.CoprocessorEnvironment; @@ -69,20 +69,20 @@ import org.apache.hadoop.hbase.client.TestReplicasClient.SlowMeCopro; import org.apache.hadoop.hbase.coprocessor.BaseMasterObserver; import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.ObserverContext; -import org.apache.hadoop.hbase.master.AssignmentManager; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.MasterRpcServices; import org.apache.hadoop.hbase.master.RegionState; import org.apache.hadoop.hbase.master.RegionState.State; -import org.apache.hadoop.hbase.master.RegionStates; -import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode; -import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest; -import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse; +import org.apache.hadoop.hbase.master.assignment.AssignmentManager; +import org.apache.hadoop.hbase.master.assignment.RegionStates; import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext; import org.apache.hadoop.hbase.regionserver.throttle.NoLimitThroughputController; import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController; import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException; +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.util.Bytes; @@ -103,6 +103,7 @@ import org.junit.Rule; import org.junit.Test; import org.junit.experimental.categories.Category; import org.junit.rules.TestName; +import org.junit.rules.TestRule; /** * The below tests are testing split region against a running cluster @@ -112,6 +113,8 @@ import org.junit.rules.TestName; public class TestSplitTransactionOnCluster { private static final Log LOG = LogFactory.getLog(TestSplitTransactionOnCluster.class); + @Rule public final TestRule timeout = CategoryBasedTimeout.builder().withTimeout(this.getClass()). + withLookingForStuckThread(true).build(); private Admin admin = null; private MiniHBaseCluster cluster = null; private static final int NB_SERVERS = 3; @@ -150,8 +153,7 @@ public class TestSplitTransactionOnCluster { throws IOException, InterruptedException { assertEquals(1, regions.size()); HRegionInfo hri = regions.get(0).getRegionInfo(); - cluster.getMaster().getAssignmentManager() - .waitOnRegionToClearRegionsInTransition(hri, 600000); + cluster.getMaster().getAssignmentManager().waitForAssignment(hri, 600000); return hri; } @@ -215,7 +217,7 @@ public class TestSplitTransactionOnCluster { observer.latch.await(); LOG.info("Waiting for region to come out of RIT"); - cluster.getMaster().getAssignmentManager().waitOnRegionToClearRegionsInTransition(hri, 60000); + cluster.getMaster().getAssignmentManager().waitForAssignment(hri, 60000); } finally { admin.setBalancerRunning(true, false); master.setCatalogJanitorEnabled(true); @@ -321,8 +323,9 @@ public class TestSplitTransactionOnCluster { assertEquals(regionCount, ProtobufUtil.getOnlineRegions( server.getRSRpcServices()).size()); } + /* TODO!!! regionStates.regionOnline(hri, server.getServerName()); - +*/ // Now try splitting and it should work. 
split(hri, server, regionCount); // Get daughters @@ -697,7 +700,7 @@ public class TestSplitTransactionOnCluster { assertTrue(regionStates.isRegionInState(hri, State.SPLIT)); // We should not be able to unassign it either - am.unassign(hri, null); + am.unassign(hri); assertFalse("Split region can't be unassigned", regionStates.isRegionInTransition(hri)); assertTrue(regionStates.isRegionInState(hri, State.SPLIT)); @@ -939,11 +942,14 @@ public class TestSplitTransactionOnCluster { if (enabled.get() && req.getTransition(0).getTransitionCode().equals( TransitionCode.READY_TO_SPLIT) && !resp.hasErrorMessage()) { RegionStates regionStates = myMaster.getAssignmentManager().getRegionStates(); - for (RegionState regionState: regionStates.getRegionsInTransition()) { + for (RegionStates.RegionStateNode regionState: + regionStates.getRegionsInTransition()) { + /* TODO!!!! // Find the merging_new region and remove it if (regionState.isSplittingNew()) { regionStates.deleteRegion(regionState.getRegion()); } + */ } } return resp; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncLogRolling.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncLogRolling.java index fabf6d2..c138747 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncLogRolling.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncLogRolling.java @@ -21,6 +21,7 @@ import static org.junit.Assert.assertEquals; import java.io.IOException; +import org.apache.hadoop.hbase.CategoryBasedTimeout; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.client.Table; @@ -31,11 +32,15 @@ import org.apache.hadoop.hbase.wal.WALFactory; import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.junit.BeforeClass; +import org.junit.Rule; import org.junit.Test; import org.junit.experimental.categories.Category; +import org.junit.rules.TestRule; @Category({ VerySlowRegionServerTests.class, LargeTests.class }) public class TestAsyncLogRolling extends AbstractTestLogRolling { + @Rule public final TestRule timeout = CategoryBasedTimeout.builder().withTimeout(this.getClass()). 
+ withLookingForStuckThread(true).build(); @BeforeClass public static void setUpBeforeClass() throws Exception { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java index 19c534e..9c4759e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java @@ -34,6 +34,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.CategoryBasedTimeout; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; @@ -58,13 +59,17 @@ import org.apache.hadoop.hbase.wal.WALFactory; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.junit.BeforeClass; +import org.junit.Rule; import org.junit.Test; import org.junit.experimental.categories.Category; +import org.junit.rules.TestRule; @Category({ VerySlowRegionServerTests.class, LargeTests.class }) public class TestLogRolling extends AbstractTestLogRolling { private static final Log LOG = LogFactory.getLog(TestLogRolling.class); + @Rule public final TestRule timeout = CategoryBasedTimeout.builder().withTimeout(this.getClass()). + withLookingForStuckThread(true).build(); @BeforeClass public static void setUpBeforeClass() throws Exception { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestSecureAsyncWALReplay.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestSecureAsyncWALReplay.java index 5b8b404..d31d8cb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestSecureAsyncWALReplay.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestSecureAsyncWALReplay.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.regionserver.wal; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.CategoryBasedTimeout; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.io.crypto.KeyProviderForTesting; import org.apache.hadoop.hbase.testclassification.MediumTests; @@ -25,10 +26,14 @@ import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.wal.AsyncFSWALProvider.AsyncWriter; import org.apache.hadoop.hbase.wal.WAL.Reader; import org.junit.BeforeClass; +import org.junit.Rule; import org.junit.experimental.categories.Category; +import org.junit.rules.TestRule; @Category({ RegionServerTests.class, MediumTests.class }) public class TestSecureAsyncWALReplay extends TestAsyncWALReplay { + @Rule public final TestRule timeout = CategoryBasedTimeout.builder().withTimeout(this.getClass()). 
+ withLookingForStuckThread(true).build(); @BeforeClass public static void setUpBeforeClass() throws Exception { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java index e2aa580..d93c782 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java @@ -22,16 +22,21 @@ import java.io.IOException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.CategoryBasedTimeout; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WALFactory; import org.junit.BeforeClass; +import org.junit.Rule; import org.junit.experimental.categories.Category; +import org.junit.rules.TestRule; @Category({ RegionServerTests.class, MediumTests.class }) public class TestWALReplay extends AbstractTestWALReplay { + @Rule public final TestRule timeout = CategoryBasedTimeout.builder().withTimeout(this.getClass()). + withLookingForStuckThread(true).build(); @BeforeClass public static void setUpBeforeClass() throws Exception { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java index 574d546..8459454 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java @@ -61,7 +61,8 @@ import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.coprocessor.BaseMasterObserver; import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.ObserverContext; -import org.apache.hadoop.hbase.master.RegionStates; +import org.apache.hadoop.hbase.master.assignment.AssignmentManager; +import org.apache.hadoop.hbase.master.assignment.RegionStates; import org.apache.hadoop.hbase.mob.MobFileName; import org.apache.hadoop.hbase.mob.MobUtils; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; @@ -91,6 +92,7 @@ public class BaseTestHBaseFsck { protected final static String FAM_STR = "fam"; protected final static byte[] FAM = Bytes.toBytes(FAM_STR); protected final static int REGION_ONLINE_TIMEOUT = 800; + protected static AssignmentManager assignmentManager; protected static RegionStates regionStates; protected static ExecutorService tableExecutorService; protected static ScheduledThreadPoolExecutor hbfsckExecutorService; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckMOB.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckMOB.java index b04689c..720878b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckMOB.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckMOB.java @@ -26,7 +26,7 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; import org.apache.hadoop.hbase.io.hfile.TestHFile; -import org.apache.hadoop.hbase.master.AssignmentManager; +import 
org.apache.hadoop.hbase.master.assignment.AssignmentManager; import org.apache.hadoop.hbase.mob.MobUtils; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.MiscTests; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java index 0e3355a..7c28faa 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java @@ -45,9 +45,10 @@ import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.replication.ReplicationAdmin; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; import org.apache.hadoop.hbase.io.hfile.TestHFile; -import org.apache.hadoop.hbase.master.AssignmentManager; -import org.apache.hadoop.hbase.master.RegionState; -import org.apache.hadoop.hbase.master.RegionStates; +import org.apache.hadoop.hbase.master.assignment.AssignmentManager; +import org.apache.hadoop.hbase.master.assignment.RegionStates; +import org.apache.hadoop.hbase.master.assignment.RegionStates.RegionStateNode; +import org.apache.hadoop.hbase.master.RegionPlan; import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; import org.apache.hadoop.hbase.master.procedure.SplitTableRegionProcedure; import org.apache.hadoop.hbase.regionserver.HRegion; @@ -1626,13 +1627,11 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck { splitR.createDaughterRegions(env); AssignmentManager am = cluster.getMaster().getAssignmentManager(); - for (RegionState state : am.getRegionStates().getRegionsInTransition()) { - am.regionOffline(state.getRegion()); + for (RegionStateNode state : am.getRegionStates().getRegionsInTransition()) { + am.offlineRegion(state.getRegionInfo()); } - - Map regionsMap = new HashMap(); - regionsMap.put(regions.get(0).getRegionInfo(), regionServer.getServerName()); - am.assign(regionsMap); + am.moveAsync(new RegionPlan(regions.get(0).getRegionInfo(), + regionServer.getServerName(), regionServer.getServerName())); am.waitForAssignment(regions.get(0).getRegionInfo()); HBaseFsck hbck = doFsck(conf, false); assertErrors(hbck, new HBaseFsck.ErrorReporter.ERROR_CODE[] { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckReplicas.java index 9b92a69..d1bf1ea 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckReplicas.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckReplicas.java @@ -28,7 +28,7 @@ import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.RegionReplicaUtil; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; -import org.apache.hadoop.hbase.master.AssignmentManager; +import org.apache.hadoop.hbase.master.assignment.AssignmentManager; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.MiscTests; import org.junit.AfterClass; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckTwoRS.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckTwoRS.java index 99a41f5..bef0865 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckTwoRS.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckTwoRS.java @@ -19,7 +19,13 @@ package org.apache.hadoop.hbase.util; -import com.google.common.collect.Multimap; +import static org.apache.hadoop.hbase.util.hbck.HbckTestingUtil.assertErrors; +import static org.apache.hadoop.hbase.util.hbck.HbckTestingUtil.assertNoErrors; +import static org.apache.hadoop.hbase.util.hbck.HbckTestingUtil.doFsck; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; import java.util.concurrent.ScheduledThreadPoolExecutor; import java.util.concurrent.SynchronousQueue; @@ -48,8 +54,6 @@ import org.apache.hadoop.hbase.io.HFileLink; import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.io.hfile.HFileContext; import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder; -import org.apache.hadoop.hbase.master.AssignmentManager; -import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.testclassification.LargeTests; @@ -63,8 +67,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; import org.junit.rules.TestName; -import static org.apache.hadoop.hbase.util.hbck.HbckTestingUtil.*; -import static org.junit.Assert.*; +import com.google.common.collect.Multimap; @Category({MiscTests.class, LargeTests.class}) public class TestHBaseFsckTwoRS extends BaseTestHBaseFsck { @@ -90,8 +93,7 @@ public class TestHBaseFsckTwoRS extends BaseTestHBaseFsck { hbfsckExecutorService = new ScheduledThreadPoolExecutor(POOL_SIZE); - AssignmentManager assignmentManager = - TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager(); + assignmentManager = TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager(); regionStates = assignmentManager.getRegionStates(); connection = (ClusterConnection) TEST_UTIL.getConnection(); @@ -120,7 +122,7 @@ public class TestHBaseFsckTwoRS extends BaseTestHBaseFsck { public void testFixAssignmentsWhenMETAinTransition() throws Exception { MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster(); admin.closeRegion(cluster.getServerHoldingMeta(), HRegionInfo.FIRST_META_REGIONINFO); - regionStates.regionOffline(HRegionInfo.FIRST_META_REGIONINFO); + assignmentManager.offlineRegion(HRegionInfo.FIRST_META_REGIONINFO); new MetaTableLocator().deleteMetaLocation(cluster.getMaster().getZooKeeper()); assertFalse(regionStates.isRegionOnline(HRegionInfo.FIRST_META_REGIONINFO)); HBaseFsck hbck = doFsck(conf, true); @@ -405,7 +407,6 @@ public class TestHBaseFsckTwoRS extends BaseTestHBaseFsck { // Mess it up by creating an overlap MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster(); - HMaster master = cluster.getMaster(); HRegionInfo hriOverlap1 = createRegion(tbl.getTableDescriptor(), Bytes.toBytes("A"), Bytes.toBytes("AB")); TEST_UTIL.assignRegion(hriOverlap1); @@ -451,7 +452,7 @@ public class TestHBaseFsckTwoRS extends BaseTestHBaseFsck { try (Table meta = connection.getTable(TableName.META_TABLE_NAME, tableExecutorService)) { Put put = new Put(regionName); put.addColumn(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER, - Bytes.toBytes(serverName.getHostAndPort())); + Bytes.toBytes(serverName.getAddress().toString())); meta.put(put); } -- 2.8.4 (Apple Git-73)