From 66063acc72c5c45132f896223d5ffe4075825352 Mon Sep 17 00:00:00 2001
From: Umesh Agashe
Date: Fri, 23 Jun 2017 14:44:28 -0700
Subject: [PATCH] HBASE-18261 Created RecoverMetaProcedure and used it from
 ServerCrashProcedure and HMaster.finishActiveMasterInitialization(). This
 procedure can be used from any code before accessing meta, to
 initialize/recover meta.

Change-Id: If549fa67451c12cd7b4a3fdce6fab9673e45dfea
---
 .../hbase/procedure2/StateMachineProcedure.java    |   26 +
 .../protobuf/generated/MasterProcedureProtos.java  | 1514 +++++++++++++++-----
 .../src/main/protobuf/MasterProcedure.proto        |   11 +
 .../org/apache/hadoop/hbase/master/HMaster.java    |   39 +-
 .../hadoop/hbase/master/MasterMetaBootstrap.java   |   73 +-
 .../apache/hadoop/hbase/master/MasterServices.java |    5 +
 .../hadoop/hbase/master/MasterWalManager.java      |    2 +-
 .../hbase/master/assignment/AssignmentManager.java |    2 +
 .../master/procedure/RecoverMetaProcedure.java     |  252 ++++
 .../master/procedure/ServerCrashProcedure.java     |   56 +-
 .../hbase/master/MockNoopMasterServices.java       |    9 +-
 .../hadoop/hbase/master/TestMasterNoCluster.java   |    2 +-
 .../procedure/MasterProcedureTestingUtility.java   |   11 +-
 .../master/procedure/TestServerCrashProcedure.java |   28 +-
 14 files changed, 1561 insertions(+), 469 deletions(-)
 create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RecoverMetaProcedure.java

diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/StateMachineProcedure.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/StateMachineProcedure.java
index becd9b7f516dfab221d2f24d8e83ab4cc0149db5..5de50668fa7ec6cc0c240a30c920b02d8d4b1cce 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/StateMachineProcedure.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/StateMachineProcedure.java
@@ -59,6 +59,20 @@ public abstract class StateMachineProcedure
   private List<Procedure> subProcList = null;
 
+  protected final int getCycles() {
+    return cycles;
+  }
+
+  /**
+   * Cycles on the same state. Good for figuring out if we are stuck.
+   */
+  private int cycles = 0;
+
+  /**
+   * Ordinal of the previous state. So we can tell if we are progressing or not.
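+   * <p>A minimal sketch, assuming a hypothetical subclass and a hypothetical
+   * MAX_CYCLES threshold, of how executeFromState() could consult these
+   * counters to fail fast rather than spin on one state forever:
+   * <pre>
+   *   if (getCycles() > MAX_CYCLES) {
+   *     setFailure("state-machine", new IOException("Stuck at state=" + state));
+   *     return Flow.NO_MORE_STATE;
+   *   }
+   * </pre>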
+ */ + private int previousState; + protected enum Flow { HAS_MORE_STATE, NO_MORE_STATE, @@ -152,6 +166,18 @@ public abstract class StateMachineProcedure if (stateCount == 0) { setNextState(getStateId(state)); } + + if (LOG.isTraceEnabled()) { + LOG.trace(state + " " + this + "; cycles=" + this.cycles); + } + // Keep running count of cycles + if (getStateId(state) != this.previousState) { + this.previousState = getStateId(state); + this.cycles = 0; + } else { + this.cycles++; + } + stateFlow = executeFromState(env, state); if (!hasMoreState()) setNextState(EOF_STATE); if (subProcList != null && !subProcList.isEmpty()) { diff --git a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProcedureProtos.java b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProcedureProtos.java index 0ec9b22953fa6b7d1e2204a7d144f8b7936c20a0..853c177539ad0092d3840ad71e79f55ccb05aee4 100644 --- a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProcedureProtos.java +++ b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProcedureProtos.java @@ -2345,6 +2345,96 @@ public final class MasterProcedureProtos { } /** + * Protobuf enum {@code hbase.pb.RecoverMetaState} + */ + public enum RecoverMetaState + implements org.apache.hadoop.hbase.shaded.com.google.protobuf.ProtocolMessageEnum { + /** + * RECOVER_META_SPLIT_LOGS = 1; + */ + RECOVER_META_SPLIT_LOGS(1), + /** + * RECOVER_META_ASSIGN_REGIONS = 2; + */ + RECOVER_META_ASSIGN_REGIONS(2), + ; + + /** + * RECOVER_META_SPLIT_LOGS = 1; + */ + public static final int RECOVER_META_SPLIT_LOGS_VALUE = 1; + /** + * RECOVER_META_ASSIGN_REGIONS = 2; + */ + public static final int RECOVER_META_ASSIGN_REGIONS_VALUE = 2; + + + public final int getNumber() { + return value; + } + + /** + * @deprecated Use {@link #forNumber(int)} instead. 
+ */ + @java.lang.Deprecated + public static RecoverMetaState valueOf(int value) { + return forNumber(value); + } + + public static RecoverMetaState forNumber(int value) { + switch (value) { + case 1: return RECOVER_META_SPLIT_LOGS; + case 2: return RECOVER_META_ASSIGN_REGIONS; + default: return null; + } + } + + public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.EnumLiteMap< + RecoverMetaState> internalValueMap = + new org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.EnumLiteMap() { + public RecoverMetaState findValueByNumber(int number) { + return RecoverMetaState.forNumber(number); + } + }; + + public final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(ordinal()); + } + public final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(18); + } + + private static final RecoverMetaState[] VALUES = values(); + + public static RecoverMetaState valueOf( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private RecoverMetaState(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:hbase.pb.RecoverMetaState) + } + + /** * Protobuf enum {@code hbase.pb.RegionTransitionState} */ public enum RegionTransitionState @@ -2420,7 +2510,7 @@ public final class MasterProcedureProtos { } public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(18); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(19); } private static final RegionTransitionState[] VALUES = values(); @@ -2510,7 +2600,7 @@ public final class MasterProcedureProtos { } public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(19); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(20); } private static final MoveRegionState[] VALUES = values(); @@ -2609,7 +2699,7 @@ public final class MasterProcedureProtos { } public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(20); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(21); } private static final GCRegionState[] VALUES = values(); @@ -2708,7 +2798,7 @@ public final class MasterProcedureProtos { } public 
static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(21); + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(22); } private static final GCMergedRegionsState[] VALUES = values(); @@ -25748,68 +25838,55 @@ public final class MasterProcedureProtos { } - public interface AssignRegionStateDataOrBuilder extends - // @@protoc_insertion_point(interface_extends:hbase.pb.AssignRegionStateData) + public interface RecoverMetaStateDataOrBuilder extends + // @@protoc_insertion_point(interface_extends:hbase.pb.RecoverMetaStateData) org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder { /** - * required .hbase.pb.RegionTransitionState transition_state = 1; - */ - boolean hasTransitionState(); - /** - * required .hbase.pb.RegionTransitionState transition_state = 1; - */ - org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionTransitionState getTransitionState(); - - /** - * required .hbase.pb.RegionInfo region_info = 2; + * optional .hbase.pb.ServerName failed_meta_server = 1; */ - boolean hasRegionInfo(); + boolean hasFailedMetaServer(); /** - * required .hbase.pb.RegionInfo region_info = 2; + * optional .hbase.pb.ServerName failed_meta_server = 1; */ - org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo(); + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName getFailedMetaServer(); /** - * required .hbase.pb.RegionInfo region_info = 2; + * optional .hbase.pb.ServerName failed_meta_server = 1; */ - org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder(); + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder getFailedMetaServerOrBuilder(); /** - * optional bool force_new_plan = 3 [default = false]; + * optional bool should_split_wal = 2 [default = true]; */ - boolean hasForceNewPlan(); + boolean hasShouldSplitWal(); /** - * optional bool force_new_plan = 3 [default = false]; + * optional bool should_split_wal = 2 [default = true]; */ - boolean getForceNewPlan(); + boolean getShouldSplitWal(); /** - * optional .hbase.pb.ServerName target_server = 4; - */ - boolean hasTargetServer(); - /** - * optional .hbase.pb.ServerName target_server = 4; + * optional int32 replica_id = 3 [default = 0]; */ - org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName getTargetServer(); + boolean hasReplicaId(); /** - * optional .hbase.pb.ServerName target_server = 4; + * optional int32 replica_id = 3 [default = 0]; */ - org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder getTargetServerOrBuilder(); + int getReplicaId(); } /** - * Protobuf type {@code hbase.pb.AssignRegionStateData} + * Protobuf type {@code hbase.pb.RecoverMetaStateData} */ - public static final class AssignRegionStateData extends + public static final class RecoverMetaStateData extends org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:hbase.pb.AssignRegionStateData) - AssignRegionStateDataOrBuilder { - // Use AssignRegionStateData.newBuilder() to construct. 
- private AssignRegionStateData(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder builder) { + // @@protoc_insertion_point(message_implements:hbase.pb.RecoverMetaStateData) + RecoverMetaStateDataOrBuilder { + // Use RecoverMetaStateData.newBuilder() to construct. + private RecoverMetaStateData(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder builder) { super(builder); } - private AssignRegionStateData() { - transitionState_ = 1; - forceNewPlan_ = false; + private RecoverMetaStateData() { + shouldSplitWal_ = true; + replicaId_ = 0; } @java.lang.Override @@ -25817,7 +25894,7 @@ public final class MasterProcedureProtos { getUnknownFields() { return this.unknownFields; } - private AssignRegionStateData( + private RecoverMetaStateData( org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { @@ -25840,46 +25917,27 @@ public final class MasterProcedureProtos { } break; } - case 8: { - int rawValue = input.readEnum(); - org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionTransitionState value = org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionTransitionState.valueOf(rawValue); - if (value == null) { - unknownFields.mergeVarintField(1, rawValue); - } else { - bitField0_ |= 0x00000001; - transitionState_ = rawValue; - } - break; - } - case 18: { - org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder subBuilder = null; - if (((bitField0_ & 0x00000002) == 0x00000002)) { - subBuilder = regionInfo_.toBuilder(); + case 10: { + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = failedMetaServer_.toBuilder(); } - regionInfo_ = input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.PARSER, extensionRegistry); + failedMetaServer_ = input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.PARSER, extensionRegistry); if (subBuilder != null) { - subBuilder.mergeFrom(regionInfo_); - regionInfo_ = subBuilder.buildPartial(); + subBuilder.mergeFrom(failedMetaServer_); + failedMetaServer_ = subBuilder.buildPartial(); } + bitField0_ |= 0x00000001; + break; + } + case 16: { bitField0_ |= 0x00000002; + shouldSplitWal_ = input.readBool(); break; } case 24: { bitField0_ |= 0x00000004; - forceNewPlan_ = input.readBool(); - break; - } - case 34: { - org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder subBuilder = null; - if (((bitField0_ & 0x00000008) == 0x00000008)) { - subBuilder = targetServer_.toBuilder(); - } - targetServer_ = input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.PARSER, extensionRegistry); - if (subBuilder != null) { - subBuilder.mergeFrom(targetServer_); - targetServer_ = subBuilder.buildPartial(); - } - bitField0_ |= 0x00000008; + replicaId_ = input.readInt32(); break; } } @@ -25896,88 +25954,66 @@ public final class MasterProcedureProtos { } public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_AssignRegionStateData_descriptor; + return 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_RecoverMetaStateData_descriptor; } protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_AssignRegionStateData_fieldAccessorTable + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_RecoverMetaStateData_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.AssignRegionStateData.class, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.AssignRegionStateData.Builder.class); + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RecoverMetaStateData.class, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RecoverMetaStateData.Builder.class); } private int bitField0_; - public static final int TRANSITION_STATE_FIELD_NUMBER = 1; - private int transitionState_; + public static final int FAILED_META_SERVER_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName failedMetaServer_; /** - * required .hbase.pb.RegionTransitionState transition_state = 1; + * optional .hbase.pb.ServerName failed_meta_server = 1; */ - public boolean hasTransitionState() { + public boolean hasFailedMetaServer() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * required .hbase.pb.RegionTransitionState transition_state = 1; - */ - public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionTransitionState getTransitionState() { - org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionTransitionState result = org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionTransitionState.valueOf(transitionState_); - return result == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionTransitionState.REGION_TRANSITION_QUEUE : result; - } - - public static final int REGION_INFO_FIELD_NUMBER = 2; - private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo regionInfo_; - /** - * required .hbase.pb.RegionInfo region_info = 2; - */ - public boolean hasRegionInfo() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * required .hbase.pb.RegionInfo region_info = 2; + * optional .hbase.pb.ServerName failed_meta_server = 1; */ - public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo() { - return regionInfo_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance() : regionInfo_; + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName getFailedMetaServer() { + return failedMetaServer_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance() : failedMetaServer_; } /** - * required .hbase.pb.RegionInfo region_info = 2; + * optional .hbase.pb.ServerName failed_meta_server = 1; */ - public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder() { - return regionInfo_ == null ? 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance() : regionInfo_; + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder getFailedMetaServerOrBuilder() { + return failedMetaServer_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance() : failedMetaServer_; } - public static final int FORCE_NEW_PLAN_FIELD_NUMBER = 3; - private boolean forceNewPlan_; + public static final int SHOULD_SPLIT_WAL_FIELD_NUMBER = 2; + private boolean shouldSplitWal_; /** - * optional bool force_new_plan = 3 [default = false]; + * optional bool should_split_wal = 2 [default = true]; */ - public boolean hasForceNewPlan() { - return ((bitField0_ & 0x00000004) == 0x00000004); + public boolean hasShouldSplitWal() { + return ((bitField0_ & 0x00000002) == 0x00000002); } /** - * optional bool force_new_plan = 3 [default = false]; + * optional bool should_split_wal = 2 [default = true]; */ - public boolean getForceNewPlan() { - return forceNewPlan_; + public boolean getShouldSplitWal() { + return shouldSplitWal_; } - public static final int TARGET_SERVER_FIELD_NUMBER = 4; - private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName targetServer_; + public static final int REPLICA_ID_FIELD_NUMBER = 3; + private int replicaId_; /** - * optional .hbase.pb.ServerName target_server = 4; - */ - public boolean hasTargetServer() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - /** - * optional .hbase.pb.ServerName target_server = 4; + * optional int32 replica_id = 3 [default = 0]; */ - public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName getTargetServer() { - return targetServer_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance() : targetServer_; + public boolean hasReplicaId() { + return ((bitField0_ & 0x00000004) == 0x00000004); } /** - * optional .hbase.pb.ServerName target_server = 4; + * optional int32 replica_id = 3 [default = 0]; */ - public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder getTargetServerOrBuilder() { - return targetServer_ == null ? 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance() : targetServer_; + public int getReplicaId() { + return replicaId_; } private byte memoizedIsInitialized = -1; @@ -25986,20 +26022,8 @@ public final class MasterProcedureProtos { if (isInitialized == 1) return true; if (isInitialized == 0) return false; - if (!hasTransitionState()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasRegionInfo()) { - memoizedIsInitialized = 0; - return false; - } - if (!getRegionInfo().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - if (hasTargetServer()) { - if (!getTargetServer().isInitialized()) { + if (hasFailedMetaServer()) { + if (!getFailedMetaServer().isInitialized()) { memoizedIsInitialized = 0; return false; } @@ -26011,16 +26035,13 @@ public final class MasterProcedureProtos { public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output) throws java.io.IOException { if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeEnum(1, transitionState_); + output.writeMessage(1, getFailedMetaServer()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeMessage(2, getRegionInfo()); + output.writeBool(2, shouldSplitWal_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeBool(3, forceNewPlan_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - output.writeMessage(4, getTargetServer()); + output.writeInt32(3, replicaId_); } unknownFields.writeTo(output); } @@ -26032,19 +26053,15 @@ public final class MasterProcedureProtos { size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream - .computeEnumSize(1, transitionState_); + .computeMessageSize(1, getFailedMetaServer()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream - .computeMessageSize(2, getRegionInfo()); + .computeBoolSize(2, shouldSplitWal_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream - .computeBoolSize(3, forceNewPlan_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream - .computeMessageSize(4, getTargetServer()); + .computeInt32Size(3, replicaId_); } size += unknownFields.getSerializedSize(); memoizedSize = size; @@ -26057,24 +26074,861 @@ public final class MasterProcedureProtos { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.AssignRegionStateData)) { + if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RecoverMetaStateData)) { return super.equals(obj); } - org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.AssignRegionStateData other = (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.AssignRegionStateData) obj; + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RecoverMetaStateData other = (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RecoverMetaStateData) obj; boolean result = true; - result = result && (hasTransitionState() == other.hasTransitionState()); - if (hasTransitionState()) { - result = result && transitionState_ == other.transitionState_; + result = result && (hasFailedMetaServer() == other.hasFailedMetaServer()); + if (hasFailedMetaServer()) { + 
result = result && getFailedMetaServer() + .equals(other.getFailedMetaServer()); } - result = result && (hasRegionInfo() == other.hasRegionInfo()); - if (hasRegionInfo()) { - result = result && getRegionInfo() - .equals(other.getRegionInfo()); + result = result && (hasShouldSplitWal() == other.hasShouldSplitWal()); + if (hasShouldSplitWal()) { + result = result && (getShouldSplitWal() + == other.getShouldSplitWal()); } - result = result && (hasForceNewPlan() == other.hasForceNewPlan()); - if (hasForceNewPlan()) { - result = result && (getForceNewPlan() + result = result && (hasReplicaId() == other.hasReplicaId()); + if (hasReplicaId()) { + result = result && (getReplicaId() + == other.getReplicaId()); + } + result = result && unknownFields.equals(other.unknownFields); + return result; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasFailedMetaServer()) { + hash = (37 * hash) + FAILED_META_SERVER_FIELD_NUMBER; + hash = (53 * hash) + getFailedMetaServer().hashCode(); + } + if (hasShouldSplitWal()) { + hash = (37 * hash) + SHOULD_SPLIT_WAL_FIELD_NUMBER; + hash = (53 * hash) + org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.hashBoolean( + getShouldSplitWal()); + } + if (hasReplicaId()) { + hash = (37 * hash) + REPLICA_ID_FIELD_NUMBER; + hash = (53 * hash) + getReplicaId(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RecoverMetaStateData parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RecoverMetaStateData parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RecoverMetaStateData parseFrom(byte[] data) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RecoverMetaStateData parseFrom( + byte[] data, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RecoverMetaStateData parseFrom(java.io.InputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RecoverMetaStateData parseFrom( + java.io.InputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return 
org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RecoverMetaStateData parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RecoverMetaStateData parseDelimitedFrom( + java.io.InputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RecoverMetaStateData parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RecoverMetaStateData parseFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RecoverMetaStateData prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.RecoverMetaStateData} + */ + public static final class Builder extends + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:hbase.pb.RecoverMetaStateData) + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RecoverMetaStateDataOrBuilder { + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_RecoverMetaStateData_descriptor; + } + + protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_RecoverMetaStateData_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RecoverMetaStateData.class, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RecoverMetaStateData.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RecoverMetaStateData.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 + .alwaysUseFieldBuilders) { + getFailedMetaServerFieldBuilder(); + } + } + public Builder clear() { + super.clear(); + if (failedMetaServerBuilder_ == null) { + failedMetaServer_ = null; + } else { + failedMetaServerBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + shouldSplitWal_ = true; + bitField0_ = (bitField0_ & ~0x00000002); + replicaId_ = 0; + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + + public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_RecoverMetaStateData_descriptor; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RecoverMetaStateData getDefaultInstanceForType() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RecoverMetaStateData.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RecoverMetaStateData build() { + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RecoverMetaStateData result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RecoverMetaStateData buildPartial() { + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RecoverMetaStateData result = new 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RecoverMetaStateData(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (failedMetaServerBuilder_ == null) { + result.failedMetaServer_ = failedMetaServer_; + } else { + result.failedMetaServer_ = failedMetaServerBuilder_.build(); + } + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.shouldSplitWal_ = shouldSplitWal_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.replicaId_ = replicaId_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder clone() { + return (Builder) super.clone(); + } + public Builder setField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + Object value) { + return (Builder) super.setField(field, value); + } + public Builder clearField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field) { + return (Builder) super.clearField(field); + } + public Builder clearOneof( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return (Builder) super.clearOneof(oneof); + } + public Builder setRepeatedField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + int index, Object value) { + return (Builder) super.setRepeatedField(field, index, value); + } + public Builder addRepeatedField( + org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field, + Object value) { + return (Builder) super.addRepeatedField(field, value); + } + public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RecoverMetaStateData) { + return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RecoverMetaStateData)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RecoverMetaStateData other) { + if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RecoverMetaStateData.getDefaultInstance()) return this; + if (other.hasFailedMetaServer()) { + mergeFailedMetaServer(other.getFailedMetaServer()); + } + if (other.hasShouldSplitWal()) { + setShouldSplitWal(other.getShouldSplitWal()); + } + if (other.hasReplicaId()) { + setReplicaId(other.getReplicaId()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + public final boolean isInitialized() { + if (hasFailedMetaServer()) { + if (!getFailedMetaServer().isInitialized()) { + return false; + } + } + return true; + } + + public Builder mergeFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RecoverMetaStateData parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = 
(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RecoverMetaStateData) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName failedMetaServer_ = null; + private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder> failedMetaServerBuilder_; + /** + * optional .hbase.pb.ServerName failed_meta_server = 1; + */ + public boolean hasFailedMetaServer() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional .hbase.pb.ServerName failed_meta_server = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName getFailedMetaServer() { + if (failedMetaServerBuilder_ == null) { + return failedMetaServer_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance() : failedMetaServer_; + } else { + return failedMetaServerBuilder_.getMessage(); + } + } + /** + * optional .hbase.pb.ServerName failed_meta_server = 1; + */ + public Builder setFailedMetaServer(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName value) { + if (failedMetaServerBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + failedMetaServer_ = value; + onChanged(); + } else { + failedMetaServerBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * optional .hbase.pb.ServerName failed_meta_server = 1; + */ + public Builder setFailedMetaServer( + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) { + if (failedMetaServerBuilder_ == null) { + failedMetaServer_ = builderForValue.build(); + onChanged(); + } else { + failedMetaServerBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * optional .hbase.pb.ServerName failed_meta_server = 1; + */ + public Builder mergeFailedMetaServer(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName value) { + if (failedMetaServerBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + failedMetaServer_ != null && + failedMetaServer_ != org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()) { + failedMetaServer_ = + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.newBuilder(failedMetaServer_).mergeFrom(value).buildPartial(); + } else { + failedMetaServer_ = value; + } + onChanged(); + } else { + failedMetaServerBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * optional .hbase.pb.ServerName failed_meta_server = 1; + */ + public Builder clearFailedMetaServer() { + if (failedMetaServerBuilder_ == null) { + failedMetaServer_ = null; + onChanged(); + } else { + failedMetaServerBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * optional .hbase.pb.ServerName failed_meta_server = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder getFailedMetaServerBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return 
getFailedMetaServerFieldBuilder().getBuilder(); + } + /** + * optional .hbase.pb.ServerName failed_meta_server = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder getFailedMetaServerOrBuilder() { + if (failedMetaServerBuilder_ != null) { + return failedMetaServerBuilder_.getMessageOrBuilder(); + } else { + return failedMetaServer_ == null ? + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance() : failedMetaServer_; + } + } + /** + * optional .hbase.pb.ServerName failed_meta_server = 1; + */ + private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder> + getFailedMetaServerFieldBuilder() { + if (failedMetaServerBuilder_ == null) { + failedMetaServerBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3< + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder>( + getFailedMetaServer(), + getParentForChildren(), + isClean()); + failedMetaServer_ = null; + } + return failedMetaServerBuilder_; + } + + private boolean shouldSplitWal_ = true; + /** + * optional bool should_split_wal = 2 [default = true]; + */ + public boolean hasShouldSplitWal() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional bool should_split_wal = 2 [default = true]; + */ + public boolean getShouldSplitWal() { + return shouldSplitWal_; + } + /** + * optional bool should_split_wal = 2 [default = true]; + */ + public Builder setShouldSplitWal(boolean value) { + bitField0_ |= 0x00000002; + shouldSplitWal_ = value; + onChanged(); + return this; + } + /** + * optional bool should_split_wal = 2 [default = true]; + */ + public Builder clearShouldSplitWal() { + bitField0_ = (bitField0_ & ~0x00000002); + shouldSplitWal_ = true; + onChanged(); + return this; + } + + private int replicaId_ ; + /** + * optional int32 replica_id = 3 [default = 0]; + */ + public boolean hasReplicaId() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional int32 replica_id = 3 [default = 0]; + */ + public int getReplicaId() { + return replicaId_; + } + /** + * optional int32 replica_id = 3 [default = 0]; + */ + public Builder setReplicaId(int value) { + bitField0_ |= 0x00000004; + replicaId_ = value; + onChanged(); + return this; + } + /** + * optional int32 replica_id = 3 [default = 0]; + */ + public Builder clearReplicaId() { + bitField0_ = (bitField0_ & ~0x00000004); + replicaId_ = 0; + onChanged(); + return this; + } + public final Builder setUnknownFields( + final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + public final Builder mergeUnknownFields( + final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:hbase.pb.RecoverMetaStateData) + } + + // @@protoc_insertion_point(class_scope:hbase.pb.RecoverMetaStateData) + private static final 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RecoverMetaStateData DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RecoverMetaStateData(); + } + + public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RecoverMetaStateData getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser + PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser() { + public RecoverMetaStateData parsePartialFrom( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + return new RecoverMetaStateData(input, extensionRegistry); + } + }; + + public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RecoverMetaStateData getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + + } + + public interface AssignRegionStateDataOrBuilder extends + // @@protoc_insertion_point(interface_extends:hbase.pb.AssignRegionStateData) + org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder { + + /** + * required .hbase.pb.RegionTransitionState transition_state = 1; + */ + boolean hasTransitionState(); + /** + * required .hbase.pb.RegionTransitionState transition_state = 1; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionTransitionState getTransitionState(); + + /** + * required .hbase.pb.RegionInfo region_info = 2; + */ + boolean hasRegionInfo(); + /** + * required .hbase.pb.RegionInfo region_info = 2; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo(); + /** + * required .hbase.pb.RegionInfo region_info = 2; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder(); + + /** + * optional bool force_new_plan = 3 [default = false]; + */ + boolean hasForceNewPlan(); + /** + * optional bool force_new_plan = 3 [default = false]; + */ + boolean getForceNewPlan(); + + /** + * optional .hbase.pb.ServerName target_server = 4; + */ + boolean hasTargetServer(); + /** + * optional .hbase.pb.ServerName target_server = 4; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName getTargetServer(); + /** + * optional .hbase.pb.ServerName target_server = 4; + */ + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder getTargetServerOrBuilder(); + } + /** + * Protobuf type {@code hbase.pb.AssignRegionStateData} + */ + public static final class AssignRegionStateData extends + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:hbase.pb.AssignRegionStateData) + AssignRegionStateDataOrBuilder { + // Use AssignRegionStateData.newBuilder() to construct. 
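+    // A minimal usage sketch, assuming a hypothetical caller; the new
+    // RecoverMetaStateData message above is built through the same generated
+    // newBuilder() pattern as this class. ProtobufUtil.toServerName(...) is the
+    // existing HBase helper assumed here for the optional failed_meta_server field:
+    //   RecoverMetaStateData data = RecoverMetaStateData.newBuilder()
+    //       .setFailedMetaServer(ProtobufUtil.toServerName(failedServer))
+    //       .setShouldSplitWal(true) // field 2, default true
+    //       .setReplicaId(0)         // field 3, default 0
+    //       .build();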
+ private AssignRegionStateData(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private AssignRegionStateData() { + transitionState_ = 1; + forceNewPlan_ = false; + } + + @java.lang.Override + public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private AssignRegionStateData( + org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input, + org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException { + this(); + int mutable_bitField0_ = 0; + org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields = + org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + int rawValue = input.readEnum(); + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionTransitionState value = org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionTransitionState.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(1, rawValue); + } else { + bitField0_ |= 0x00000001; + transitionState_ = rawValue; + } + break; + } + case 18: { + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder subBuilder = null; + if (((bitField0_ & 0x00000002) == 0x00000002)) { + subBuilder = regionInfo_.toBuilder(); + } + regionInfo_ = input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(regionInfo_); + regionInfo_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000002; + break; + } + case 24: { + bitField0_ |= 0x00000004; + forceNewPlan_ = input.readBool(); + break; + } + case 34: { + org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder subBuilder = null; + if (((bitField0_ & 0x00000008) == 0x00000008)) { + subBuilder = targetServer_.toBuilder(); + } + targetServer_ = input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(targetServer_); + targetServer_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000008; + break; + } + } + } + } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException( + e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_AssignRegionStateData_descriptor; + } + + protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_AssignRegionStateData_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.AssignRegionStateData.class, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.AssignRegionStateData.Builder.class); + } + + private int bitField0_; + public static final int TRANSITION_STATE_FIELD_NUMBER = 1; + private int transitionState_; + /** + * required .hbase.pb.RegionTransitionState transition_state = 1; + */ + public boolean hasTransitionState() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .hbase.pb.RegionTransitionState transition_state = 1; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionTransitionState getTransitionState() { + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionTransitionState result = org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionTransitionState.valueOf(transitionState_); + return result == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionTransitionState.REGION_TRANSITION_QUEUE : result; + } + + public static final int REGION_INFO_FIELD_NUMBER = 2; + private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo regionInfo_; + /** + * required .hbase.pb.RegionInfo region_info = 2; + */ + public boolean hasRegionInfo() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .hbase.pb.RegionInfo region_info = 2; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo() { + return regionInfo_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance() : regionInfo_; + } + /** + * required .hbase.pb.RegionInfo region_info = 2; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder() { + return regionInfo_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance() : regionInfo_; + } + + public static final int FORCE_NEW_PLAN_FIELD_NUMBER = 3; + private boolean forceNewPlan_; + /** + * optional bool force_new_plan = 3 [default = false]; + */ + public boolean hasForceNewPlan() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional bool force_new_plan = 3 [default = false]; + */ + public boolean getForceNewPlan() { + return forceNewPlan_; + } + + public static final int TARGET_SERVER_FIELD_NUMBER = 4; + private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName targetServer_; + /** + * optional .hbase.pb.ServerName target_server = 4; + */ + public boolean hasTargetServer() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional .hbase.pb.ServerName target_server = 4; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName getTargetServer() { + return targetServer_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance() : targetServer_; + } + /** + * optional .hbase.pb.ServerName target_server = 4; + */ + public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder getTargetServerOrBuilder() { + return targetServer_ == null ? 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance() : targetServer_; + } + + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + if (!hasTransitionState()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasRegionInfo()) { + memoizedIsInitialized = 0; + return false; + } + if (!getRegionInfo().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + if (hasTargetServer()) { + if (!getTargetServer().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeEnum(1, transitionState_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeMessage(2, getRegionInfo()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeBool(3, forceNewPlan_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeMessage(4, getTargetServer()); + } + unknownFields.writeTo(output); + } + + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream + .computeEnumSize(1, transitionState_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream + .computeMessageSize(2, getRegionInfo()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream + .computeBoolSize(3, forceNewPlan_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream + .computeMessageSize(4, getTargetServer()); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.AssignRegionStateData)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.AssignRegionStateData other = (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.AssignRegionStateData) obj; + + boolean result = true; + result = result && (hasTransitionState() == other.hasTransitionState()); + if (hasTransitionState()) { + result = result && transitionState_ == other.transitionState_; + } + result = result && (hasRegionInfo() == other.hasRegionInfo()); + if (hasRegionInfo()) { + result = result && getRegionInfo() + .equals(other.getRegionInfo()); + } + result = result && (hasForceNewPlan() == other.hasForceNewPlan()); + if (hasForceNewPlan()) { + result = result && (getForceNewPlan() == other.getForceNewPlan()); } result = result && (hasTargetServer() == other.hasTargetServer()); @@ -30817,6 +31671,11 @@ public final class MasterProcedureProtos { org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_hbase_pb_ServerCrashStateData_fieldAccessorTable; 
private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_RecoverMetaStateData_descriptor; + private static final + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_hbase_pb_RecoverMetaStateData_fieldAccessorTable; + private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor internal_static_hbase_pb_AssignRegionStateData_descriptor; private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable @@ -30945,158 +31804,163 @@ public final class MasterProcedureProtos { "\003(\0132\024.hbase.pb.RegionInfo\022.\n\020regions_ass" + "igned\030\004 \003(\0132\024.hbase.pb.RegionInfo\022\025\n\rcar" + "rying_meta\030\005 \001(\010\022\036\n\020should_split_wal\030\006 \001" + - "(\010:\004true\"\311\001\n\025AssignRegionStateData\0229\n\020tr" + - "ansition_state\030\001 \002(\0162\037.hbase.pb.RegionTr" + - "ansitionState\022)\n\013region_info\030\002 \002(\0132\024.hba" + - "se.pb.RegionInfo\022\035\n\016force_new_plan\030\003 \001(\010" + - ":\005false\022+\n\rtarget_server\030\004 \001(\0132\024.hbase.p", - "b.ServerName\"\365\001\n\027UnassignRegionStateData" + - "\0229\n\020transition_state\030\001 \002(\0162\037.hbase.pb.Re" + - "gionTransitionState\022)\n\013region_info\030\002 \002(\013" + - "2\024.hbase.pb.RegionInfo\0220\n\022destination_se" + - "rver\030\003 \001(\0132\024.hbase.pb.ServerName\022,\n\016host" + - "ing_server\030\005 \001(\0132\024.hbase.pb.ServerName\022\024" + - "\n\005force\030\004 \001(\010:\005false\"\237\001\n\023MoveRegionState" + - "Data\022)\n\013region_info\030\001 \001(\0132\024.hbase.pb.Reg" + - "ionInfo\022+\n\rsource_server\030\002 \002(\0132\024.hbase.p" + - "b.ServerName\0220\n\022destination_server\030\003 \002(\013", - "2\024.hbase.pb.ServerName\">\n\021GCRegionStateD" + - "ata\022)\n\013region_info\030\001 \002(\0132\024.hbase.pb.Regi" + - "onInfo\"\226\001\n\030GCMergedRegionsStateData\022&\n\010p" + - "arent_a\030\001 \002(\0132\024.hbase.pb.RegionInfo\022&\n\010p" + - "arent_b\030\002 \002(\0132\024.hbase.pb.RegionInfo\022*\n\014m" + - "erged_child\030\003 \002(\0132\024.hbase.pb.RegionInfo*" + - "\330\001\n\020CreateTableState\022\036\n\032CREATE_TABLE_PRE" + - "_OPERATION\020\001\022 \n\034CREATE_TABLE_WRITE_FS_LA" + - "YOUT\020\002\022\034\n\030CREATE_TABLE_ADD_TO_META\020\003\022\037\n\033" + - "CREATE_TABLE_ASSIGN_REGIONS\020\004\022\"\n\036CREATE_", - "TABLE_UPDATE_DESC_CACHE\020\005\022\037\n\033CREATE_TABL" + - "E_POST_OPERATION\020\006*\207\002\n\020ModifyTableState\022" + - "\030\n\024MODIFY_TABLE_PREPARE\020\001\022\036\n\032MODIFY_TABL" + - "E_PRE_OPERATION\020\002\022(\n$MODIFY_TABLE_UPDATE" + - "_TABLE_DESCRIPTOR\020\003\022&\n\"MODIFY_TABLE_REMO" + - "VE_REPLICA_COLUMN\020\004\022!\n\035MODIFY_TABLE_DELE" + - "TE_FS_LAYOUT\020\005\022\037\n\033MODIFY_TABLE_POST_OPER" + - "ATION\020\006\022#\n\037MODIFY_TABLE_REOPEN_ALL_REGIO" + - "NS\020\007*\212\002\n\022TruncateTableState\022 \n\034TRUNCATE_" + - "TABLE_PRE_OPERATION\020\001\022#\n\037TRUNCATE_TABLE_", - "REMOVE_FROM_META\020\002\022\"\n\036TRUNCATE_TABLE_CLE" + - "AR_FS_LAYOUT\020\003\022#\n\037TRUNCATE_TABLE_CREATE_" + - "FS_LAYOUT\020\004\022\036\n\032TRUNCATE_TABLE_ADD_TO_MET" + - "A\020\005\022!\n\035TRUNCATE_TABLE_ASSIGN_REGIONS\020\006\022!" 
+ - "\n\035TRUNCATE_TABLE_POST_OPERATION\020\007*\337\001\n\020De" + - "leteTableState\022\036\n\032DELETE_TABLE_PRE_OPERA" + - "TION\020\001\022!\n\035DELETE_TABLE_REMOVE_FROM_META\020" + - "\002\022 \n\034DELETE_TABLE_CLEAR_FS_LAYOUT\020\003\022\"\n\036D" + - "ELETE_TABLE_UPDATE_DESC_CACHE\020\004\022!\n\035DELET" + - "E_TABLE_UNASSIGN_REGIONS\020\005\022\037\n\033DELETE_TAB", - "LE_POST_OPERATION\020\006*\320\001\n\024CreateNamespaceS" + - "tate\022\034\n\030CREATE_NAMESPACE_PREPARE\020\001\022%\n!CR" + - "EATE_NAMESPACE_CREATE_DIRECTORY\020\002\022)\n%CRE" + - "ATE_NAMESPACE_INSERT_INTO_NS_TABLE\020\003\022\036\n\032" + - "CREATE_NAMESPACE_UPDATE_ZK\020\004\022(\n$CREATE_N" + - "AMESPACE_SET_NAMESPACE_QUOTA\020\005*z\n\024Modify" + - "NamespaceState\022\034\n\030MODIFY_NAMESPACE_PREPA" + - "RE\020\001\022$\n MODIFY_NAMESPACE_UPDATE_NS_TABLE" + - "\020\002\022\036\n\032MODIFY_NAMESPACE_UPDATE_ZK\020\003*\332\001\n\024D" + - "eleteNamespaceState\022\034\n\030DELETE_NAMESPACE_", - "PREPARE\020\001\022)\n%DELETE_NAMESPACE_DELETE_FRO" + - "M_NS_TABLE\020\002\022#\n\037DELETE_NAMESPACE_REMOVE_" + - "FROM_ZK\020\003\022\'\n#DELETE_NAMESPACE_DELETE_DIR" + - "ECTORIES\020\004\022+\n\'DELETE_NAMESPACE_REMOVE_NA" + - "MESPACE_QUOTA\020\005*\331\001\n\024AddColumnFamilyState" + - "\022\035\n\031ADD_COLUMN_FAMILY_PREPARE\020\001\022#\n\037ADD_C" + - "OLUMN_FAMILY_PRE_OPERATION\020\002\022-\n)ADD_COLU" + - "MN_FAMILY_UPDATE_TABLE_DESCRIPTOR\020\003\022$\n A" + - "DD_COLUMN_FAMILY_POST_OPERATION\020\004\022(\n$ADD" + - "_COLUMN_FAMILY_REOPEN_ALL_REGIONS\020\005*\353\001\n\027", - "ModifyColumnFamilyState\022 \n\034MODIFY_COLUMN" + - "_FAMILY_PREPARE\020\001\022&\n\"MODIFY_COLUMN_FAMIL" + - "Y_PRE_OPERATION\020\002\0220\n,MODIFY_COLUMN_FAMIL" + - "Y_UPDATE_TABLE_DESCRIPTOR\020\003\022\'\n#MODIFY_CO" + - "LUMN_FAMILY_POST_OPERATION\020\004\022+\n\'MODIFY_C" + - "OLUMN_FAMILY_REOPEN_ALL_REGIONS\020\005*\226\002\n\027De" + - "leteColumnFamilyState\022 \n\034DELETE_COLUMN_F" + - "AMILY_PREPARE\020\001\022&\n\"DELETE_COLUMN_FAMILY_" + - "PRE_OPERATION\020\002\0220\n,DELETE_COLUMN_FAMILY_" + - "UPDATE_TABLE_DESCRIPTOR\020\003\022)\n%DELETE_COLU", - "MN_FAMILY_DELETE_FS_LAYOUT\020\004\022\'\n#DELETE_C" + - "OLUMN_FAMILY_POST_OPERATION\020\005\022+\n\'DELETE_" + - "COLUMN_FAMILY_REOPEN_ALL_REGIONS\020\006*\350\001\n\020E" + - "nableTableState\022\030\n\024ENABLE_TABLE_PREPARE\020" + - "\001\022\036\n\032ENABLE_TABLE_PRE_OPERATION\020\002\022)\n%ENA" + - "BLE_TABLE_SET_ENABLING_TABLE_STATE\020\003\022$\n " + - "ENABLE_TABLE_MARK_REGIONS_ONLINE\020\004\022(\n$EN" + - "ABLE_TABLE_SET_ENABLED_TABLE_STATE\020\005\022\037\n\033" + - "ENABLE_TABLE_POST_OPERATION\020\006*\362\001\n\021Disabl" + - "eTableState\022\031\n\025DISABLE_TABLE_PREPARE\020\001\022\037", - "\n\033DISABLE_TABLE_PRE_OPERATION\020\002\022+\n\'DISAB" + - "LE_TABLE_SET_DISABLING_TABLE_STATE\020\003\022&\n\"" + - "DISABLE_TABLE_MARK_REGIONS_OFFLINE\020\004\022*\n&" + - "DISABLE_TABLE_SET_DISABLED_TABLE_STATE\020\005" + - "\022 \n\034DISABLE_TABLE_POST_OPERATION\020\006*\206\002\n\022C" + - "loneSnapshotState\022 \n\034CLONE_SNAPSHOT_PRE_" + - "OPERATION\020\001\022\"\n\036CLONE_SNAPSHOT_WRITE_FS_L" + - "AYOUT\020\002\022\036\n\032CLONE_SNAPSHOT_ADD_TO_META\020\003\022" + - "!\n\035CLONE_SNAPSHOT_ASSIGN_REGIONS\020\004\022$\n CL" + - "ONE_SNAPSHOT_UPDATE_DESC_CACHE\020\005\022!\n\035CLON", - "E_SNAPSHOT_POST_OPERATION\020\006\022\036\n\032CLONE_SNA" + - "PHOST_RESTORE_ACL\020\007*\322\001\n\024RestoreSnapshotS" + - 
"tate\022\"\n\036RESTORE_SNAPSHOT_PRE_OPERATION\020\001" + - "\022,\n(RESTORE_SNAPSHOT_UPDATE_TABLE_DESCRI" + - "PTOR\020\002\022$\n RESTORE_SNAPSHOT_WRITE_FS_LAYO" + - "UT\020\003\022 \n\034RESTORE_SNAPSHOT_UPDATE_META\020\004\022 " + - "\n\034RESTORE_SNAPSHOT_RESTORE_ACL\020\005*\376\001\n\033Dis" + - "patchMergingRegionsState\022$\n DISPATCH_MER" + - "GING_REGIONS_PREPARE\020\001\022*\n&DISPATCH_MERGI" + - "NG_REGIONS_PRE_OPERATION\020\002\0223\n/DISPATCH_M", - "ERGING_REGIONS_MOVE_REGION_TO_SAME_RS\020\003\022" + - "+\n\'DISPATCH_MERGING_REGIONS_DO_MERGE_IN_" + - "RS\020\004\022+\n\'DISPATCH_MERGING_REGIONS_POST_OP" + - "ERATION\020\005*\222\003\n\025SplitTableRegionState\022\036\n\032S" + - "PLIT_TABLE_REGION_PREPARE\020\001\022$\n SPLIT_TAB" + - "LE_REGION_PRE_OPERATION\020\002\022*\n&SPLIT_TABLE" + - "_REGION_CLOSE_PARENT_REGION\020\003\022.\n*SPLIT_T" + - "ABLE_REGION_CREATE_DAUGHTER_REGIONS\020\004\0220\n" + - ",SPLIT_TABLE_REGION_PRE_OPERATION_BEFORE" + - "_PONR\020\005\022\"\n\036SPLIT_TABLE_REGION_UPDATE_MET", - "A\020\006\022/\n+SPLIT_TABLE_REGION_PRE_OPERATION_" + - "AFTER_PONR\020\007\022)\n%SPLIT_TABLE_REGION_OPEN_" + - "CHILD_REGIONS\020\010\022%\n!SPLIT_TABLE_REGION_PO" + - "ST_OPERATION\020\t*\245\004\n\026MergeTableRegionsStat" + - "e\022\037\n\033MERGE_TABLE_REGIONS_PREPARE\020\001\022%\n!ME" + - "RGE_TABLE_REGIONS_PRE_OPERATION\020\002\022.\n*MER" + - "GE_TABLE_REGIONS_MOVE_REGION_TO_SAME_RS\020" + - "\003\022+\n\'MERGE_TABLE_REGIONS_PRE_MERGE_OPERA" + - "TION\020\004\022/\n+MERGE_TABLE_REGIONS_SET_MERGIN" + - "G_TABLE_STATE\020\005\022%\n!MERGE_TABLE_REGIONS_C", - "LOSE_REGIONS\020\006\022,\n(MERGE_TABLE_REGIONS_CR" + - "EATE_MERGED_REGION\020\007\0222\n.MERGE_TABLE_REGI" + - "ONS_PRE_MERGE_COMMIT_OPERATION\020\010\022#\n\037MERG" + - "E_TABLE_REGIONS_UPDATE_META\020\t\0223\n/MERGE_T" + - "ABLE_REGIONS_POST_MERGE_COMMIT_OPERATION" + - "\020\n\022*\n&MERGE_TABLE_REGIONS_OPEN_MERGED_RE" + - "GION\020\013\022&\n\"MERGE_TABLE_REGIONS_POST_OPERA" + - "TION\020\014*\234\002\n\020ServerCrashState\022\026\n\022SERVER_CR" + - "ASH_START\020\001\022\035\n\031SERVER_CRASH_PROCESS_META" + - "\020\002\022\034\n\030SERVER_CRASH_GET_REGIONS\020\003\022\036\n\032SERV", - "ER_CRASH_NO_SPLIT_LOGS\020\004\022\033\n\027SERVER_CRASH" + - "_SPLIT_LOGS\020\005\022#\n\037SERVER_CRASH_PREPARE_LO" + - "G_REPLAY\020\006\022\027\n\023SERVER_CRASH_ASSIGN\020\010\022\037\n\033S" + - "ERVER_CRASH_WAIT_ON_ASSIGN\020\t\022\027\n\023SERVER_C" + - "RASH_FINISH\020d*r\n\025RegionTransitionState\022\033" + - "\n\027REGION_TRANSITION_QUEUE\020\001\022\036\n\032REGION_TR" + - "ANSITION_DISPATCH\020\002\022\034\n\030REGION_TRANSITION" + - "_FINISH\020\003*C\n\017MoveRegionState\022\030\n\024MOVE_REG" + - "ION_UNASSIGN\020\001\022\026\n\022MOVE_REGION_ASSIGN\020\002*[" + - "\n\rGCRegionState\022\025\n\021GC_REGION_PREPARE\020\001\022\025", - "\n\021GC_REGION_ARCHIVE\020\002\022\034\n\030GC_REGION_PURGE" + - "_METADATA\020\003*o\n\024GCMergedRegionsState\022\035\n\031G" + - "C_MERGED_REGIONS_PREPARE\020\001\022\033\n\027GC_MERGED_" + - "REGIONS_PURGE\020\002\022\033\n\027GC_REGION_EDIT_METADA" + - "TA\020\003BR\n1org.apache.hadoop.hbase.shaded.p" + - "rotobuf.generatedB\025MasterProcedureProtos" + - "H\001\210\001\001\240\001\001" + "(\010:\004true\"\177\n\024RecoverMetaStateData\0220\n\022fail" + + "ed_meta_server\030\001 \001(\0132\024.hbase.pb.ServerNa" + + "me\022\036\n\020should_split_wal\030\002 \001(\010:\004true\022\025\n\nre" + + "plica_id\030\003 \001(\005:\0010\"\311\001\n\025AssignRegionStateD" + 
+ "ata\0229\n\020transition_state\030\001 \002(\0162\037.hbase.pb", + ".RegionTransitionState\022)\n\013region_info\030\002 " + + "\002(\0132\024.hbase.pb.RegionInfo\022\035\n\016force_new_p" + + "lan\030\003 \001(\010:\005false\022+\n\rtarget_server\030\004 \001(\0132" + + "\024.hbase.pb.ServerName\"\365\001\n\027UnassignRegion" + + "StateData\0229\n\020transition_state\030\001 \002(\0162\037.hb" + + "ase.pb.RegionTransitionState\022)\n\013region_i" + + "nfo\030\002 \002(\0132\024.hbase.pb.RegionInfo\0220\n\022desti" + + "nation_server\030\003 \001(\0132\024.hbase.pb.ServerNam" + + "e\022,\n\016hosting_server\030\005 \001(\0132\024.hbase.pb.Ser" + + "verName\022\024\n\005force\030\004 \001(\010:\005false\"\237\001\n\023MoveRe", + "gionStateData\022)\n\013region_info\030\001 \001(\0132\024.hba" + + "se.pb.RegionInfo\022+\n\rsource_server\030\002 \002(\0132" + + "\024.hbase.pb.ServerName\0220\n\022destination_ser" + + "ver\030\003 \002(\0132\024.hbase.pb.ServerName\">\n\021GCReg" + + "ionStateData\022)\n\013region_info\030\001 \002(\0132\024.hbas" + + "e.pb.RegionInfo\"\226\001\n\030GCMergedRegionsState" + + "Data\022&\n\010parent_a\030\001 \002(\0132\024.hbase.pb.Region" + + "Info\022&\n\010parent_b\030\002 \002(\0132\024.hbase.pb.Region" + + "Info\022*\n\014merged_child\030\003 \002(\0132\024.hbase.pb.Re" + + "gionInfo*\330\001\n\020CreateTableState\022\036\n\032CREATE_", + "TABLE_PRE_OPERATION\020\001\022 \n\034CREATE_TABLE_WR" + + "ITE_FS_LAYOUT\020\002\022\034\n\030CREATE_TABLE_ADD_TO_M" + + "ETA\020\003\022\037\n\033CREATE_TABLE_ASSIGN_REGIONS\020\004\022\"" + + "\n\036CREATE_TABLE_UPDATE_DESC_CACHE\020\005\022\037\n\033CR" + + "EATE_TABLE_POST_OPERATION\020\006*\207\002\n\020ModifyTa" + + "bleState\022\030\n\024MODIFY_TABLE_PREPARE\020\001\022\036\n\032MO" + + "DIFY_TABLE_PRE_OPERATION\020\002\022(\n$MODIFY_TAB" + + "LE_UPDATE_TABLE_DESCRIPTOR\020\003\022&\n\"MODIFY_T" + + "ABLE_REMOVE_REPLICA_COLUMN\020\004\022!\n\035MODIFY_T" + + "ABLE_DELETE_FS_LAYOUT\020\005\022\037\n\033MODIFY_TABLE_", + "POST_OPERATION\020\006\022#\n\037MODIFY_TABLE_REOPEN_" + + "ALL_REGIONS\020\007*\212\002\n\022TruncateTableState\022 \n\034" + + "TRUNCATE_TABLE_PRE_OPERATION\020\001\022#\n\037TRUNCA" + + "TE_TABLE_REMOVE_FROM_META\020\002\022\"\n\036TRUNCATE_" + + "TABLE_CLEAR_FS_LAYOUT\020\003\022#\n\037TRUNCATE_TABL" + + "E_CREATE_FS_LAYOUT\020\004\022\036\n\032TRUNCATE_TABLE_A" + + "DD_TO_META\020\005\022!\n\035TRUNCATE_TABLE_ASSIGN_RE" + + "GIONS\020\006\022!\n\035TRUNCATE_TABLE_POST_OPERATION" + + "\020\007*\337\001\n\020DeleteTableState\022\036\n\032DELETE_TABLE_" + + "PRE_OPERATION\020\001\022!\n\035DELETE_TABLE_REMOVE_F", + "ROM_META\020\002\022 \n\034DELETE_TABLE_CLEAR_FS_LAYO" + + "UT\020\003\022\"\n\036DELETE_TABLE_UPDATE_DESC_CACHE\020\004" + + "\022!\n\035DELETE_TABLE_UNASSIGN_REGIONS\020\005\022\037\n\033D" + + "ELETE_TABLE_POST_OPERATION\020\006*\320\001\n\024CreateN" + + "amespaceState\022\034\n\030CREATE_NAMESPACE_PREPAR" + + "E\020\001\022%\n!CREATE_NAMESPACE_CREATE_DIRECTORY" + + "\020\002\022)\n%CREATE_NAMESPACE_INSERT_INTO_NS_TA" + + "BLE\020\003\022\036\n\032CREATE_NAMESPACE_UPDATE_ZK\020\004\022(\n" + + "$CREATE_NAMESPACE_SET_NAMESPACE_QUOTA\020\005*" + + "z\n\024ModifyNamespaceState\022\034\n\030MODIFY_NAMESP", + "ACE_PREPARE\020\001\022$\n MODIFY_NAMESPACE_UPDATE" + + "_NS_TABLE\020\002\022\036\n\032MODIFY_NAMESPACE_UPDATE_Z" + + "K\020\003*\332\001\n\024DeleteNamespaceState\022\034\n\030DELETE_N" + + "AMESPACE_PREPARE\020\001\022)\n%DELETE_NAMESPACE_D" + + 
"ELETE_FROM_NS_TABLE\020\002\022#\n\037DELETE_NAMESPAC" + + "E_REMOVE_FROM_ZK\020\003\022\'\n#DELETE_NAMESPACE_D" + + "ELETE_DIRECTORIES\020\004\022+\n\'DELETE_NAMESPACE_" + + "REMOVE_NAMESPACE_QUOTA\020\005*\331\001\n\024AddColumnFa" + + "milyState\022\035\n\031ADD_COLUMN_FAMILY_PREPARE\020\001" + + "\022#\n\037ADD_COLUMN_FAMILY_PRE_OPERATION\020\002\022-\n", + ")ADD_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPT" + + "OR\020\003\022$\n ADD_COLUMN_FAMILY_POST_OPERATION" + + "\020\004\022(\n$ADD_COLUMN_FAMILY_REOPEN_ALL_REGIO" + + "NS\020\005*\353\001\n\027ModifyColumnFamilyState\022 \n\034MODI" + + "FY_COLUMN_FAMILY_PREPARE\020\001\022&\n\"MODIFY_COL" + + "UMN_FAMILY_PRE_OPERATION\020\002\0220\n,MODIFY_COL" + + "UMN_FAMILY_UPDATE_TABLE_DESCRIPTOR\020\003\022\'\n#" + + "MODIFY_COLUMN_FAMILY_POST_OPERATION\020\004\022+\n" + + "\'MODIFY_COLUMN_FAMILY_REOPEN_ALL_REGIONS" + + "\020\005*\226\002\n\027DeleteColumnFamilyState\022 \n\034DELETE", + "_COLUMN_FAMILY_PREPARE\020\001\022&\n\"DELETE_COLUM" + + "N_FAMILY_PRE_OPERATION\020\002\0220\n,DELETE_COLUM" + + "N_FAMILY_UPDATE_TABLE_DESCRIPTOR\020\003\022)\n%DE" + + "LETE_COLUMN_FAMILY_DELETE_FS_LAYOUT\020\004\022\'\n" + + "#DELETE_COLUMN_FAMILY_POST_OPERATION\020\005\022+" + + "\n\'DELETE_COLUMN_FAMILY_REOPEN_ALL_REGION" + + "S\020\006*\350\001\n\020EnableTableState\022\030\n\024ENABLE_TABLE" + + "_PREPARE\020\001\022\036\n\032ENABLE_TABLE_PRE_OPERATION" + + "\020\002\022)\n%ENABLE_TABLE_SET_ENABLING_TABLE_ST" + + "ATE\020\003\022$\n ENABLE_TABLE_MARK_REGIONS_ONLIN", + "E\020\004\022(\n$ENABLE_TABLE_SET_ENABLED_TABLE_ST" + + "ATE\020\005\022\037\n\033ENABLE_TABLE_POST_OPERATION\020\006*\362" + + "\001\n\021DisableTableState\022\031\n\025DISABLE_TABLE_PR" + + "EPARE\020\001\022\037\n\033DISABLE_TABLE_PRE_OPERATION\020\002" + + "\022+\n\'DISABLE_TABLE_SET_DISABLING_TABLE_ST" + + "ATE\020\003\022&\n\"DISABLE_TABLE_MARK_REGIONS_OFFL" + + "INE\020\004\022*\n&DISABLE_TABLE_SET_DISABLED_TABL" + + "E_STATE\020\005\022 \n\034DISABLE_TABLE_POST_OPERATIO" + + "N\020\006*\206\002\n\022CloneSnapshotState\022 \n\034CLONE_SNAP" + + "SHOT_PRE_OPERATION\020\001\022\"\n\036CLONE_SNAPSHOT_W", + "RITE_FS_LAYOUT\020\002\022\036\n\032CLONE_SNAPSHOT_ADD_T" + + "O_META\020\003\022!\n\035CLONE_SNAPSHOT_ASSIGN_REGION" + + "S\020\004\022$\n CLONE_SNAPSHOT_UPDATE_DESC_CACHE\020" + + "\005\022!\n\035CLONE_SNAPSHOT_POST_OPERATION\020\006\022\036\n\032" + + "CLONE_SNAPHOST_RESTORE_ACL\020\007*\322\001\n\024Restore" + + "SnapshotState\022\"\n\036RESTORE_SNAPSHOT_PRE_OP" + + "ERATION\020\001\022,\n(RESTORE_SNAPSHOT_UPDATE_TAB" + + "LE_DESCRIPTOR\020\002\022$\n RESTORE_SNAPSHOT_WRIT" + + "E_FS_LAYOUT\020\003\022 \n\034RESTORE_SNAPSHOT_UPDATE" + + "_META\020\004\022 \n\034RESTORE_SNAPSHOT_RESTORE_ACL\020", + "\005*\376\001\n\033DispatchMergingRegionsState\022$\n DIS" + + "PATCH_MERGING_REGIONS_PREPARE\020\001\022*\n&DISPA" + + "TCH_MERGING_REGIONS_PRE_OPERATION\020\002\0223\n/D" + + "ISPATCH_MERGING_REGIONS_MOVE_REGION_TO_S" + + "AME_RS\020\003\022+\n\'DISPATCH_MERGING_REGIONS_DO_" + + "MERGE_IN_RS\020\004\022+\n\'DISPATCH_MERGING_REGION" + + "S_POST_OPERATION\020\005*\222\003\n\025SplitTableRegionS" + + "tate\022\036\n\032SPLIT_TABLE_REGION_PREPARE\020\001\022$\n " + + "SPLIT_TABLE_REGION_PRE_OPERATION\020\002\022*\n&SP" + + "LIT_TABLE_REGION_CLOSE_PARENT_REGION\020\003\022.", + "\n*SPLIT_TABLE_REGION_CREATE_DAUGHTER_REG" + + "IONS\020\004\0220\n,SPLIT_TABLE_REGION_PRE_OPERATI" + + "ON_BEFORE_PONR\020\005\022\"\n\036SPLIT_TABLE_REGION_U" + + 
"PDATE_META\020\006\022/\n+SPLIT_TABLE_REGION_PRE_O" + + "PERATION_AFTER_PONR\020\007\022)\n%SPLIT_TABLE_REG" + + "ION_OPEN_CHILD_REGIONS\020\010\022%\n!SPLIT_TABLE_" + + "REGION_POST_OPERATION\020\t*\245\004\n\026MergeTableRe" + + "gionsState\022\037\n\033MERGE_TABLE_REGIONS_PREPAR" + + "E\020\001\022%\n!MERGE_TABLE_REGIONS_PRE_OPERATION" + + "\020\002\022.\n*MERGE_TABLE_REGIONS_MOVE_REGION_TO", + "_SAME_RS\020\003\022+\n\'MERGE_TABLE_REGIONS_PRE_ME" + + "RGE_OPERATION\020\004\022/\n+MERGE_TABLE_REGIONS_S" + + "ET_MERGING_TABLE_STATE\020\005\022%\n!MERGE_TABLE_" + + "REGIONS_CLOSE_REGIONS\020\006\022,\n(MERGE_TABLE_R" + + "EGIONS_CREATE_MERGED_REGION\020\007\0222\n.MERGE_T" + + "ABLE_REGIONS_PRE_MERGE_COMMIT_OPERATION\020" + + "\010\022#\n\037MERGE_TABLE_REGIONS_UPDATE_META\020\t\0223" + + "\n/MERGE_TABLE_REGIONS_POST_MERGE_COMMIT_" + + "OPERATION\020\n\022*\n&MERGE_TABLE_REGIONS_OPEN_" + + "MERGED_REGION\020\013\022&\n\"MERGE_TABLE_REGIONS_P", + "OST_OPERATION\020\014*\234\002\n\020ServerCrashState\022\026\n\022" + + "SERVER_CRASH_START\020\001\022\035\n\031SERVER_CRASH_PRO" + + "CESS_META\020\002\022\034\n\030SERVER_CRASH_GET_REGIONS\020" + + "\003\022\036\n\032SERVER_CRASH_NO_SPLIT_LOGS\020\004\022\033\n\027SER" + + "VER_CRASH_SPLIT_LOGS\020\005\022#\n\037SERVER_CRASH_P" + + "REPARE_LOG_REPLAY\020\006\022\027\n\023SERVER_CRASH_ASSI" + + "GN\020\010\022\037\n\033SERVER_CRASH_WAIT_ON_ASSIGN\020\t\022\027\n" + + "\023SERVER_CRASH_FINISH\020d*P\n\020RecoverMetaSta" + + "te\022\033\n\027RECOVER_META_SPLIT_LOGS\020\001\022\037\n\033RECOV" + + "ER_META_ASSIGN_REGIONS\020\002*r\n\025RegionTransi", + "tionState\022\033\n\027REGION_TRANSITION_QUEUE\020\001\022\036" + + "\n\032REGION_TRANSITION_DISPATCH\020\002\022\034\n\030REGION" + + "_TRANSITION_FINISH\020\003*C\n\017MoveRegionState\022" + + "\030\n\024MOVE_REGION_UNASSIGN\020\001\022\026\n\022MOVE_REGION" + + "_ASSIGN\020\002*[\n\rGCRegionState\022\025\n\021GC_REGION_" + + "PREPARE\020\001\022\025\n\021GC_REGION_ARCHIVE\020\002\022\034\n\030GC_R" + + "EGION_PURGE_METADATA\020\003*o\n\024GCMergedRegion" + + "sState\022\035\n\031GC_MERGED_REGIONS_PREPARE\020\001\022\033\n" + + "\027GC_MERGED_REGIONS_PURGE\020\002\022\033\n\027GC_REGION_" + + "EDIT_METADATA\020\003BR\n1org.apache.hadoop.hba", + "se.shaded.protobuf.generatedB\025MasterProc" + + "edureProtosH\001\210\001\001\240\001\001" }; org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor. 
InternalDescriptorAssigner() { @@ -31227,32 +32091,38 @@ public final class MasterProcedureProtos { org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_ServerCrashStateData_descriptor, new java.lang.String[] { "ServerName", "DistributedLogReplay", "RegionsOnCrashedServer", "RegionsAssigned", "CarryingMeta", "ShouldSplitWal", }); - internal_static_hbase_pb_AssignRegionStateData_descriptor = + internal_static_hbase_pb_RecoverMetaStateData_descriptor = getDescriptor().getMessageTypes().get(19); + internal_static_hbase_pb_RecoverMetaStateData_fieldAccessorTable = new + org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_hbase_pb_RecoverMetaStateData_descriptor, + new java.lang.String[] { "FailedMetaServer", "ShouldSplitWal", "ReplicaId", }); + internal_static_hbase_pb_AssignRegionStateData_descriptor = + getDescriptor().getMessageTypes().get(20); internal_static_hbase_pb_AssignRegionStateData_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_AssignRegionStateData_descriptor, new java.lang.String[] { "TransitionState", "RegionInfo", "ForceNewPlan", "TargetServer", }); internal_static_hbase_pb_UnassignRegionStateData_descriptor = - getDescriptor().getMessageTypes().get(20); + getDescriptor().getMessageTypes().get(21); internal_static_hbase_pb_UnassignRegionStateData_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_UnassignRegionStateData_descriptor, new java.lang.String[] { "TransitionState", "RegionInfo", "DestinationServer", "HostingServer", "Force", }); internal_static_hbase_pb_MoveRegionStateData_descriptor = - getDescriptor().getMessageTypes().get(21); + getDescriptor().getMessageTypes().get(22); internal_static_hbase_pb_MoveRegionStateData_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_MoveRegionStateData_descriptor, new java.lang.String[] { "RegionInfo", "SourceServer", "DestinationServer", }); internal_static_hbase_pb_GCRegionStateData_descriptor = - getDescriptor().getMessageTypes().get(22); + getDescriptor().getMessageTypes().get(23); internal_static_hbase_pb_GCRegionStateData_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_GCRegionStateData_descriptor, new java.lang.String[] { "RegionInfo", }); internal_static_hbase_pb_GCMergedRegionsStateData_descriptor = - getDescriptor().getMessageTypes().get(23); + getDescriptor().getMessageTypes().get(24); internal_static_hbase_pb_GCMergedRegionsStateData_fieldAccessorTable = new org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_hbase_pb_GCMergedRegionsStateData_descriptor, diff --git a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto index c7d6598b81cc4b07b191ba37f8904f67cedd6a1e..74ae16d2cb4a0490054b98f8928af3b3c3ba23e8 100644 --- a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto +++ b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto @@ -330,6 +330,12 @@ message ServerCrashStateData { optional bool should_split_wal = 6 [default = true]; } +message RecoverMetaStateData { + optional ServerName failed_meta_server = 
1; + optional bool should_split_wal = 2 [default = true]; + optional int32 replica_id = 3 [default = 0]; +} + enum ServerCrashState { SERVER_CRASH_START = 1; SERVER_CRASH_PROCESS_META = 2; @@ -343,6 +349,11 @@ enum ServerCrashState { SERVER_CRASH_FINISH = 100; } +enum RecoverMetaState { + RECOVER_META_SPLIT_LOGS = 1; + RECOVER_META_ASSIGN_REGIONS = 2; +} + enum RegionTransitionState { REGION_TRANSITION_QUEUE = 1; REGION_TRANSITION_DISPATCH = 2; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index 0e6f13d7b2b06d04e391f7c4975efaf3f6a3d6d8..5316b547ecc1402fccb63b53f456e2626984ba8d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -123,6 +123,7 @@ import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil; import org.apache.hadoop.hbase.master.procedure.ModifyColumnFamilyProcedure; import org.apache.hadoop.hbase.master.procedure.ModifyTableProcedure; import org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch; +import org.apache.hadoop.hbase.master.procedure.RecoverMetaProcedure; import org.apache.hadoop.hbase.master.procedure.TruncateTableProcedure; import org.apache.hadoop.hbase.master.replication.ReplicationManager; import org.apache.hadoop.hbase.master.snapshot.SnapshotManager; @@ -398,9 +399,6 @@ public class HMaster extends HRegionServer implements MasterServices { private long splitPlanCount; private long mergePlanCount; - /** flag used in test cases in order to simulate RS failures during master initialization */ - private volatile boolean initializationBeforeMetaAssignment = false; - /* Handle favored nodes information */ private FavoredNodesManager favoredNodesManager; @@ -796,14 +794,6 @@ public class HMaster extends HRegionServer implements MasterServices { status.setStatus("Wait for region servers to report in"); waitForRegionServers(status); - // get a list for previously failed RS which need log splitting work - // we recover hbase:meta region servers inside master initialization and - // handle other failed servers in SSH in order to start up master node ASAP - MasterMetaBootstrap metaBootstrap = createMetaBootstrap(this, status); - metaBootstrap.splitMetaLogsBeforeAssignment(); - - this.initializationBeforeMetaAssignment = true; - if (this.balancer instanceof FavoredNodesPromoter) { favoredNodesManager = new FavoredNodesManager(this); } @@ -822,8 +812,12 @@ public class HMaster extends HRegionServer implements MasterServices { if (isStopped()) return; // Make sure meta assigned before proceeding. 
- status.setStatus("Assigning Meta Region"); - metaBootstrap.assignMeta(); + status.setStatus("Recovering Meta Region"); + + // we recover hbase:meta region servers inside master initialization and + // handle other failed servers in SSH in order to start up master node ASAP + MasterMetaBootstrap metaBootstrap = createMetaBootstrap(this, status); + metaBootstrap.recoverMeta(); // check if master is shutting down because above assignMeta could return even hbase:meta isn't // assigned when master is shutting down @@ -2719,14 +2713,6 @@ public class HMaster extends HRegionServer implements MasterServices { } /** - * Report whether this master has started initialization and is about to do meta region assignment - * @return true if master is in initialization & about to assign hbase:meta regions - */ - public boolean isInitializationStartsMetaRegionAssignment() { - return this.initializationBeforeMetaAssignment; - } - - /** * Compute the average load across all region servers. * Currently, this uses a very naive computation - just uses the number of * regions being served, ignoring stats about number of requests. @@ -3433,6 +3419,17 @@ public class HMaster extends HRegionServer implements MasterServices { return lockManager; } + @Override + public boolean recoverMeta() throws IOException { + ProcedurePrepareLatch latch = ProcedurePrepareLatch.createLatch(2, 0); + long procId = procedureExecutor.submitProcedure(new RecoverMetaProcedure(null, true, latch)); + LOG.info("Waiting on RecoverMetaProcedure submitted with procId=" + procId); + latch.await(); + LOG.info("Default replica of hbase:meta, location=" + + getMetaTableLocator().getMetaRegionLocation(getZooKeeper())); + return assignmentManager.isMetaInitialized(); + } + public QuotaObserverChore getQuotaObserverChore() { return this.quotaObserverChore; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMetaBootstrap.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMetaBootstrap.java index 049e6595d22fdd63834ba03dfd399f2e5ad97ff1..7424dac3257b2b6bfde612e35c57d92667184c84 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMetaBootstrap.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMetaBootstrap.java @@ -19,7 +19,6 @@ package org.apache.hadoop.hbase.master; import java.io.IOException; -import java.util.HashSet; import java.util.List; import java.util.Set; @@ -49,45 +48,24 @@ public class MasterMetaBootstrap { private final MonitoredTask status; private final HMaster master; - private Set previouslyFailedServers; - private Set previouslyFailedMetaRSs; - public MasterMetaBootstrap(final HMaster master, final MonitoredTask status) { this.master = master; this.status = status; } - public void splitMetaLogsBeforeAssignment() throws IOException, KeeperException { + public void recoverMeta() throws InterruptedException, IOException { + master.recoverMeta(); + master.getTableStateManager().start(); + enableCrashedServerProcessing(false); + } + + public void processDeadServers() { // get a list for previously failed RS which need log splitting work // we recover hbase:meta region servers inside master initialization and // handle other failed servers in SSH in order to start up master node ASAP - previouslyFailedServers = master.getMasterWalManager().getFailedServersFromLogFolders(); - - // log splitting for hbase:meta server - ServerName oldMetaServerLocation = master.getMetaTableLocator() - .getMetaRegionLocation(master.getZooKeeper()); - if 
(oldMetaServerLocation != null && previouslyFailedServers.contains(oldMetaServerLocation)) { - splitMetaLogBeforeAssignment(oldMetaServerLocation); - // Note: we can't remove oldMetaServerLocation from previousFailedServers list because it - // may also host user regions - } - previouslyFailedMetaRSs = getPreviouselyFailedMetaServersFromZK(); - // need to use union of previouslyFailedMetaRSs recorded in ZK and previouslyFailedServers - // instead of previouslyFailedMetaRSs alone to address the following two situations: - // 1) the chained failure situation(recovery failed multiple times in a row). - // 2) master get killed right before it could delete the recovering hbase:meta from ZK while the - // same server still has non-meta wals to be replayed so that - // removeStaleRecoveringRegionsFromZK can't delete the stale hbase:meta region - // Passing more servers into splitMetaLog is all right. If a server doesn't have hbase:meta wal, - // there is no op for the server. - previouslyFailedMetaRSs.addAll(previouslyFailedServers); - } - - public void assignMeta() throws InterruptedException, IOException, KeeperException { - assignMeta(previouslyFailedMetaRSs, HRegionInfo.DEFAULT_REPLICA_ID); - } + Set previouslyFailedServers = + master.getMasterWalManager().getFailedServersFromLogFolders(); - public void processDeadServers() throws IOException { // Master has recovered hbase:meta region server and we put // other failed region servers in a queue to be handled later by SSH for (ServerName tmpServer : previouslyFailedServers) { @@ -99,17 +77,12 @@ public class MasterMetaBootstrap { throws IOException, InterruptedException, KeeperException { int numReplicas = master.getConfiguration().getInt(HConstants.META_REPLICAS_NUM, HConstants.DEFAULT_META_REPLICA_NUM); - final Set EMPTY_SET = new HashSet<>(); for (int i = 1; i < numReplicas; i++) { - assignMeta(EMPTY_SET, i); + assignMeta(i); } unassignExcessMetaReplica(numReplicas); } - private void splitMetaLogBeforeAssignment(ServerName currentMetaServer) throws IOException { - master.getMasterWalManager().splitMetaLog(currentMetaServer); - } - private void unassignExcessMetaReplica(int numMetaReplicasConfigured) { final ZooKeeperWatcher zooKeeper = master.getZooKeeper(); // unassign the unneeded replicas (for e.g., if the previous master was configured @@ -137,12 +110,11 @@ public class MasterMetaBootstrap { /** * Check hbase:meta is assigned. If not, assign it. */ - protected void assignMeta(Set previouslyFailedMetaRSs, int replicaId) + protected void assignMeta(int replicaId) throws InterruptedException, IOException, KeeperException { final AssignmentManager assignmentManager = master.getAssignmentManager(); // Work on meta region - int assigned = 0; // TODO: Unimplemented // long timeout = // master.getConfiguration().getLong("hbase.catalog.verification.timeout", 1000); @@ -172,14 +144,14 @@ public class MasterMetaBootstrap { // if the meta region server is died at this time, we need it to be re-assigned // by SSH so that system tables can be assigned. // No need to wait for meta is assigned = 0 when meta is just verified. 
- if (replicaId == HRegionInfo.DEFAULT_REPLICA_ID) enableCrashedServerProcessing(assigned != 0); + if (replicaId == HRegionInfo.DEFAULT_REPLICA_ID) enableCrashedServerProcessing(false); LOG.info("hbase:meta with replicaId " + replicaId + ", location=" + master.getMetaTableLocator().getMetaRegionLocation(master.getZooKeeper(), replicaId)); status.setStatus("META assigned."); } private void enableCrashedServerProcessing(final boolean waitForMeta) - throws IOException, InterruptedException { + throws InterruptedException { // If crashed server processing is disabled, we enable it and expire those dead but not expired // servers. This is required so that if meta is assigning to a server which dies after // assignMeta starts assignment, ServerCrashProcedure can re-assign it. Otherwise, we will be @@ -193,23 +165,4 @@ public class MasterMetaBootstrap { master.getMetaTableLocator().waitMetaRegionLocation(master.getZooKeeper()); } } - - /** - * This function returns a set of region server names under hbase:meta recovering region ZK node - * @return Set of meta server names which were recorded in ZK - */ - private Set getPreviouselyFailedMetaServersFromZK() throws KeeperException { - final ZooKeeperWatcher zooKeeper = master.getZooKeeper(); - Set result = new HashSet<>(); - String metaRecoveringZNode = ZKUtil.joinZNode(zooKeeper.znodePaths.recoveringRegionsZNode, - HRegionInfo.FIRST_META_REGIONINFO.getEncodedName()); - List regionFailedServers = ZKUtil.listChildrenNoWatch(zooKeeper, metaRecoveringZNode); - if (regionFailedServers == null) return result; - - for (String failedServer : regionFailedServers) { - ServerName server = ServerName.parseServerName(failedServer); - result.add(server); - } - return result; - } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java index 2f8b0ee12badeb16de05c00b547878c2d9d1f731..8777a73f4c97717574cba9dca5aca1bf1b1bd154 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java @@ -500,4 +500,9 @@ public interface MasterServices extends Server { public String getRegionServerVersion(final ServerName sn); public void checkIfShouldMoveSystemRegionAsync(); + + /** + * Recover meta table + */ + boolean recoverMeta() throws IOException; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java index c1e39fdc854e83e23dffb68e699d9212a4635441..019ef65c01227e17dc69774688c1ccee3e716720 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java @@ -151,7 +151,7 @@ public class MasterWalManager { * Inspect the log directory to find dead servers which need recovery work * @return A set of ServerNames which aren't running but still have WAL files left in file system */ - Set getFailedServersFromLogFolders() { + public Set getFailedServersFromLogFolders() { boolean retrySplitting = !conf.getBoolean("hbase.hlog.split.skip.errors", WALSplitter.SPLIT_SKIP_ERRORS_DEFAULT); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java index 
58453ee4bc98ba3fe8a624aa462359140c0520ef..976637192de264dd11cbf5cbdeedcde3e952d8a1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java @@ -1453,6 +1453,8 @@ public class AssignmentManager implements ServerListener { synchronized (regionNode) { State state = regionNode.transitionState(State.OPEN, RegionStates.STATES_EXPECTED_ON_OPEN); if (isMetaRegion(hri)) { + master.getTableStateManager().setTableState(TableName.META_TABLE_NAME, + TableState.State.ENABLED); setMetaInitialized(hri, true); } regionStates.addRegionToServer(regionNode.getRegionLocation(), regionNode); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RecoverMetaProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RecoverMetaProcedure.java new file mode 100644 index 0000000000000000000000000000000000000000..a5c22ed06645b35dbe3336cbd274b7355083937b --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RecoverMetaProcedure.java @@ -0,0 +1,252 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.master.procedure; + +import com.google.common.base.Preconditions; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.RegionReplicaUtil; +import org.apache.hadoop.hbase.master.HMaster; +import org.apache.hadoop.hbase.master.assignment.AssignProcedure; +import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException; +import org.apache.hadoop.hbase.procedure2.ProcedureYieldException; +import org.apache.hadoop.hbase.procedure2.StateMachineProcedure; +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RecoverMetaState; +import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; +import org.apache.zookeeper.KeeperException; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.util.Set; + +/** + * This procedure recovers meta from a prior shutdown/crash of a server and brings meta online by + * assigning the meta region(s). Any code path that accesses meta and requires it to be online + * needs to submit this procedure instead of duplicating the meta recovery steps in the code.
+ */ +public class RecoverMetaProcedure + extends StateMachineProcedure<MasterProcedureEnv, MasterProcedureProtos.RecoverMetaState> + implements TableProcedureInterface { + private static final Log LOG = LogFactory.getLog(RecoverMetaProcedure.class); + + private ServerName failedMetaServer; + private boolean shouldSplitWal; + private int replicaId; + + private final ProcedurePrepareLatch syncLatch; + private HMaster master; + + /** + * Call this constructor to queue up a {@link RecoverMetaProcedure} in response to a + * meta-carrying server crash + * @param failedMetaServer the failed/crashed region server that was carrying meta + * @param shouldSplitLog whether the WAL of the meta region should be split + */ + public RecoverMetaProcedure(final ServerName failedMetaServer, final boolean shouldSplitLog) { + this(failedMetaServer, shouldSplitLog, null); + } + + /** + * Constructor with latch, for blocking/synchronous usage + */ + public RecoverMetaProcedure(final ServerName failedMetaServer, final boolean shouldSplitLog, + final ProcedurePrepareLatch latch) { + this.failedMetaServer = failedMetaServer; + this.shouldSplitWal = shouldSplitLog; + this.replicaId = HRegionInfo.DEFAULT_REPLICA_ID; + this.syncLatch = latch; + } + + /** + * This constructor is also used when deserializing from a procedure store; we'll construct one + * of these and then call {@link #deserializeStateData(InputStream)}. Do not use directly. + */ + public RecoverMetaProcedure() { + this(null, false); + } + + @Override + protected Flow executeFromState(MasterProcedureEnv env, + MasterProcedureProtos.RecoverMetaState state) + throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException { + prepare(env); + + if (!isRunRequired()) { + LOG.info(this + "; Meta already initialized. Skipping run"); + return Flow.NO_MORE_STATE; + } + + try { + switch (state) { + case RECOVER_META_SPLIT_LOGS: + LOG.info("Start " + this); + if (shouldSplitWal) { + if (failedMetaServer != null) { + master.getMasterWalManager().splitMetaLog(failedMetaServer); + } else { + ServerName serverName = + master.getMetaTableLocator().getMetaRegionLocation(master.getZooKeeper()); + Set<ServerName> previouslyFailedServers = + master.getMasterWalManager().getFailedServersFromLogFolders(); + if (serverName != null && previouslyFailedServers.contains(serverName)) { + master.getMasterWalManager().splitMetaLog(serverName); + } + } + } + setNextState(RecoverMetaState.RECOVER_META_ASSIGN_REGIONS); + break; + + case RECOVER_META_ASSIGN_REGIONS: + HRegionInfo hri = RegionReplicaUtil.getRegionInfoForReplica( + HRegionInfo.FIRST_META_REGIONINFO, this.replicaId); + + AssignProcedure metaAssignProcedure; + if (failedMetaServer != null) { + LOG.info(this + "; Assigning meta with new plan.
previous meta server=" + + failedMetaServer); + metaAssignProcedure = master.getAssignmentManager().createAssignProcedure(hri, true); + } else { + // get server carrying meta from zk + ServerName metaServer = + MetaTableLocator.getMetaRegionState(master.getZooKeeper()).getServerName(); + LOG.info(this + "; Retaining meta assignment to server=" + metaServer); + metaAssignProcedure = + master.getAssignmentManager().createAssignProcedure(hri, metaServer); + } + + addChildProcedure(metaAssignProcedure); + return Flow.NO_MORE_STATE; + + default: + throw new UnsupportedOperationException("unhandled state=" + state); + } + } catch (IOException|KeeperException e) { + LOG.warn(this + "; Failed state=" + state + ", retry " + this + "; cycles=" + + getCycles(), e); + } + return Flow.HAS_MORE_STATE; + } + + @Override + protected void rollbackState(MasterProcedureEnv env, + MasterProcedureProtos.RecoverMetaState recoverMetaState) + throws IOException, InterruptedException { + // Can't rollback + throw new UnsupportedOperationException("unhandled state=" + recoverMetaState); + } + + @Override + protected MasterProcedureProtos.RecoverMetaState getState(int stateId) { + return RecoverMetaState.forNumber(stateId); + } + + @Override + protected int getStateId(MasterProcedureProtos.RecoverMetaState recoverMetaState) { + return recoverMetaState.getNumber(); + } + + @Override + protected MasterProcedureProtos.RecoverMetaState getInitialState() { + return RecoverMetaState.RECOVER_META_SPLIT_LOGS; + } + + @Override + protected void toStringClassDetails(StringBuilder sb) { + sb.append(getClass().getSimpleName()); + sb.append(" failedMetaServer="); + sb.append(failedMetaServer); + sb.append(", splitWal="); + sb.append(shouldSplitWal); + } + + @Override + protected void serializeStateData(OutputStream stream) throws IOException { + super.serializeStateData(stream); + MasterProcedureProtos.RecoverMetaStateData.Builder state = + MasterProcedureProtos.RecoverMetaStateData.newBuilder().setShouldSplitWal(shouldSplitWal); + if (failedMetaServer != null) { + state.setFailedMetaServer(ProtobufUtil.toServerName(failedMetaServer)); + } + state.setReplicaId(replicaId); + state.build().writeDelimitedTo(stream); + } + + @Override + protected void deserializeStateData(InputStream stream) throws IOException { + super.deserializeStateData(stream); + MasterProcedureProtos.RecoverMetaStateData state = + MasterProcedureProtos.RecoverMetaStateData.parseDelimitedFrom(stream); + this.shouldSplitWal = state.hasShouldSplitWal() && state.getShouldSplitWal(); + this.failedMetaServer = state.hasFailedMetaServer() ? + ProtobufUtil.toServerName(state.getFailedMetaServer()) : null; + this.replicaId = state.hasReplicaId() ? 
state.getReplicaId() : HRegionInfo.DEFAULT_REPLICA_ID; + } + + @Override + protected LockState acquireLock(MasterProcedureEnv env) { + if (env.getProcedureScheduler().waitTableExclusiveLock(this, TableName.META_TABLE_NAME)) { + return LockState.LOCK_EVENT_WAIT; + } + return LockState.LOCK_ACQUIRED; + } + + @Override + protected void releaseLock(MasterProcedureEnv env) { + env.getProcedureScheduler().wakeTableExclusiveLock(this, TableName.META_TABLE_NAME); + } + + @Override + protected void completionCleanup(MasterProcedureEnv env) { + ProcedurePrepareLatch.releaseLatch(syncLatch, this); + } + + @Override + public TableName getTableName() { + return TableName.META_TABLE_NAME; + } + + @Override + public TableOperationType getTableOperationType() { + return TableOperationType.ENABLE; + } + + /** + * @return true if failedMetaServer is not null (a meta-carrying server crashed) or meta is + * not yet initialized + */ + private boolean isRunRequired() { + return failedMetaServer != null || !master.getAssignmentManager().isMetaInitialized(); + } + + /** + * Prepare for execution + */ + private void prepare(MasterProcedureEnv env) { + if (master == null) { + master = (HMaster) env.getMasterServices(); + Preconditions.checkArgument(master != null); + } + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java index db4ac327bb01935d9f6be1e457f38a30546dd6a0..be9270cfc3fa6134267e2a41ed8e164f9f28c498 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,7 +21,6 @@ import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import java.util.ArrayList; -import java.util.Collection; import java.util.Iterator; import java.util.List; @@ -79,17 +78,6 @@ implements ServerProcedureInterface { private boolean shouldSplitWal; /** - * Cycles on same state. Good for figuring if we are stuck. - */ - private int cycles = 0; - - /** - * Ordinal of the previous state. So we can tell if we are progressing or not. TODO: if useful, - * move this back up into StateMachineProcedure - */ - private int previousState; - - /** * Call this constructor queuing up a Procedure. * @param serverName Name of the crashed server. * @param shouldSplitWal True if we should split WALs as part of crashed server processing. @@ -117,16 +105,6 @@ implements ServerProcedureInterface { @Override protected Flow executeFromState(MasterProcedureEnv env, ServerCrashState state) throws ProcedureSuspendedException, ProcedureYieldException { - if (LOG.isTraceEnabled()) { - LOG.trace(state + " " + this + "; cycles=" + this.cycles); - } - // Keep running count of cycles - if (state.ordinal() != this.previousState) { - this.previousState = state.ordinal(); - this.cycles = 0; - } else { - this.cycles++; - } final MasterServices services = env.getMasterServices(); // HBASE-14802 // If we have not yet notified that we are processing a dead server, we should do now.
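With the per-procedure counters removed above, the running count of executions of the same state is kept once by the base StateMachineProcedure and read through getCycles(), as the retry logging in the next hunk shows. Below is a minimal sketch of how any subclass can consult that counter to notice it is stuck; DemoProcedure, DemoState, MAX_CYCLES and doWork() are hypothetical names used for illustration only and are not part of this patch.

import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;

/** Hypothetical subclass, for illustration only. */
class DemoProcedure extends StateMachineProcedure<Void, DemoProcedure.DemoState> {
  private static final Log LOG = LogFactory.getLog(DemoProcedure.class);
  private static final int MAX_CYCLES = 10; // assumed threshold

  enum DemoState { DEMO_WORK }

  @Override
  protected Flow executeFromState(Void env, DemoState state) {
    try {
      doWork(); // state-specific work that may fail and be retried
      return Flow.NO_MORE_STATE;
    } catch (IOException e) {
      // getCycles() counts consecutive executions of the same state, so a
      // growing value means the procedure is spinning rather than progressing.
      if (getCycles() > MAX_CYCLES) {
        LOG.warn("Stuck; state=" + state + ", cycles=" + getCycles(), e);
      }
      return Flow.HAS_MORE_STATE; // retry the same state
    }
  }

  private void doWork() throws IOException { /* ... */ }

  @Override
  protected void rollbackState(Void env, DemoState state) { /* nothing to roll back */ }

  @Override
  protected DemoState getState(int stateId) { return DemoState.DEMO_WORK; }

  @Override
  protected int getStateId(DemoState state) { return state.ordinal(); }

  @Override
  protected DemoState getInitialState() { return DemoState.DEMO_WORK; }
}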
@@ -183,7 +161,7 @@ implements ServerProcedureInterface { if (LOG.isTraceEnabled()) { LOG.trace("Assigning regions " + HRegionInfo.getShortNameToLog(regionsOnCrashedServer) + ", " + this + - "; cycles=" + this.cycles); + "; cycles=" + getCycles()); } handleRIT(env, regionsOnCrashedServer); AssignmentManager am = env.getAssignmentManager(); @@ -201,7 +179,7 @@ implements ServerProcedureInterface { throw new UnsupportedOperationException("unhandled state=" + state); } } catch (IOException e) { - LOG.warn("Failed state=" + state + ", retry " + this + "; cycles=" + this.cycles, e); + LOG.warn("Failed state=" + state + ", retry " + this + "; cycles=" + getCycles(), e); } return Flow.HAS_MORE_STATE; } @@ -216,15 +194,10 @@ implements ServerProcedureInterface { /** * @param env * @throws IOException - * @throws InterruptedException */ private void processMeta(final MasterProcedureEnv env) throws IOException { - if (LOG.isDebugEnabled()) LOG.debug("Processing hbase:meta that was on " + this.serverName); - - if (this.shouldSplitWal) { - // TODO: Matteo. We BLOCK here but most important thing to be doing at this moment. - env.getMasterServices().getMasterWalManager().splitMetaLog(serverName); - } + if (LOG.isDebugEnabled()) LOG.debug(this + "; Processing hbase:meta that was on " + + this.serverName); // Assign meta if still carrying it. Check again: region may be assigned because of RIT timeout final AssignmentManager am = env.getMasterServices().getAssignmentManager(); @@ -232,19 +205,13 @@ implements ServerProcedureInterface { if (!isDefaultMetaRegion(hri)) continue; am.offlineRegion(hri); - addChildProcedure(am.createAssignProcedure(hri, true)); + addChildProcedure(new RecoverMetaProcedure(serverName, this.shouldSplitWal)); } } private boolean filterDefaultMetaRegions(final List regions) { if (regions == null) return false; - final Iterator it = regions.iterator(); - while (it.hasNext()) { - final HRegionInfo hri = it.next(); - if (isDefaultMetaRegion(hri)) { - it.remove(); - } - } + regions.removeIf(this::isDefaultMetaRegion); return !regions.isEmpty(); } @@ -268,10 +235,6 @@ implements ServerProcedureInterface { am.getRegionStates().logSplit(this.serverName); } - static int size(final Collection hris) { - return hris == null? 0: hris.size(); - } - @Override protected void rollbackState(MasterProcedureEnv env, ServerCrashState state) throws IOException { @@ -281,7 +244,7 @@ implements ServerProcedureInterface { @Override protected ServerCrashState getState(int stateId) { - return ServerCrashState.valueOf(stateId); + return ServerCrashState.forNumber(stateId); } @Override @@ -402,9 +365,8 @@ implements ServerProcedureInterface { * Notify them of crash. Remove assign entries from the passed in regions * otherwise we have two assigns going on and they will fight over who has lock. * Notify Unassigns also. - * @param crashedServer Server that crashed. 
+ * @param env * @param regions Regions that were on crashed server - * @return Subset of regions that were RIT against crashedServer */ private void handleRIT(final MasterProcedureEnv env, final List regions) { if (regions == null) return; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java index 2d30d7ecdfc7c638753f9f535034a8a1b3048276..3c4dc945c5a078ff2b916683c06f08a810ce7deb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java @@ -52,6 +52,8 @@ import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import com.google.protobuf.Service; +import static org.mockito.Mockito.mock; + public class MockNoopMasterServices implements MasterServices, Server { private final Configuration conf; private final MetricsMaster metricsMaster; @@ -324,7 +326,7 @@ public class MockNoopMasterServices implements MasterServices, Server { @Override public TableStateManager getTableStateManager() { - return null; + return mock(TableStateManager.class); } @Override @@ -452,6 +454,11 @@ public class MockNoopMasterServices implements MasterServices, Server { } @Override + public boolean recoverMeta() throws IOException { + return false; + } + + @Override public ProcedureEvent getInitializedEvent() { // TODO Auto-generated method stub return null; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java index fe0e7b193a3c8cc22d1ea5865e5078cbf084f863..4c06f07ad85c72d7e592e208990ce09f9f4880bf 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java @@ -272,7 +272,7 @@ public class TestMasterNoCluster { MasterMetaBootstrap createMetaBootstrap(final HMaster master, final MonitoredTask status) { return new MasterMetaBootstrap(this, status) { @Override - protected void assignMeta(Set previouslyFailedMeatRSs, int replicaId) { } + protected void assignMeta(int replicaId) { } }; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java index d8a69a61d6a55b7f4d49966c38b0176f38376902..6dfcad17fd05383f8065588aa17eb3278141f504 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java @@ -92,13 +92,10 @@ public class MasterProcedureTestingUtility { public Void call() throws Exception { final AssignmentManager am = env.getAssignmentManager(); am.start(); - if (true) { - MasterMetaBootstrap metaBootstrap = new MasterMetaBootstrap(master, - TaskMonitor.get().createStatus("meta")); - metaBootstrap.splitMetaLogsBeforeAssignment(); - metaBootstrap.assignMeta(); - metaBootstrap.processDeadServers(); - } + MasterMetaBootstrap metaBootstrap = new MasterMetaBootstrap(master, + TaskMonitor.get().createStatus("meta")); + metaBootstrap.recoverMeta(); + metaBootstrap.processDeadServers(); am.joinCluster(); master.setInitialized(true); return 
null; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestServerCrashProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestServerCrashProcedure.java index b6bf0bb15b7e6e1122af6cf415eb58a95f924340..0a31a84b33d92e4f8fe3562dc77ff70568763d97 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestServerCrashProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestServerCrashProcedure.java @@ -84,17 +84,18 @@ public class TestServerCrashProcedure { @Test(timeout=60000) public void testCrashTargetRs() throws Exception { + testRecoveryAndDoubleExecution(false, false); } @Ignore // HBASE-18366... To be enabled again. @Test(timeout=60000) public void testRecoveryAndDoubleExecutionOnRsWithMeta() throws Exception { - testRecoveryAndDoubleExecution(true); + testRecoveryAndDoubleExecution(true, true); } @Test(timeout=60000) public void testRecoveryAndDoubleExecutionOnRsWithoutMeta() throws Exception { - testRecoveryAndDoubleExecution(false); + testRecoveryAndDoubleExecution(false, true); } /** @@ -102,7 +103,8 @@ public class TestServerCrashProcedure { * needed state. * @throws Exception */ - private void testRecoveryAndDoubleExecution(final boolean carryingMeta) throws Exception { + private void testRecoveryAndDoubleExecution(final boolean carryingMeta, + final boolean doubleExecution) throws Exception { final TableName tableName = TableName.valueOf( "testRecoveryAndDoubleExecution-carryingMeta-" + carryingMeta); final Table t = this.util.createTable(tableName, HBaseTestingUtility.COLUMNS, @@ -120,7 +122,7 @@ public class TestServerCrashProcedure { master.setServerCrashProcessingEnabled(false); // find the first server that match the request and executes the test ServerName rsToKill = null; - for (HRegionInfo hri: util.getHBaseAdmin().getTableRegions(tableName)) { + for (HRegionInfo hri : util.getHBaseAdmin().getTableRegions(tableName)) { final ServerName serverName = AssignmentTestingUtil.getServerHoldingRegion(util, hri); if (AssignmentTestingUtil.isServerHoldingMeta(util, serverName) == carryingMeta) { rsToKill = serverName; @@ -135,14 +137,22 @@ public class TestServerCrashProcedure { master.getServerManager().moveFromOnlineToDeadServers(rsToKill); // Enable test flags and then queue the crash procedure. ProcedureTestingUtility.waitNoProcedureRunning(procExec); - ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true); - long procId = procExec.submitProcedure(new ServerCrashProcedure( - procExec.getEnvironment(), rsToKill, true, carryingMeta)); - // Now run through the procedure twice crashing the executor on each step... - MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId); + ServerCrashProcedure scp = new ServerCrashProcedure(procExec.getEnvironment(), rsToKill, + true, carryingMeta); + if (doubleExecution) { + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true); + long procId = procExec.submitProcedure(scp); + // Now run through the procedure twice crashing the executor on each step... + MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId); + } else { + ProcedureTestingUtility.submitAndWait(procExec, scp); + } // Assert all data came back. assertEquals(count, util.countRows(t)); assertEquals(checksum, util.checksumRows(t)); + } catch(Throwable throwable) { + LOG.error("Test failed!", throwable); + throw throwable; } finally { t.close(); } -- 2.10.1 (Apple Git-78)
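Taken together, the patch leaves callers two entry points for bringing meta online: the blocking MasterServices.recoverMeta(), which the master uses during initialization, and direct submission of a RecoverMetaProcedure for a crashed meta-carrying server, as ServerCrashProcedure.processMeta() now does. A minimal caller-side sketch using only the APIs added above; the wrapper class RecoverMetaUsage and its method names are hypothetical:

import java.io.IOException;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.master.procedure.RecoverMetaProcedure;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;

/** Hypothetical helper, for illustration only. */
final class RecoverMetaUsage {
  private RecoverMetaUsage() { }

  /**
   * Blocking variant: submits a RecoverMetaProcedure and waits on its prepare
   * latch, mirroring HMaster.finishActiveMasterInitialization().
   * @return true if meta is initialized once the procedure completes
   */
  static boolean recoverSynchronously(MasterServices master) throws IOException {
    return master.recoverMeta();
  }

  /**
   * Asynchronous variant: queues meta recovery for a crashed meta-carrying
   * server, splitting its meta WAL first, as ServerCrashProcedure does.
   * @return the id of the submitted procedure
   */
  static long recoverAfterCrash(ProcedureExecutor<MasterProcedureEnv> procExec,
      ServerName failedMetaServer) {
    return procExec.submitProcedure(new RecoverMetaProcedure(failedMetaServer, true));
  }
}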