diff --git hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java index ee61841..64cf639 100644 --- hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java +++ hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java @@ -453,7 +453,8 @@ public abstract class Procedure implements Comparable { } @InterfaceAudience.Private - protected synchronized ProcedureState getState() { + public + synchronized ProcedureState getState() { return state; } diff --git hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java index 9ed9d7a..07cab69 100644 --- hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java +++ hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java @@ -1598,6 +1598,369 @@ public final class MasterProcedureProtos { } /** + * Protobuf enum {@code hbase.pb.AssignTableRegionState} + */ + public enum AssignTableRegionState + implements com.google.protobuf.ProtocolMessageEnum { + /** + * ASSIGN_TABLE_REGION_PREPARE = 1; + */ + ASSIGN_TABLE_REGION_PREPARE(0, 1), + /** + * ASSIGN_TABLE_REGION_UNASSIGN_FAILED_OPEN_OR_CLOSE_REGION = 2; + */ + ASSIGN_TABLE_REGION_UNASSIGN_FAILED_OPEN_OR_CLOSE_REGION(1, 2), + /** + * ASSIGN_TABLE_REGION_CHECK_OFFLINE = 3; + */ + ASSIGN_TABLE_REGION_CHECK_OFFLINE(2, 3), + /** + * ASSIGN_TABLE_REGION_GENERATE_PLAN = 4; + */ + ASSIGN_TABLE_REGION_GENERATE_PLAN(3, 4), + /** + * ASSIGN_TABLE_REGION_DO_ASSIGN = 5; + */ + ASSIGN_TABLE_REGION_DO_ASSIGN(4, 5), + /** + * ASSIGN_TABLE_REGION_VALIDATE = 6; + */ + ASSIGN_TABLE_REGION_VALIDATE(5, 6), + /** + * ASSIGN_TABLE_REGION_GENERATE_NEW_PLAN = 7; + */ + ASSIGN_TABLE_REGION_GENERATE_NEW_PLAN(6, 7), + /** + * ASSIGN_TABLE_REGION_MARK_FAILED_OPEN = 8; + */ + ASSIGN_TABLE_REGION_MARK_FAILED_OPEN(7, 8), + /** + * ASSIGN_TABLE_REGION_POST_OPERATION = 9; + */ + ASSIGN_TABLE_REGION_POST_OPERATION(8, 9), + ; + + /** + * ASSIGN_TABLE_REGION_PREPARE = 1; + */ + public static final int ASSIGN_TABLE_REGION_PREPARE_VALUE = 1; + /** + * ASSIGN_TABLE_REGION_UNASSIGN_FAILED_OPEN_OR_CLOSE_REGION = 2; + */ + public static final int ASSIGN_TABLE_REGION_UNASSIGN_FAILED_OPEN_OR_CLOSE_REGION_VALUE = 2; + /** + * ASSIGN_TABLE_REGION_CHECK_OFFLINE = 3; + */ + public static final int ASSIGN_TABLE_REGION_CHECK_OFFLINE_VALUE = 3; + /** + * ASSIGN_TABLE_REGION_GENERATE_PLAN = 4; + */ + public static final int ASSIGN_TABLE_REGION_GENERATE_PLAN_VALUE = 4; + /** + * ASSIGN_TABLE_REGION_DO_ASSIGN = 5; + */ + public static final int ASSIGN_TABLE_REGION_DO_ASSIGN_VALUE = 5; + /** + * ASSIGN_TABLE_REGION_VALIDATE = 6; + */ + public static final int ASSIGN_TABLE_REGION_VALIDATE_VALUE = 6; + /** + * ASSIGN_TABLE_REGION_GENERATE_NEW_PLAN = 7; + */ + public static final int ASSIGN_TABLE_REGION_GENERATE_NEW_PLAN_VALUE = 7; + /** + * ASSIGN_TABLE_REGION_MARK_FAILED_OPEN = 8; + */ + public static final int ASSIGN_TABLE_REGION_MARK_FAILED_OPEN_VALUE = 8; + /** + * ASSIGN_TABLE_REGION_POST_OPERATION = 9; + */ + public static final int ASSIGN_TABLE_REGION_POST_OPERATION_VALUE = 9; + + + public final int getNumber() { return value; } + + public static AssignTableRegionState valueOf(int value) { + switch (value) { + case 1: return ASSIGN_TABLE_REGION_PREPARE; + case 2: return 
ASSIGN_TABLE_REGION_UNASSIGN_FAILED_OPEN_OR_CLOSE_REGION; + case 3: return ASSIGN_TABLE_REGION_CHECK_OFFLINE; + case 4: return ASSIGN_TABLE_REGION_GENERATE_PLAN; + case 5: return ASSIGN_TABLE_REGION_DO_ASSIGN; + case 6: return ASSIGN_TABLE_REGION_VALIDATE; + case 7: return ASSIGN_TABLE_REGION_GENERATE_NEW_PLAN; + case 8: return ASSIGN_TABLE_REGION_MARK_FAILED_OPEN; + case 9: return ASSIGN_TABLE_REGION_POST_OPERATION; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public AssignTableRegionState findValueByNumber(int number) { + return AssignTableRegionState.valueOf(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(index); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(14); + } + + private static final AssignTableRegionState[] VALUES = values(); + + public static AssignTableRegionState valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int index; + private final int value; + + private AssignTableRegionState(int index, int value) { + this.index = index; + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:hbase.pb.AssignTableRegionState) + } + + /** + * Protobuf enum {@code hbase.pb.AssignRegionToServerState} + */ + public enum AssignRegionToServerState + implements com.google.protobuf.ProtocolMessageEnum { + /** + * ASSIGN_REGION_PREPARE = 1; + */ + ASSIGN_REGION_PREPARE(0, 1), + /** + * ASSIGN_REGION_TO_SERVER = 2; + */ + ASSIGN_REGION_TO_SERVER(1, 2), + /** + * ASSIGN_REGION_CHECK_REGION_STATE = 3; + */ + ASSIGN_REGION_CHECK_REGION_STATE(2, 3), + /** + * ASSIGN_REGION_SET_FAILED_OPEN = 4; + */ + ASSIGN_REGION_SET_FAILED_OPEN(3, 4), + /** + * ASSIGN_REGION_MARK_OPEN = 5; + */ + ASSIGN_REGION_MARK_OPEN(4, 5), + ; + + /** + * ASSIGN_REGION_PREPARE = 1; + */ + public static final int ASSIGN_REGION_PREPARE_VALUE = 1; + /** + * ASSIGN_REGION_TO_SERVER = 2; + */ + public static final int ASSIGN_REGION_TO_SERVER_VALUE = 2; + /** + * ASSIGN_REGION_CHECK_REGION_STATE = 3; + */ + public static final int ASSIGN_REGION_CHECK_REGION_STATE_VALUE = 3; + /** + * ASSIGN_REGION_SET_FAILED_OPEN = 4; + */ + public static final int ASSIGN_REGION_SET_FAILED_OPEN_VALUE = 4; + /** + * ASSIGN_REGION_MARK_OPEN = 5; + */ + public static final int ASSIGN_REGION_MARK_OPEN_VALUE = 5; + + + public final int getNumber() { return value; } + + public static AssignRegionToServerState valueOf(int value) { + switch (value) { + case 1: return ASSIGN_REGION_PREPARE; + case 2: return ASSIGN_REGION_TO_SERVER; + case 3: return ASSIGN_REGION_CHECK_REGION_STATE; + case 4: return ASSIGN_REGION_SET_FAILED_OPEN; + case 5: return ASSIGN_REGION_MARK_OPEN; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private 
static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public AssignRegionToServerState findValueByNumber(int number) { + return AssignRegionToServerState.valueOf(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(index); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(15); + } + + private static final AssignRegionToServerState[] VALUES = values(); + + public static AssignRegionToServerState valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int index; + private final int value; + + private AssignRegionToServerState(int index, int value) { + this.index = index; + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:hbase.pb.AssignRegionToServerState) + } + + /** + * Protobuf enum {@code hbase.pb.UnassignRegionFromServerState} + */ + public enum UnassignRegionFromServerState + implements com.google.protobuf.ProtocolMessageEnum { + /** + * UNASSIGN_REGION_PREPARE = 1; + */ + UNASSIGN_REGION_PREPARE(0, 1), + /** + * UNASSIGN_REGION_FROM_SERVER = 2; + */ + UNASSIGN_REGION_FROM_SERVER(1, 2), + /** + * UNASSIGN_REGION_CHECK_REGION_STATE = 3; + */ + UNASSIGN_REGION_CHECK_REGION_STATE(2, 3), + /** + * UNASSIGN_REGION_SET_FAILED_CLOSE = 4; + */ + UNASSIGN_REGION_SET_FAILED_CLOSE(3, 4), + /** + * UNASSIGN_REGION_MARK_CLOSE = 5; + */ + UNASSIGN_REGION_MARK_CLOSE(4, 5), + ; + + /** + * UNASSIGN_REGION_PREPARE = 1; + */ + public static final int UNASSIGN_REGION_PREPARE_VALUE = 1; + /** + * UNASSIGN_REGION_FROM_SERVER = 2; + */ + public static final int UNASSIGN_REGION_FROM_SERVER_VALUE = 2; + /** + * UNASSIGN_REGION_CHECK_REGION_STATE = 3; + */ + public static final int UNASSIGN_REGION_CHECK_REGION_STATE_VALUE = 3; + /** + * UNASSIGN_REGION_SET_FAILED_CLOSE = 4; + */ + public static final int UNASSIGN_REGION_SET_FAILED_CLOSE_VALUE = 4; + /** + * UNASSIGN_REGION_MARK_CLOSE = 5; + */ + public static final int UNASSIGN_REGION_MARK_CLOSE_VALUE = 5; + + + public final int getNumber() { return value; } + + public static UnassignRegionFromServerState valueOf(int value) { + switch (value) { + case 1: return UNASSIGN_REGION_PREPARE; + case 2: return UNASSIGN_REGION_FROM_SERVER; + case 3: return UNASSIGN_REGION_CHECK_REGION_STATE; + case 4: return UNASSIGN_REGION_SET_FAILED_CLOSE; + case 5: return UNASSIGN_REGION_MARK_CLOSE; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public UnassignRegionFromServerState findValueByNumber(int number) { + return UnassignRegionFromServerState.valueOf(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(index); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + 
getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(16); + } + + private static final UnassignRegionFromServerState[] VALUES = values(); + + public static UnassignRegionFromServerState valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int index; + private final int value; + + private UnassignRegionFromServerState(int index, int value) { + this.index = index; + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:hbase.pb.UnassignRegionFromServerState) + } + + /** * Protobuf enum {@code hbase.pb.ServerCrashState} */ public enum ServerCrashState @@ -1725,7 +2088,7 @@ public final class MasterProcedureProtos { } public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(14); + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(17); } private static final ServerCrashState[] VALUES = values(); @@ -19078,45 +19441,3074 @@ public final class MasterProcedureProtos { // @@protoc_insertion_point(class_scope:hbase.pb.RestoreSnapshotStateData) } - public interface ServerCrashStateDataOrBuilder + public interface AssignTableRegionStateDataOrBuilder extends com.google.protobuf.MessageOrBuilder { - // required .hbase.pb.ServerName server_name = 1; + // required .hbase.pb.TableName table_name = 1; /** - * required .hbase.pb.ServerName server_name = 1; + * required .hbase.pb.TableName table_name = 1; */ - boolean hasServerName(); + boolean hasTableName(); /** - * required .hbase.pb.ServerName server_name = 1; + * required .hbase.pb.TableName table_name = 1; */ - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName(); /** - * required .hbase.pb.ServerName server_name = 1; + * required .hbase.pb.TableName table_name = 1; */ - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder(); - // optional bool distributed_log_replay = 2; + // required .hbase.pb.RegionInfo region_info = 2; /** - * optional bool distributed_log_replay = 2; + * required .hbase.pb.RegionInfo region_info = 2; */ - boolean hasDistributedLogReplay(); + boolean hasRegionInfo(); /** - * optional bool distributed_log_replay = 2; + * required .hbase.pb.RegionInfo region_info = 2; */ - boolean getDistributedLogReplay(); - - // repeated .hbase.pb.RegionInfo regions_on_crashed_server = 3; + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo(); /** - * repeated .hbase.pb.RegionInfo regions_on_crashed_server = 3; + * required .hbase.pb.RegionInfo region_info = 2; */ - java.util.List - getRegionsOnCrashedServerList(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder(); + + // optional bool force_new_plan = 3 [default = false]; /** - * repeated .hbase.pb.RegionInfo regions_on_crashed_server = 3; + * optional 
bool force_new_plan = 3 [default = false]; */ - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionsOnCrashedServer(int index); + boolean hasForceNewPlan(); /** - * repeated .hbase.pb.RegionInfo regions_on_crashed_server = 3; + * optional bool force_new_plan = 3 [default = false]; + */ + boolean getForceNewPlan(); + + // optional .hbase.pb.ServerName destination_server = 4; + /** + * optional .hbase.pb.ServerName destination_server = 4; + */ + boolean hasDestinationServer(); + /** + * optional .hbase.pb.ServerName destination_server = 4; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getDestinationServer(); + /** + * optional .hbase.pb.ServerName destination_server = 4; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getDestinationServerOrBuilder(); + + // required uint32 retry_count = 5; + /** + * required uint32 retry_count = 5; + */ + boolean hasRetryCount(); + /** + * required uint32 retry_count = 5; + */ + int getRetryCount(); + + // required uint64 start_time = 6; + /** + * required uint64 start_time = 6; + */ + boolean hasStartTime(); + /** + * required uint64 start_time = 6; + */ + long getStartTime(); + } + /** + * Protobuf type {@code hbase.pb.AssignTableRegionStateData} + */ + public static final class AssignTableRegionStateData extends + com.google.protobuf.GeneratedMessage + implements AssignTableRegionStateDataOrBuilder { + // Use AssignTableRegionStateData.newBuilder() to construct. + private AssignTableRegionStateData(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private AssignTableRegionStateData(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final AssignTableRegionStateData defaultInstance; + public static AssignTableRegionStateData getDefaultInstance() { + return defaultInstance; + } + + public AssignTableRegionStateData getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private AssignTableRegionStateData( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = tableName_.toBuilder(); + } + tableName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(tableName_); + tableName_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + case 18: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder subBuilder = null; + if (((bitField0_ & 0x00000002) == 0x00000002)) { + subBuilder = 
regionInfo_.toBuilder(); + } + regionInfo_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(regionInfo_); + regionInfo_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000002; + break; + } + case 24: { + bitField0_ |= 0x00000004; + forceNewPlan_ = input.readBool(); + break; + } + case 34: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder subBuilder = null; + if (((bitField0_ & 0x00000008) == 0x00000008)) { + subBuilder = destinationServer_.toBuilder(); + } + destinationServer_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(destinationServer_); + destinationServer_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000008; + break; + } + case 40: { + bitField0_ |= 0x00000010; + retryCount_ = input.readUInt32(); + break; + } + case 48: { + bitField0_ |= 0x00000020; + startTime_ = input.readUInt64(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_AssignTableRegionStateData_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_AssignTableRegionStateData_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AssignTableRegionStateData.class, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AssignTableRegionStateData.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public AssignTableRegionStateData parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new AssignTableRegionStateData(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required .hbase.pb.TableName table_name = 1; + public static final int TABLE_NAME_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_; + /** + * required .hbase.pb.TableName table_name = 1; + */ + public boolean hasTableName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .hbase.pb.TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() { + return tableName_; + } + /** + * required .hbase.pb.TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() { + return tableName_; + } + + // required .hbase.pb.RegionInfo region_info = 2; + public static final int REGION_INFO_FIELD_NUMBER = 2; + private 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo regionInfo_; + /** + * required .hbase.pb.RegionInfo region_info = 2; + */ + public boolean hasRegionInfo() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .hbase.pb.RegionInfo region_info = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo() { + return regionInfo_; + } + /** + * required .hbase.pb.RegionInfo region_info = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder() { + return regionInfo_; + } + + // optional bool force_new_plan = 3 [default = false]; + public static final int FORCE_NEW_PLAN_FIELD_NUMBER = 3; + private boolean forceNewPlan_; + /** + * optional bool force_new_plan = 3 [default = false]; + */ + public boolean hasForceNewPlan() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional bool force_new_plan = 3 [default = false]; + */ + public boolean getForceNewPlan() { + return forceNewPlan_; + } + + // optional .hbase.pb.ServerName destination_server = 4; + public static final int DESTINATION_SERVER_FIELD_NUMBER = 4; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName destinationServer_; + /** + * optional .hbase.pb.ServerName destination_server = 4; + */ + public boolean hasDestinationServer() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional .hbase.pb.ServerName destination_server = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getDestinationServer() { + return destinationServer_; + } + /** + * optional .hbase.pb.ServerName destination_server = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getDestinationServerOrBuilder() { + return destinationServer_; + } + + // required uint32 retry_count = 5; + public static final int RETRY_COUNT_FIELD_NUMBER = 5; + private int retryCount_; + /** + * required uint32 retry_count = 5; + */ + public boolean hasRetryCount() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * required uint32 retry_count = 5; + */ + public int getRetryCount() { + return retryCount_; + } + + // required uint64 start_time = 6; + public static final int START_TIME_FIELD_NUMBER = 6; + private long startTime_; + /** + * required uint64 start_time = 6; + */ + public boolean hasStartTime() { + return ((bitField0_ & 0x00000020) == 0x00000020); + } + /** + * required uint64 start_time = 6; + */ + public long getStartTime() { + return startTime_; + } + + private void initFields() { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + regionInfo_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance(); + forceNewPlan_ = false; + destinationServer_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); + retryCount_ = 0; + startTime_ = 0L; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasTableName()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasRegionInfo()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasRetryCount()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasStartTime()) { + memoizedIsInitialized = 0; + return false; + } + if (!getTableName().isInitialized()) { + 
memoizedIsInitialized = 0; + return false; + } + if (!getRegionInfo().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + if (hasDestinationServer()) { + if (!getDestinationServer().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, tableName_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeMessage(2, regionInfo_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeBool(3, forceNewPlan_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeMessage(4, destinationServer_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + output.writeUInt32(5, retryCount_); + } + if (((bitField0_ & 0x00000020) == 0x00000020)) { + output.writeUInt64(6, startTime_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, tableName_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, regionInfo_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(3, forceNewPlan_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(4, destinationServer_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt32Size(5, retryCount_); + } + if (((bitField0_ & 0x00000020) == 0x00000020)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(6, startTime_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AssignTableRegionStateData)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AssignTableRegionStateData other = (org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AssignTableRegionStateData) obj; + + boolean result = true; + result = result && (hasTableName() == other.hasTableName()); + if (hasTableName()) { + result = result && getTableName() + .equals(other.getTableName()); + } + result = result && (hasRegionInfo() == other.hasRegionInfo()); + if (hasRegionInfo()) { + result = result && getRegionInfo() + .equals(other.getRegionInfo()); + } + result = result && (hasForceNewPlan() == other.hasForceNewPlan()); + if (hasForceNewPlan()) { + result = result && (getForceNewPlan() + == other.getForceNewPlan()); + } + result = result && (hasDestinationServer() == other.hasDestinationServer()); + if (hasDestinationServer()) { + result = result && getDestinationServer() + .equals(other.getDestinationServer()); 
+ } + result = result && (hasRetryCount() == other.hasRetryCount()); + if (hasRetryCount()) { + result = result && (getRetryCount() + == other.getRetryCount()); + } + result = result && (hasStartTime() == other.hasStartTime()); + if (hasStartTime()) { + result = result && (getStartTime() + == other.getStartTime()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasTableName()) { + hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER; + hash = (53 * hash) + getTableName().hashCode(); + } + if (hasRegionInfo()) { + hash = (37 * hash) + REGION_INFO_FIELD_NUMBER; + hash = (53 * hash) + getRegionInfo().hashCode(); + } + if (hasForceNewPlan()) { + hash = (37 * hash) + FORCE_NEW_PLAN_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getForceNewPlan()); + } + if (hasDestinationServer()) { + hash = (37 * hash) + DESTINATION_SERVER_FIELD_NUMBER; + hash = (53 * hash) + getDestinationServer().hashCode(); + } + if (hasRetryCount()) { + hash = (37 * hash) + RETRY_COUNT_FIELD_NUMBER; + hash = (53 * hash) + getRetryCount(); + } + if (hasStartTime()) { + hash = (37 * hash) + START_TIME_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getStartTime()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AssignTableRegionStateData parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AssignTableRegionStateData parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AssignTableRegionStateData parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AssignTableRegionStateData parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AssignTableRegionStateData parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AssignTableRegionStateData parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AssignTableRegionStateData parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AssignTableRegionStateData parseDelimitedFrom( + 
java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AssignTableRegionStateData parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AssignTableRegionStateData parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AssignTableRegionStateData prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.AssignTableRegionStateData} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AssignTableRegionStateDataOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_AssignTableRegionStateData_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_AssignTableRegionStateData_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AssignTableRegionStateData.class, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AssignTableRegionStateData.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AssignTableRegionStateData.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getTableNameFieldBuilder(); + getRegionInfoFieldBuilder(); + getDestinationServerFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (tableNameBuilder_ == null) { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + } else { + tableNameBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + if (regionInfoBuilder_ == null) { + regionInfo_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance(); + } else { + regionInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + forceNewPlan_ = false; + bitField0_ = (bitField0_ & ~0x00000004); + if (destinationServerBuilder_ == null) { + 
destinationServer_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); + } else { + destinationServerBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000008); + retryCount_ = 0; + bitField0_ = (bitField0_ & ~0x00000010); + startTime_ = 0L; + bitField0_ = (bitField0_ & ~0x00000020); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_AssignTableRegionStateData_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AssignTableRegionStateData getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AssignTableRegionStateData.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AssignTableRegionStateData build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AssignTableRegionStateData result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AssignTableRegionStateData buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AssignTableRegionStateData result = new org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AssignTableRegionStateData(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (tableNameBuilder_ == null) { + result.tableName_ = tableName_; + } else { + result.tableName_ = tableNameBuilder_.build(); + } + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + if (regionInfoBuilder_ == null) { + result.regionInfo_ = regionInfo_; + } else { + result.regionInfo_ = regionInfoBuilder_.build(); + } + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.forceNewPlan_ = forceNewPlan_; + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000008; + } + if (destinationServerBuilder_ == null) { + result.destinationServer_ = destinationServer_; + } else { + result.destinationServer_ = destinationServerBuilder_.build(); + } + if (((from_bitField0_ & 0x00000010) == 0x00000010)) { + to_bitField0_ |= 0x00000010; + } + result.retryCount_ = retryCount_; + if (((from_bitField0_ & 0x00000020) == 0x00000020)) { + to_bitField0_ |= 0x00000020; + } + result.startTime_ = startTime_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AssignTableRegionStateData) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AssignTableRegionStateData)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AssignTableRegionStateData other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AssignTableRegionStateData.getDefaultInstance()) return this; + if (other.hasTableName()) { + mergeTableName(other.getTableName()); + } + if (other.hasRegionInfo()) { + 
mergeRegionInfo(other.getRegionInfo()); + } + if (other.hasForceNewPlan()) { + setForceNewPlan(other.getForceNewPlan()); + } + if (other.hasDestinationServer()) { + mergeDestinationServer(other.getDestinationServer()); + } + if (other.hasRetryCount()) { + setRetryCount(other.getRetryCount()); + } + if (other.hasStartTime()) { + setStartTime(other.getStartTime()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasTableName()) { + + return false; + } + if (!hasRegionInfo()) { + + return false; + } + if (!hasRetryCount()) { + + return false; + } + if (!hasStartTime()) { + + return false; + } + if (!getTableName().isInitialized()) { + + return false; + } + if (!getRegionInfo().isInitialized()) { + + return false; + } + if (hasDestinationServer()) { + if (!getDestinationServer().isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AssignTableRegionStateData parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AssignTableRegionStateData) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .hbase.pb.TableName table_name = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableNameBuilder_; + /** + * required .hbase.pb.TableName table_name = 1; + */ + public boolean hasTableName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .hbase.pb.TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() { + if (tableNameBuilder_ == null) { + return tableName_; + } else { + return tableNameBuilder_.getMessage(); + } + } + /** + * required .hbase.pb.TableName table_name = 1; + */ + public Builder setTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + tableName_ = value; + onChanged(); + } else { + tableNameBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.TableName table_name = 1; + */ + public Builder setTableName( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableNameBuilder_ == null) { + tableName_ = builderForValue.build(); + onChanged(); + } else { + tableNameBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.TableName table_name = 1; + */ + public Builder mergeTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == 
null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + tableName_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) { + tableName_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial(); + } else { + tableName_ = value; + } + onChanged(); + } else { + tableNameBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.TableName table_name = 1; + */ + public Builder clearTableName() { + if (tableNameBuilder_ == null) { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + onChanged(); + } else { + tableNameBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * required .hbase.pb.TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableNameBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getTableNameFieldBuilder().getBuilder(); + } + /** + * required .hbase.pb.TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() { + if (tableNameBuilder_ != null) { + return tableNameBuilder_.getMessageOrBuilder(); + } else { + return tableName_; + } + } + /** + * required .hbase.pb.TableName table_name = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> + getTableNameFieldBuilder() { + if (tableNameBuilder_ == null) { + tableNameBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>( + tableName_, + getParentForChildren(), + isClean()); + tableName_ = null; + } + return tableNameBuilder_; + } + + // required .hbase.pb.RegionInfo region_info = 2; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo regionInfo_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> regionInfoBuilder_; + /** + * required .hbase.pb.RegionInfo region_info = 2; + */ + public boolean hasRegionInfo() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .hbase.pb.RegionInfo region_info = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo() { + if (regionInfoBuilder_ == null) { + return regionInfo_; + } else { + return regionInfoBuilder_.getMessage(); + } + } + /** + * required .hbase.pb.RegionInfo region_info = 2; + */ + public Builder setRegionInfo(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) { + if (regionInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + regionInfo_ = value; + onChanged(); + } else { + regionInfoBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .hbase.pb.RegionInfo 
region_info = 2; + */ + public Builder setRegionInfo( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) { + if (regionInfoBuilder_ == null) { + regionInfo_ = builderForValue.build(); + onChanged(); + } else { + regionInfoBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .hbase.pb.RegionInfo region_info = 2; + */ + public Builder mergeRegionInfo(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) { + if (regionInfoBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002) && + regionInfo_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance()) { + regionInfo_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.newBuilder(regionInfo_).mergeFrom(value).buildPartial(); + } else { + regionInfo_ = value; + } + onChanged(); + } else { + regionInfoBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .hbase.pb.RegionInfo region_info = 2; + */ + public Builder clearRegionInfo() { + if (regionInfoBuilder_ == null) { + regionInfo_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance(); + onChanged(); + } else { + regionInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + /** + * required .hbase.pb.RegionInfo region_info = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder getRegionInfoBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getRegionInfoFieldBuilder().getBuilder(); + } + /** + * required .hbase.pb.RegionInfo region_info = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder() { + if (regionInfoBuilder_ != null) { + return regionInfoBuilder_.getMessageOrBuilder(); + } else { + return regionInfo_; + } + } + /** + * required .hbase.pb.RegionInfo region_info = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> + getRegionInfoFieldBuilder() { + if (regionInfoBuilder_ == null) { + regionInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>( + regionInfo_, + getParentForChildren(), + isClean()); + regionInfo_ = null; + } + return regionInfoBuilder_; + } + + // optional bool force_new_plan = 3 [default = false]; + private boolean forceNewPlan_ ; + /** + * optional bool force_new_plan = 3 [default = false]; + */ + public boolean hasForceNewPlan() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional bool force_new_plan = 3 [default = false]; + */ + public boolean getForceNewPlan() { + return forceNewPlan_; + } + /** + * optional bool force_new_plan = 3 [default = false]; + */ + public Builder setForceNewPlan(boolean value) { + bitField0_ |= 0x00000004; + forceNewPlan_ = value; + onChanged(); + return this; + } + /** + * optional bool force_new_plan = 3 [default = false]; + */ + public Builder clearForceNewPlan() { + bitField0_ = (bitField0_ & ~0x00000004); + forceNewPlan_ = false; + onChanged(); + return this; + } + + 
// optional .hbase.pb.ServerName destination_server = 4; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName destinationServer_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> destinationServerBuilder_; + /** + * optional .hbase.pb.ServerName destination_server = 4; + */ + public boolean hasDestinationServer() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional .hbase.pb.ServerName destination_server = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getDestinationServer() { + if (destinationServerBuilder_ == null) { + return destinationServer_; + } else { + return destinationServerBuilder_.getMessage(); + } + } + /** + * optional .hbase.pb.ServerName destination_server = 4; + */ + public Builder setDestinationServer(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { + if (destinationServerBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + destinationServer_ = value; + onChanged(); + } else { + destinationServerBuilder_.setMessage(value); + } + bitField0_ |= 0x00000008; + return this; + } + /** + * optional .hbase.pb.ServerName destination_server = 4; + */ + public Builder setDestinationServer( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) { + if (destinationServerBuilder_ == null) { + destinationServer_ = builderForValue.build(); + onChanged(); + } else { + destinationServerBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000008; + return this; + } + /** + * optional .hbase.pb.ServerName destination_server = 4; + */ + public Builder mergeDestinationServer(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { + if (destinationServerBuilder_ == null) { + if (((bitField0_ & 0x00000008) == 0x00000008) && + destinationServer_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()) { + destinationServer_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.newBuilder(destinationServer_).mergeFrom(value).buildPartial(); + } else { + destinationServer_ = value; + } + onChanged(); + } else { + destinationServerBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000008; + return this; + } + /** + * optional .hbase.pb.ServerName destination_server = 4; + */ + public Builder clearDestinationServer() { + if (destinationServerBuilder_ == null) { + destinationServer_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); + onChanged(); + } else { + destinationServerBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000008); + return this; + } + /** + * optional .hbase.pb.ServerName destination_server = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder getDestinationServerBuilder() { + bitField0_ |= 0x00000008; + onChanged(); + return getDestinationServerFieldBuilder().getBuilder(); + } + /** + * optional .hbase.pb.ServerName destination_server = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getDestinationServerOrBuilder() { + if (destinationServerBuilder_ != null) { + return 
destinationServerBuilder_.getMessageOrBuilder(); + } else { + return destinationServer_; + } + } + /** + * optional .hbase.pb.ServerName destination_server = 4; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> + getDestinationServerFieldBuilder() { + if (destinationServerBuilder_ == null) { + destinationServerBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>( + destinationServer_, + getParentForChildren(), + isClean()); + destinationServer_ = null; + } + return destinationServerBuilder_; + } + + // required uint32 retry_count = 5; + private int retryCount_ ; + /** + * required uint32 retry_count = 5; + */ + public boolean hasRetryCount() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * required uint32 retry_count = 5; + */ + public int getRetryCount() { + return retryCount_; + } + /** + * required uint32 retry_count = 5; + */ + public Builder setRetryCount(int value) { + bitField0_ |= 0x00000010; + retryCount_ = value; + onChanged(); + return this; + } + /** + * required uint32 retry_count = 5; + */ + public Builder clearRetryCount() { + bitField0_ = (bitField0_ & ~0x00000010); + retryCount_ = 0; + onChanged(); + return this; + } + + // required uint64 start_time = 6; + private long startTime_ ; + /** + * required uint64 start_time = 6; + */ + public boolean hasStartTime() { + return ((bitField0_ & 0x00000020) == 0x00000020); + } + /** + * required uint64 start_time = 6; + */ + public long getStartTime() { + return startTime_; + } + /** + * required uint64 start_time = 6; + */ + public Builder setStartTime(long value) { + bitField0_ |= 0x00000020; + startTime_ = value; + onChanged(); + return this; + } + /** + * required uint64 start_time = 6; + */ + public Builder clearStartTime() { + bitField0_ = (bitField0_ & ~0x00000020); + startTime_ = 0L; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.AssignTableRegionStateData) + } + + static { + defaultInstance = new AssignTableRegionStateData(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.AssignTableRegionStateData) + } + + public interface AssignRegionToServerDataOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .hbase.pb.ServerName server_name = 1; + /** + * required .hbase.pb.ServerName server_name = 1; + */ + boolean hasServerName(); + /** + * required .hbase.pb.ServerName server_name = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName(); + /** + * required .hbase.pb.ServerName server_name = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder(); + + // required .hbase.pb.RegionInfo region_to_assign = 2; + /** + * required .hbase.pb.RegionInfo region_to_assign = 2; + */ + boolean hasRegionToAssign(); + /** + * required .hbase.pb.RegionInfo region_to_assign = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionToAssign(); + /** + * required .hbase.pb.RegionInfo region_to_assign = 2; + */ + 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionToAssignOrBuilder(); + + // required uint32 retry_count = 3; + /** + * required uint32 retry_count = 3; + */ + boolean hasRetryCount(); + /** + * required uint32 retry_count = 3; + */ + int getRetryCount(); + } + /** + * Protobuf type {@code hbase.pb.AssignRegionToServerData} + */ + public static final class AssignRegionToServerData extends + com.google.protobuf.GeneratedMessage + implements AssignRegionToServerDataOrBuilder { + // Use AssignRegionToServerData.newBuilder() to construct. + private AssignRegionToServerData(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private AssignRegionToServerData(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final AssignRegionToServerData defaultInstance; + public static AssignRegionToServerData getDefaultInstance() { + return defaultInstance; + } + + public AssignRegionToServerData getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private AssignRegionToServerData( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = serverName_.toBuilder(); + } + serverName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(serverName_); + serverName_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + case 18: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder subBuilder = null; + if (((bitField0_ & 0x00000002) == 0x00000002)) { + subBuilder = regionToAssign_.toBuilder(); + } + regionToAssign_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(regionToAssign_); + regionToAssign_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000002; + break; + } + case 24: { + bitField0_ |= 0x00000004; + retryCount_ = input.readUInt32(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return 
org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_AssignRegionToServerData_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_AssignRegionToServerData_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AssignRegionToServerData.class, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AssignRegionToServerData.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public AssignRegionToServerData parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new AssignRegionToServerData(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required .hbase.pb.ServerName server_name = 1; + public static final int SERVER_NAME_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName serverName_; + /** + * required .hbase.pb.ServerName server_name = 1; + */ + public boolean hasServerName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .hbase.pb.ServerName server_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName() { + return serverName_; + } + /** + * required .hbase.pb.ServerName server_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder() { + return serverName_; + } + + // required .hbase.pb.RegionInfo region_to_assign = 2; + public static final int REGION_TO_ASSIGN_FIELD_NUMBER = 2; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo regionToAssign_; + /** + * required .hbase.pb.RegionInfo region_to_assign = 2; + */ + public boolean hasRegionToAssign() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .hbase.pb.RegionInfo region_to_assign = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionToAssign() { + return regionToAssign_; + } + /** + * required .hbase.pb.RegionInfo region_to_assign = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionToAssignOrBuilder() { + return regionToAssign_; + } + + // required uint32 retry_count = 3; + public static final int RETRY_COUNT_FIELD_NUMBER = 3; + private int retryCount_; + /** + * required uint32 retry_count = 3; + */ + public boolean hasRetryCount() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required uint32 retry_count = 3; + */ + public int getRetryCount() { + return retryCount_; + } + + private void initFields() { + serverName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); + regionToAssign_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance(); + retryCount_ = 0; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasServerName()) { + memoizedIsInitialized = 0; + return false; + 
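// Annotation (not generated output): isInitialized() returns false as soon as any of the
// three required fields (server_name, region_to_assign, retry_count) is missing, and the
// checks further down also reject a server_name or region_to_assign whose own required
// sub-fields are unset, so an incomplete builder cannot be built into a message.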
} + if (!hasRegionToAssign()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasRetryCount()) { + memoizedIsInitialized = 0; + return false; + } + if (!getServerName().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + if (!getRegionToAssign().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, serverName_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeMessage(2, regionToAssign_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeUInt32(3, retryCount_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, serverName_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, regionToAssign_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt32Size(3, retryCount_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AssignRegionToServerData)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AssignRegionToServerData other = (org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AssignRegionToServerData) obj; + + boolean result = true; + result = result && (hasServerName() == other.hasServerName()); + if (hasServerName()) { + result = result && getServerName() + .equals(other.getServerName()); + } + result = result && (hasRegionToAssign() == other.hasRegionToAssign()); + if (hasRegionToAssign()) { + result = result && getRegionToAssign() + .equals(other.getRegionToAssign()); + } + result = result && (hasRetryCount() == other.hasRetryCount()); + if (hasRetryCount()) { + result = result && (getRetryCount() + == other.getRetryCount()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasServerName()) { + hash = (37 * hash) + SERVER_NAME_FIELD_NUMBER; + hash = (53 * hash) + getServerName().hashCode(); + } + if (hasRegionToAssign()) { + hash = (37 * hash) + REGION_TO_ASSIGN_FIELD_NUMBER; + hash = (53 * hash) + getRegionToAssign().hashCode(); + } + if (hasRetryCount()) { + hash = (37 * hash) + RETRY_COUNT_FIELD_NUMBER; + hash = (53 * hash) + getRetryCount(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public 
static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AssignRegionToServerData parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AssignRegionToServerData parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AssignRegionToServerData parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AssignRegionToServerData parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AssignRegionToServerData parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AssignRegionToServerData parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AssignRegionToServerData parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AssignRegionToServerData parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AssignRegionToServerData parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AssignRegionToServerData parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AssignRegionToServerData prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.AssignRegionToServerData} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements 
org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AssignRegionToServerDataOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_AssignRegionToServerData_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_AssignRegionToServerData_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AssignRegionToServerData.class, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AssignRegionToServerData.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AssignRegionToServerData.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getServerNameFieldBuilder(); + getRegionToAssignFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (serverNameBuilder_ == null) { + serverName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); + } else { + serverNameBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + if (regionToAssignBuilder_ == null) { + regionToAssign_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance(); + } else { + regionToAssignBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + retryCount_ = 0; + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_AssignRegionToServerData_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AssignRegionToServerData getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AssignRegionToServerData.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AssignRegionToServerData build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AssignRegionToServerData result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AssignRegionToServerData buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AssignRegionToServerData result = new org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AssignRegionToServerData(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (serverNameBuilder_ == null) { + result.serverName_ = serverName_; + } else { + result.serverName_ = serverNameBuilder_.build(); + } + if (((from_bitField0_ & 0x00000002) == 
0x00000002)) { + to_bitField0_ |= 0x00000002; + } + if (regionToAssignBuilder_ == null) { + result.regionToAssign_ = regionToAssign_; + } else { + result.regionToAssign_ = regionToAssignBuilder_.build(); + } + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.retryCount_ = retryCount_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AssignRegionToServerData) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AssignRegionToServerData)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AssignRegionToServerData other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AssignRegionToServerData.getDefaultInstance()) return this; + if (other.hasServerName()) { + mergeServerName(other.getServerName()); + } + if (other.hasRegionToAssign()) { + mergeRegionToAssign(other.getRegionToAssign()); + } + if (other.hasRetryCount()) { + setRetryCount(other.getRetryCount()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasServerName()) { + + return false; + } + if (!hasRegionToAssign()) { + + return false; + } + if (!hasRetryCount()) { + + return false; + } + if (!getServerName().isInitialized()) { + + return false; + } + if (!getRegionToAssign().isInitialized()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AssignRegionToServerData parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AssignRegionToServerData) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .hbase.pb.ServerName server_name = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName serverName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> serverNameBuilder_; + /** + * required .hbase.pb.ServerName server_name = 1; + */ + public boolean hasServerName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .hbase.pb.ServerName server_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName() { + if (serverNameBuilder_ == null) { + return serverName_; + } else { + return serverNameBuilder_.getMessage(); + } + } + /** + * required .hbase.pb.ServerName server_name = 1; + */ + public Builder setServerName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { + if (serverNameBuilder_ == null) { + if (value 
== null) { + throw new NullPointerException(); + } + serverName_ = value; + onChanged(); + } else { + serverNameBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.ServerName server_name = 1; + */ + public Builder setServerName( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) { + if (serverNameBuilder_ == null) { + serverName_ = builderForValue.build(); + onChanged(); + } else { + serverNameBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.ServerName server_name = 1; + */ + public Builder mergeServerName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { + if (serverNameBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + serverName_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()) { + serverName_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.newBuilder(serverName_).mergeFrom(value).buildPartial(); + } else { + serverName_ = value; + } + onChanged(); + } else { + serverNameBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.ServerName server_name = 1; + */ + public Builder clearServerName() { + if (serverNameBuilder_ == null) { + serverName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); + onChanged(); + } else { + serverNameBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * required .hbase.pb.ServerName server_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder getServerNameBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getServerNameFieldBuilder().getBuilder(); + } + /** + * required .hbase.pb.ServerName server_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder() { + if (serverNameBuilder_ != null) { + return serverNameBuilder_.getMessageOrBuilder(); + } else { + return serverName_; + } + } + /** + * required .hbase.pb.ServerName server_name = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> + getServerNameFieldBuilder() { + if (serverNameBuilder_ == null) { + serverNameBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>( + serverName_, + getParentForChildren(), + isClean()); + serverName_ = null; + } + return serverNameBuilder_; + } + + // required .hbase.pb.RegionInfo region_to_assign = 2; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo regionToAssign_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> regionToAssignBuilder_; + /** + * required 
.hbase.pb.RegionInfo region_to_assign = 2; + */ + public boolean hasRegionToAssign() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .hbase.pb.RegionInfo region_to_assign = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionToAssign() { + if (regionToAssignBuilder_ == null) { + return regionToAssign_; + } else { + return regionToAssignBuilder_.getMessage(); + } + } + /** + * required .hbase.pb.RegionInfo region_to_assign = 2; + */ + public Builder setRegionToAssign(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) { + if (regionToAssignBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + regionToAssign_ = value; + onChanged(); + } else { + regionToAssignBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .hbase.pb.RegionInfo region_to_assign = 2; + */ + public Builder setRegionToAssign( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) { + if (regionToAssignBuilder_ == null) { + regionToAssign_ = builderForValue.build(); + onChanged(); + } else { + regionToAssignBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .hbase.pb.RegionInfo region_to_assign = 2; + */ + public Builder mergeRegionToAssign(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) { + if (regionToAssignBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002) && + regionToAssign_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance()) { + regionToAssign_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.newBuilder(regionToAssign_).mergeFrom(value).buildPartial(); + } else { + regionToAssign_ = value; + } + onChanged(); + } else { + regionToAssignBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .hbase.pb.RegionInfo region_to_assign = 2; + */ + public Builder clearRegionToAssign() { + if (regionToAssignBuilder_ == null) { + regionToAssign_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance(); + onChanged(); + } else { + regionToAssignBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + /** + * required .hbase.pb.RegionInfo region_to_assign = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder getRegionToAssignBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getRegionToAssignFieldBuilder().getBuilder(); + } + /** + * required .hbase.pb.RegionInfo region_to_assign = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionToAssignOrBuilder() { + if (regionToAssignBuilder_ != null) { + return regionToAssignBuilder_.getMessageOrBuilder(); + } else { + return regionToAssign_; + } + } + /** + * required .hbase.pb.RegionInfo region_to_assign = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> + getRegionToAssignFieldBuilder() { + if (regionToAssignBuilder_ == null) { + regionToAssignBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>( + regionToAssign_, + getParentForChildren(), + isClean()); + regionToAssign_ = null; + } + return regionToAssignBuilder_; + } + + // required uint32 retry_count = 3; + private int retryCount_ ; + /** + * required uint32 retry_count = 3; + */ + public boolean hasRetryCount() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required uint32 retry_count = 3; + */ + public int getRetryCount() { + return retryCount_; + } + /** + * required uint32 retry_count = 3; + */ + public Builder setRetryCount(int value) { + bitField0_ |= 0x00000004; + retryCount_ = value; + onChanged(); + return this; + } + /** + * required uint32 retry_count = 3; + */ + public Builder clearRetryCount() { + bitField0_ = (bitField0_ & ~0x00000004); + retryCount_ = 0; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.AssignRegionToServerData) + } + + static { + defaultInstance = new AssignRegionToServerData(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.AssignRegionToServerData) + } + + public interface UnassignRegionFromServerDataOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .hbase.pb.ServerName server_name = 1; + /** + * required .hbase.pb.ServerName server_name = 1; + */ + boolean hasServerName(); + /** + * required .hbase.pb.ServerName server_name = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName(); + /** + * required .hbase.pb.ServerName server_name = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder(); + + // required .hbase.pb.RegionInfo region_to_unassign = 2; + /** + * required .hbase.pb.RegionInfo region_to_unassign = 2; + */ + boolean hasRegionToUnassign(); + /** + * required .hbase.pb.RegionInfo region_to_unassign = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionToUnassign(); + /** + * required .hbase.pb.RegionInfo region_to_unassign = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionToUnassignOrBuilder(); + + // required uint32 retry_count = 3; + /** + * required uint32 retry_count = 3; + */ + boolean hasRetryCount(); + /** + * required uint32 retry_count = 3; + */ + int getRetryCount(); + } + /** + * Protobuf type {@code hbase.pb.UnassignRegionFromServerData} + */ + public static final class UnassignRegionFromServerData extends + com.google.protobuf.GeneratedMessage + implements UnassignRegionFromServerDataOrBuilder { + // Use UnassignRegionFromServerData.newBuilder() to construct. 
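For context on the AssignRegionToServerData message whose definition ends just above: the following is a minimal, illustrative sketch of how caller code might populate and round-trip the new record through its generated builder and parser. The ServerName/RegionInfo/TableName setters used here (setHostName, setPort, setStartCode, setRegionId, setNamespace, setQualifier) come from HBaseProtos in HBase.proto, not from this patch, and the snippet is an assumption-laden example rather than code taken from the procedure implementation.

import com.google.protobuf.ByteString;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AssignRegionToServerData;

public class AssignRegionToServerDataExample {
  public static void main(String[] args) throws Exception {
    // Required sub-messages; field names are from HBase.proto, outside this patch.
    HBaseProtos.ServerName server = HBaseProtos.ServerName.newBuilder()
        .setHostName("rs1.example.com")   // hypothetical region server
        .setPort(16020)
        .setStartCode(1L)
        .build();
    HBaseProtos.RegionInfo region = HBaseProtos.RegionInfo.newBuilder()
        .setRegionId(1L)
        .setTableName(HBaseProtos.TableName.newBuilder()
            .setNamespace(ByteString.copyFromUtf8("default"))
            .setQualifier(ByteString.copyFromUtf8("t1")))
        .build();

    // All three fields are required, so build() only succeeds once they are set.
    AssignRegionToServerData data = AssignRegionToServerData.newBuilder()
        .setServerName(server)
        .setRegionToAssign(region)
        .setRetryCount(0)
        .build();

    // Round-trip through the generated parser, as procedure state serialization would.
    AssignRegionToServerData copy = AssignRegionToServerData.parseFrom(data.toByteString());
    assert copy.getRegionToAssign().getRegionId() == 1L;
  }
}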
+ private UnassignRegionFromServerData(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private UnassignRegionFromServerData(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final UnassignRegionFromServerData defaultInstance; + public static UnassignRegionFromServerData getDefaultInstance() { + return defaultInstance; + } + + public UnassignRegionFromServerData getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private UnassignRegionFromServerData( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = serverName_.toBuilder(); + } + serverName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(serverName_); + serverName_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + case 18: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder subBuilder = null; + if (((bitField0_ & 0x00000002) == 0x00000002)) { + subBuilder = regionToUnassign_.toBuilder(); + } + regionToUnassign_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(regionToUnassign_); + regionToUnassign_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000002; + break; + } + case 24: { + bitField0_ |= 0x00000004; + retryCount_ = input.readUInt32(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_UnassignRegionFromServerData_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_UnassignRegionFromServerData_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.UnassignRegionFromServerData.class, 
org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.UnassignRegionFromServerData.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public UnassignRegionFromServerData parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new UnassignRegionFromServerData(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required .hbase.pb.ServerName server_name = 1; + public static final int SERVER_NAME_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName serverName_; + /** + * required .hbase.pb.ServerName server_name = 1; + */ + public boolean hasServerName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .hbase.pb.ServerName server_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName() { + return serverName_; + } + /** + * required .hbase.pb.ServerName server_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder() { + return serverName_; + } + + // required .hbase.pb.RegionInfo region_to_unassign = 2; + public static final int REGION_TO_UNASSIGN_FIELD_NUMBER = 2; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo regionToUnassign_; + /** + * required .hbase.pb.RegionInfo region_to_unassign = 2; + */ + public boolean hasRegionToUnassign() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .hbase.pb.RegionInfo region_to_unassign = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionToUnassign() { + return regionToUnassign_; + } + /** + * required .hbase.pb.RegionInfo region_to_unassign = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionToUnassignOrBuilder() { + return regionToUnassign_; + } + + // required uint32 retry_count = 3; + public static final int RETRY_COUNT_FIELD_NUMBER = 3; + private int retryCount_; + /** + * required uint32 retry_count = 3; + */ + public boolean hasRetryCount() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required uint32 retry_count = 3; + */ + public int getRetryCount() { + return retryCount_; + } + + private void initFields() { + serverName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); + regionToUnassign_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance(); + retryCount_ = 0; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasServerName()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasRegionToUnassign()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasRetryCount()) { + memoizedIsInitialized = 0; + return false; + } + if (!getServerName().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + if (!getRegionToUnassign().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws 
java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, serverName_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeMessage(2, regionToUnassign_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeUInt32(3, retryCount_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, serverName_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, regionToUnassign_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt32Size(3, retryCount_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.UnassignRegionFromServerData)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.UnassignRegionFromServerData other = (org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.UnassignRegionFromServerData) obj; + + boolean result = true; + result = result && (hasServerName() == other.hasServerName()); + if (hasServerName()) { + result = result && getServerName() + .equals(other.getServerName()); + } + result = result && (hasRegionToUnassign() == other.hasRegionToUnassign()); + if (hasRegionToUnassign()) { + result = result && getRegionToUnassign() + .equals(other.getRegionToUnassign()); + } + result = result && (hasRetryCount() == other.hasRetryCount()); + if (hasRetryCount()) { + result = result && (getRetryCount() + == other.getRetryCount()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasServerName()) { + hash = (37 * hash) + SERVER_NAME_FIELD_NUMBER; + hash = (53 * hash) + getServerName().hashCode(); + } + if (hasRegionToUnassign()) { + hash = (37 * hash) + REGION_TO_UNASSIGN_FIELD_NUMBER; + hash = (53 * hash) + getRegionToUnassign().hashCode(); + } + if (hasRetryCount()) { + hash = (37 * hash) + RETRY_COUNT_FIELD_NUMBER; + hash = (53 * hash) + getRetryCount(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.UnassignRegionFromServerData parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.UnassignRegionFromServerData parseFrom( + com.google.protobuf.ByteString data, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.UnassignRegionFromServerData parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.UnassignRegionFromServerData parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.UnassignRegionFromServerData parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.UnassignRegionFromServerData parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.UnassignRegionFromServerData parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.UnassignRegionFromServerData parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.UnassignRegionFromServerData parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.UnassignRegionFromServerData parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.UnassignRegionFromServerData prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.UnassignRegionFromServerData} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.UnassignRegionFromServerDataOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_UnassignRegionFromServerData_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return 
org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_UnassignRegionFromServerData_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.UnassignRegionFromServerData.class, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.UnassignRegionFromServerData.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.UnassignRegionFromServerData.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getServerNameFieldBuilder(); + getRegionToUnassignFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (serverNameBuilder_ == null) { + serverName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); + } else { + serverNameBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + if (regionToUnassignBuilder_ == null) { + regionToUnassign_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance(); + } else { + regionToUnassignBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + retryCount_ = 0; + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_UnassignRegionFromServerData_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.UnassignRegionFromServerData getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.UnassignRegionFromServerData.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.UnassignRegionFromServerData build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.UnassignRegionFromServerData result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.UnassignRegionFromServerData buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.UnassignRegionFromServerData result = new org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.UnassignRegionFromServerData(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (serverNameBuilder_ == null) { + result.serverName_ = serverName_; + } else { + result.serverName_ = serverNameBuilder_.build(); + } + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + if (regionToUnassignBuilder_ == null) { + result.regionToUnassign_ = regionToUnassign_; + } else { + result.regionToUnassign_ = regionToUnassignBuilder_.build(); + } + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.retryCount_ = retryCount_; + result.bitField0_ = to_bitField0_; + 
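// Annotation (not generated output): at this point to_bitField0_ records which of
// server_name, region_to_unassign and retry_count were set on the builder; copying it
// into result.bitField0_ just below is what makes hasServerName()/hasRegionToUnassign()/
// hasRetryCount() and isInitialized() on the built message agree with the builder's
// presence bits.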
onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.UnassignRegionFromServerData) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.UnassignRegionFromServerData)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.UnassignRegionFromServerData other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.UnassignRegionFromServerData.getDefaultInstance()) return this; + if (other.hasServerName()) { + mergeServerName(other.getServerName()); + } + if (other.hasRegionToUnassign()) { + mergeRegionToUnassign(other.getRegionToUnassign()); + } + if (other.hasRetryCount()) { + setRetryCount(other.getRetryCount()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasServerName()) { + + return false; + } + if (!hasRegionToUnassign()) { + + return false; + } + if (!hasRetryCount()) { + + return false; + } + if (!getServerName().isInitialized()) { + + return false; + } + if (!getRegionToUnassign().isInitialized()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.UnassignRegionFromServerData parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.UnassignRegionFromServerData) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .hbase.pb.ServerName server_name = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName serverName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> serverNameBuilder_; + /** + * required .hbase.pb.ServerName server_name = 1; + */ + public boolean hasServerName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .hbase.pb.ServerName server_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName() { + if (serverNameBuilder_ == null) { + return serverName_; + } else { + return serverNameBuilder_.getMessage(); + } + } + /** + * required .hbase.pb.ServerName server_name = 1; + */ + public Builder setServerName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { + if (serverNameBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + serverName_ = value; + onChanged(); + } else { + serverNameBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.ServerName server_name = 1; + */ + public Builder setServerName( + 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) { + if (serverNameBuilder_ == null) { + serverName_ = builderForValue.build(); + onChanged(); + } else { + serverNameBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.ServerName server_name = 1; + */ + public Builder mergeServerName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { + if (serverNameBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + serverName_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()) { + serverName_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.newBuilder(serverName_).mergeFrom(value).buildPartial(); + } else { + serverName_ = value; + } + onChanged(); + } else { + serverNameBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.ServerName server_name = 1; + */ + public Builder clearServerName() { + if (serverNameBuilder_ == null) { + serverName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); + onChanged(); + } else { + serverNameBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * required .hbase.pb.ServerName server_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder getServerNameBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getServerNameFieldBuilder().getBuilder(); + } + /** + * required .hbase.pb.ServerName server_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder() { + if (serverNameBuilder_ != null) { + return serverNameBuilder_.getMessageOrBuilder(); + } else { + return serverName_; + } + } + /** + * required .hbase.pb.ServerName server_name = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> + getServerNameFieldBuilder() { + if (serverNameBuilder_ == null) { + serverNameBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>( + serverName_, + getParentForChildren(), + isClean()); + serverName_ = null; + } + return serverNameBuilder_; + } + + // required .hbase.pb.RegionInfo region_to_unassign = 2; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo regionToUnassign_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> regionToUnassignBuilder_; + /** + * required .hbase.pb.RegionInfo region_to_unassign = 2; + */ + public boolean hasRegionToUnassign() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .hbase.pb.RegionInfo region_to_unassign = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo 
getRegionToUnassign() { + if (regionToUnassignBuilder_ == null) { + return regionToUnassign_; + } else { + return regionToUnassignBuilder_.getMessage(); + } + } + /** + * required .hbase.pb.RegionInfo region_to_unassign = 2; + */ + public Builder setRegionToUnassign(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) { + if (regionToUnassignBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + regionToUnassign_ = value; + onChanged(); + } else { + regionToUnassignBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .hbase.pb.RegionInfo region_to_unassign = 2; + */ + public Builder setRegionToUnassign( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) { + if (regionToUnassignBuilder_ == null) { + regionToUnassign_ = builderForValue.build(); + onChanged(); + } else { + regionToUnassignBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .hbase.pb.RegionInfo region_to_unassign = 2; + */ + public Builder mergeRegionToUnassign(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) { + if (regionToUnassignBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002) && + regionToUnassign_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance()) { + regionToUnassign_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.newBuilder(regionToUnassign_).mergeFrom(value).buildPartial(); + } else { + regionToUnassign_ = value; + } + onChanged(); + } else { + regionToUnassignBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .hbase.pb.RegionInfo region_to_unassign = 2; + */ + public Builder clearRegionToUnassign() { + if (regionToUnassignBuilder_ == null) { + regionToUnassign_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance(); + onChanged(); + } else { + regionToUnassignBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + /** + * required .hbase.pb.RegionInfo region_to_unassign = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder getRegionToUnassignBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getRegionToUnassignFieldBuilder().getBuilder(); + } + /** + * required .hbase.pb.RegionInfo region_to_unassign = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionToUnassignOrBuilder() { + if (regionToUnassignBuilder_ != null) { + return regionToUnassignBuilder_.getMessageOrBuilder(); + } else { + return regionToUnassign_; + } + } + /** + * required .hbase.pb.RegionInfo region_to_unassign = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> + getRegionToUnassignFieldBuilder() { + if (regionToUnassignBuilder_ == null) { + regionToUnassignBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>( + regionToUnassign_, + getParentForChildren(), + isClean()); + regionToUnassign_ = null; + 
} + return regionToUnassignBuilder_; + } + + // required uint32 retry_count = 3; + private int retryCount_ ; + /** + * required uint32 retry_count = 3; + */ + public boolean hasRetryCount() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required uint32 retry_count = 3; + */ + public int getRetryCount() { + return retryCount_; + } + /** + * required uint32 retry_count = 3; + */ + public Builder setRetryCount(int value) { + bitField0_ |= 0x00000004; + retryCount_ = value; + onChanged(); + return this; + } + /** + * required uint32 retry_count = 3; + */ + public Builder clearRetryCount() { + bitField0_ = (bitField0_ & ~0x00000004); + retryCount_ = 0; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.UnassignRegionFromServerData) + } + + static { + defaultInstance = new UnassignRegionFromServerData(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.UnassignRegionFromServerData) + } + + public interface ServerCrashStateDataOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .hbase.pb.ServerName server_name = 1; + /** + * required .hbase.pb.ServerName server_name = 1; + */ + boolean hasServerName(); + /** + * required .hbase.pb.ServerName server_name = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName(); + /** + * required .hbase.pb.ServerName server_name = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder(); + + // optional bool distributed_log_replay = 2; + /** + * optional bool distributed_log_replay = 2; + */ + boolean hasDistributedLogReplay(); + /** + * optional bool distributed_log_replay = 2; + */ + boolean getDistributedLogReplay(); + + // repeated .hbase.pb.RegionInfo regions_on_crashed_server = 3; + /** + * repeated .hbase.pb.RegionInfo regions_on_crashed_server = 3; + */ + java.util.List + getRegionsOnCrashedServerList(); + /** + * repeated .hbase.pb.RegionInfo regions_on_crashed_server = 3; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionsOnCrashedServer(int index); + /** + * repeated .hbase.pb.RegionInfo regions_on_crashed_server = 3; */ int getRegionsOnCrashedServerCount(); /** @@ -20745,6 +24137,21 @@ public final class MasterProcedureProtos { com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hbase_pb_RestoreSnapshotStateData_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_AssignTableRegionStateData_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_AssignTableRegionStateData_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_AssignRegionToServerData_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_AssignRegionToServerData_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_UnassignRegionFromServerData_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_UnassignRegionFromServerData_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor internal_static_hbase_pb_ServerCrashStateData_descriptor; private static com.google.protobuf.GeneratedMessage.FieldAccessorTable @@ -20832,102 +24239,135 @@ public final class 
MasterProcedureProtos { "o\0221\n\023region_info_for_add\030\006 \003(\0132\024.hbase.p" + "b.RegionInfo\022T\n!parent_to_child_regions_" + "pair_list\030\007 \003(\0132).hbase.pb.RestoreParent" + - "ToChildRegionsPair\"\201\002\n\024ServerCrashStateD" + - "ata\022)\n\013server_name\030\001 \002(\0132\024.hbase.pb.Serv" + - "erName\022\036\n\026distributed_log_replay\030\002 \001(\010\0227" + - "\n\031regions_on_crashed_server\030\003 \003(\0132\024.hbas" + - "e.pb.RegionInfo\022.\n\020regions_assigned\030\004 \003(" + - "\0132\024.hbase.pb.RegionInfo\022\025\n\rcarrying_meta", - "\030\005 \001(\010\022\036\n\020should_split_wal\030\006 \001(\010:\004true*\330" + - "\001\n\020CreateTableState\022\036\n\032CREATE_TABLE_PRE_" + - "OPERATION\020\001\022 \n\034CREATE_TABLE_WRITE_FS_LAY" + - "OUT\020\002\022\034\n\030CREATE_TABLE_ADD_TO_META\020\003\022\037\n\033C" + - "REATE_TABLE_ASSIGN_REGIONS\020\004\022\"\n\036CREATE_T" + - "ABLE_UPDATE_DESC_CACHE\020\005\022\037\n\033CREATE_TABLE" + - "_POST_OPERATION\020\006*\207\002\n\020ModifyTableState\022\030" + - "\n\024MODIFY_TABLE_PREPARE\020\001\022\036\n\032MODIFY_TABLE" + - "_PRE_OPERATION\020\002\022(\n$MODIFY_TABLE_UPDATE_" + - "TABLE_DESCRIPTOR\020\003\022&\n\"MODIFY_TABLE_REMOV", - "E_REPLICA_COLUMN\020\004\022!\n\035MODIFY_TABLE_DELET" + - "E_FS_LAYOUT\020\005\022\037\n\033MODIFY_TABLE_POST_OPERA" + - "TION\020\006\022#\n\037MODIFY_TABLE_REOPEN_ALL_REGION" + - "S\020\007*\212\002\n\022TruncateTableState\022 \n\034TRUNCATE_T" + - "ABLE_PRE_OPERATION\020\001\022#\n\037TRUNCATE_TABLE_R" + - "EMOVE_FROM_META\020\002\022\"\n\036TRUNCATE_TABLE_CLEA" + - "R_FS_LAYOUT\020\003\022#\n\037TRUNCATE_TABLE_CREATE_F" + - "S_LAYOUT\020\004\022\036\n\032TRUNCATE_TABLE_ADD_TO_META" + - "\020\005\022!\n\035TRUNCATE_TABLE_ASSIGN_REGIONS\020\006\022!\n" + - "\035TRUNCATE_TABLE_POST_OPERATION\020\007*\337\001\n\020Del", - "eteTableState\022\036\n\032DELETE_TABLE_PRE_OPERAT" + - "ION\020\001\022!\n\035DELETE_TABLE_REMOVE_FROM_META\020\002" + - "\022 \n\034DELETE_TABLE_CLEAR_FS_LAYOUT\020\003\022\"\n\036DE" + - "LETE_TABLE_UPDATE_DESC_CACHE\020\004\022!\n\035DELETE" + - "_TABLE_UNASSIGN_REGIONS\020\005\022\037\n\033DELETE_TABL" + - "E_POST_OPERATION\020\006*\320\001\n\024CreateNamespaceSt" + - "ate\022\034\n\030CREATE_NAMESPACE_PREPARE\020\001\022%\n!CRE" + - "ATE_NAMESPACE_CREATE_DIRECTORY\020\002\022)\n%CREA" + - "TE_NAMESPACE_INSERT_INTO_NS_TABLE\020\003\022\036\n\032C" + - "REATE_NAMESPACE_UPDATE_ZK\020\004\022(\n$CREATE_NA", - "MESPACE_SET_NAMESPACE_QUOTA\020\005*z\n\024ModifyN" + - "amespaceState\022\034\n\030MODIFY_NAMESPACE_PREPAR" + - "E\020\001\022$\n MODIFY_NAMESPACE_UPDATE_NS_TABLE\020" + - "\002\022\036\n\032MODIFY_NAMESPACE_UPDATE_ZK\020\003*\332\001\n\024De" + - "leteNamespaceState\022\034\n\030DELETE_NAMESPACE_P" + - "REPARE\020\001\022)\n%DELETE_NAMESPACE_DELETE_FROM" + - "_NS_TABLE\020\002\022#\n\037DELETE_NAMESPACE_REMOVE_F" + - "ROM_ZK\020\003\022\'\n#DELETE_NAMESPACE_DELETE_DIRE" + - "CTORIES\020\004\022+\n\'DELETE_NAMESPACE_REMOVE_NAM" + - "ESPACE_QUOTA\020\005*\331\001\n\024AddColumnFamilyState\022", - "\035\n\031ADD_COLUMN_FAMILY_PREPARE\020\001\022#\n\037ADD_CO" + - "LUMN_FAMILY_PRE_OPERATION\020\002\022-\n)ADD_COLUM" + - "N_FAMILY_UPDATE_TABLE_DESCRIPTOR\020\003\022$\n AD" + - "D_COLUMN_FAMILY_POST_OPERATION\020\004\022(\n$ADD_" + - "COLUMN_FAMILY_REOPEN_ALL_REGIONS\020\005*\353\001\n\027M" + - "odifyColumnFamilyState\022 \n\034MODIFY_COLUMN_" + - "FAMILY_PREPARE\020\001\022&\n\"MODIFY_COLUMN_FAMILY" + - 
"_PRE_OPERATION\020\002\0220\n,MODIFY_COLUMN_FAMILY" + - "_UPDATE_TABLE_DESCRIPTOR\020\003\022\'\n#MODIFY_COL" + - "UMN_FAMILY_POST_OPERATION\020\004\022+\n\'MODIFY_CO", - "LUMN_FAMILY_REOPEN_ALL_REGIONS\020\005*\226\002\n\027Del" + - "eteColumnFamilyState\022 \n\034DELETE_COLUMN_FA" + - "MILY_PREPARE\020\001\022&\n\"DELETE_COLUMN_FAMILY_P" + - "RE_OPERATION\020\002\0220\n,DELETE_COLUMN_FAMILY_U" + - "PDATE_TABLE_DESCRIPTOR\020\003\022)\n%DELETE_COLUM" + - "N_FAMILY_DELETE_FS_LAYOUT\020\004\022\'\n#DELETE_CO" + - "LUMN_FAMILY_POST_OPERATION\020\005\022+\n\'DELETE_C" + - "OLUMN_FAMILY_REOPEN_ALL_REGIONS\020\006*\350\001\n\020En" + - "ableTableState\022\030\n\024ENABLE_TABLE_PREPARE\020\001" + - "\022\036\n\032ENABLE_TABLE_PRE_OPERATION\020\002\022)\n%ENAB", - "LE_TABLE_SET_ENABLING_TABLE_STATE\020\003\022$\n E" + - "NABLE_TABLE_MARK_REGIONS_ONLINE\020\004\022(\n$ENA" + - "BLE_TABLE_SET_ENABLED_TABLE_STATE\020\005\022\037\n\033E" + - "NABLE_TABLE_POST_OPERATION\020\006*\362\001\n\021Disable" + - "TableState\022\031\n\025DISABLE_TABLE_PREPARE\020\001\022\037\n" + - "\033DISABLE_TABLE_PRE_OPERATION\020\002\022+\n\'DISABL" + - "E_TABLE_SET_DISABLING_TABLE_STATE\020\003\022&\n\"D" + - "ISABLE_TABLE_MARK_REGIONS_OFFLINE\020\004\022*\n&D" + - "ISABLE_TABLE_SET_DISABLED_TABLE_STATE\020\005\022" + - " \n\034DISABLE_TABLE_POST_OPERATION\020\006*\346\001\n\022Cl", - "oneSnapshotState\022 \n\034CLONE_SNAPSHOT_PRE_O" + - "PERATION\020\001\022\"\n\036CLONE_SNAPSHOT_WRITE_FS_LA" + - "YOUT\020\002\022\036\n\032CLONE_SNAPSHOT_ADD_TO_META\020\003\022!" + - "\n\035CLONE_SNAPSHOT_ASSIGN_REGIONS\020\004\022$\n CLO" + - "NE_SNAPSHOT_UPDATE_DESC_CACHE\020\005\022!\n\035CLONE" + - "_SNAPSHOT_POST_OPERATION\020\006*\260\001\n\024RestoreSn" + - "apshotState\022\"\n\036RESTORE_SNAPSHOT_PRE_OPER" + - "ATION\020\001\022,\n(RESTORE_SNAPSHOT_UPDATE_TABLE" + - "_DESCRIPTOR\020\002\022$\n RESTORE_SNAPSHOT_WRITE_" + - "FS_LAYOUT\020\003\022 \n\034RESTORE_SNAPSHOT_UPDATE_M", - "ETA\020\004*\234\002\n\020ServerCrashState\022\026\n\022SERVER_CRA" + - "SH_START\020\001\022\035\n\031SERVER_CRASH_PROCESS_META\020" + - "\002\022\034\n\030SERVER_CRASH_GET_REGIONS\020\003\022\036\n\032SERVE" + - "R_CRASH_NO_SPLIT_LOGS\020\004\022\033\n\027SERVER_CRASH_" + - "SPLIT_LOGS\020\005\022#\n\037SERVER_CRASH_PREPARE_LOG" + - "_REPLAY\020\006\022\027\n\023SERVER_CRASH_ASSIGN\020\010\022\037\n\033SE" + - "RVER_CRASH_WAIT_ON_ASSIGN\020\t\022\027\n\023SERVER_CR" + - "ASH_FINISH\020dBK\n*org.apache.hadoop.hbase." + - "protobuf.generatedB\025MasterProcedureProto" + - "sH\001\210\001\001\240\001\001" + "ToChildRegionsPair\"\352\001\n\032AssignTableRegion" + + "StateData\022\'\n\ntable_name\030\001 \002(\0132\023.hbase.pb" + + ".TableName\022)\n\013region_info\030\002 \002(\0132\024.hbase." 
+ + "pb.RegionInfo\022\035\n\016force_new_plan\030\003 \001(\010:\005f" + + "alse\0220\n\022destination_server\030\004 \001(\0132\024.hbase" + + ".pb.ServerName\022\023\n\013retry_count\030\005 \002(\r\022\022\n\ns", + "tart_time\030\006 \002(\004\"\212\001\n\030AssignRegionToServer" + + "Data\022)\n\013server_name\030\001 \002(\0132\024.hbase.pb.Ser" + + "verName\022.\n\020region_to_assign\030\002 \002(\0132\024.hbas" + + "e.pb.RegionInfo\022\023\n\013retry_count\030\003 \002(\r\"\220\001\n" + + "\034UnassignRegionFromServerData\022)\n\013server_" + + "name\030\001 \002(\0132\024.hbase.pb.ServerName\0220\n\022regi" + + "on_to_unassign\030\002 \002(\0132\024.hbase.pb.RegionIn" + + "fo\022\023\n\013retry_count\030\003 \002(\r\"\201\002\n\024ServerCrashS" + + "tateData\022)\n\013server_name\030\001 \002(\0132\024.hbase.pb" + + ".ServerName\022\036\n\026distributed_log_replay\030\002 ", + "\001(\010\0227\n\031regions_on_crashed_server\030\003 \003(\0132\024" + + ".hbase.pb.RegionInfo\022.\n\020regions_assigned" + + "\030\004 \003(\0132\024.hbase.pb.RegionInfo\022\025\n\rcarrying" + + "_meta\030\005 \001(\010\022\036\n\020should_split_wal\030\006 \001(\010:\004t" + + "rue*\330\001\n\020CreateTableState\022\036\n\032CREATE_TABLE" + + "_PRE_OPERATION\020\001\022 \n\034CREATE_TABLE_WRITE_F" + + "S_LAYOUT\020\002\022\034\n\030CREATE_TABLE_ADD_TO_META\020\003" + + "\022\037\n\033CREATE_TABLE_ASSIGN_REGIONS\020\004\022\"\n\036CRE" + + "ATE_TABLE_UPDATE_DESC_CACHE\020\005\022\037\n\033CREATE_" + + "TABLE_POST_OPERATION\020\006*\207\002\n\020ModifyTableSt", + "ate\022\030\n\024MODIFY_TABLE_PREPARE\020\001\022\036\n\032MODIFY_" + + "TABLE_PRE_OPERATION\020\002\022(\n$MODIFY_TABLE_UP" + + "DATE_TABLE_DESCRIPTOR\020\003\022&\n\"MODIFY_TABLE_" + + "REMOVE_REPLICA_COLUMN\020\004\022!\n\035MODIFY_TABLE_" + + "DELETE_FS_LAYOUT\020\005\022\037\n\033MODIFY_TABLE_POST_" + + "OPERATION\020\006\022#\n\037MODIFY_TABLE_REOPEN_ALL_R" + + "EGIONS\020\007*\212\002\n\022TruncateTableState\022 \n\034TRUNC" + + "ATE_TABLE_PRE_OPERATION\020\001\022#\n\037TRUNCATE_TA" + + "BLE_REMOVE_FROM_META\020\002\022\"\n\036TRUNCATE_TABLE" + + "_CLEAR_FS_LAYOUT\020\003\022#\n\037TRUNCATE_TABLE_CRE", + "ATE_FS_LAYOUT\020\004\022\036\n\032TRUNCATE_TABLE_ADD_TO" + + "_META\020\005\022!\n\035TRUNCATE_TABLE_ASSIGN_REGIONS" + + "\020\006\022!\n\035TRUNCATE_TABLE_POST_OPERATION\020\007*\337\001" + + "\n\020DeleteTableState\022\036\n\032DELETE_TABLE_PRE_O" + + "PERATION\020\001\022!\n\035DELETE_TABLE_REMOVE_FROM_M" + + "ETA\020\002\022 \n\034DELETE_TABLE_CLEAR_FS_LAYOUT\020\003\022" + + "\"\n\036DELETE_TABLE_UPDATE_DESC_CACHE\020\004\022!\n\035D" + + "ELETE_TABLE_UNASSIGN_REGIONS\020\005\022\037\n\033DELETE" + + "_TABLE_POST_OPERATION\020\006*\320\001\n\024CreateNamesp" + + "aceState\022\034\n\030CREATE_NAMESPACE_PREPARE\020\001\022%", + "\n!CREATE_NAMESPACE_CREATE_DIRECTORY\020\002\022)\n" + + "%CREATE_NAMESPACE_INSERT_INTO_NS_TABLE\020\003" + + "\022\036\n\032CREATE_NAMESPACE_UPDATE_ZK\020\004\022(\n$CREA" + + "TE_NAMESPACE_SET_NAMESPACE_QUOTA\020\005*z\n\024Mo" + + "difyNamespaceState\022\034\n\030MODIFY_NAMESPACE_P" + + "REPARE\020\001\022$\n MODIFY_NAMESPACE_UPDATE_NS_T" + + "ABLE\020\002\022\036\n\032MODIFY_NAMESPACE_UPDATE_ZK\020\003*\332" + + "\001\n\024DeleteNamespaceState\022\034\n\030DELETE_NAMESP" + + "ACE_PREPARE\020\001\022)\n%DELETE_NAMESPACE_DELETE" + + "_FROM_NS_TABLE\020\002\022#\n\037DELETE_NAMESPACE_REM", + "OVE_FROM_ZK\020\003\022\'\n#DELETE_NAMESPACE_DELETE" + + "_DIRECTORIES\020\004\022+\n\'DELETE_NAMESPACE_REMOV" + + 
"E_NAMESPACE_QUOTA\020\005*\331\001\n\024AddColumnFamilyS" + + "tate\022\035\n\031ADD_COLUMN_FAMILY_PREPARE\020\001\022#\n\037A" + + "DD_COLUMN_FAMILY_PRE_OPERATION\020\002\022-\n)ADD_" + + "COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR\020\003\022" + + "$\n ADD_COLUMN_FAMILY_POST_OPERATION\020\004\022(\n" + + "$ADD_COLUMN_FAMILY_REOPEN_ALL_REGIONS\020\005*" + + "\353\001\n\027ModifyColumnFamilyState\022 \n\034MODIFY_CO" + + "LUMN_FAMILY_PREPARE\020\001\022&\n\"MODIFY_COLUMN_F", + "AMILY_PRE_OPERATION\020\002\0220\n,MODIFY_COLUMN_F" + + "AMILY_UPDATE_TABLE_DESCRIPTOR\020\003\022\'\n#MODIF" + + "Y_COLUMN_FAMILY_POST_OPERATION\020\004\022+\n\'MODI" + + "FY_COLUMN_FAMILY_REOPEN_ALL_REGIONS\020\005*\226\002" + + "\n\027DeleteColumnFamilyState\022 \n\034DELETE_COLU" + + "MN_FAMILY_PREPARE\020\001\022&\n\"DELETE_COLUMN_FAM" + + "ILY_PRE_OPERATION\020\002\0220\n,DELETE_COLUMN_FAM" + + "ILY_UPDATE_TABLE_DESCRIPTOR\020\003\022)\n%DELETE_" + + "COLUMN_FAMILY_DELETE_FS_LAYOUT\020\004\022\'\n#DELE" + + "TE_COLUMN_FAMILY_POST_OPERATION\020\005\022+\n\'DEL", + "ETE_COLUMN_FAMILY_REOPEN_ALL_REGIONS\020\006*\350" + + "\001\n\020EnableTableState\022\030\n\024ENABLE_TABLE_PREP" + + "ARE\020\001\022\036\n\032ENABLE_TABLE_PRE_OPERATION\020\002\022)\n" + + "%ENABLE_TABLE_SET_ENABLING_TABLE_STATE\020\003" + + "\022$\n ENABLE_TABLE_MARK_REGIONS_ONLINE\020\004\022(" + + "\n$ENABLE_TABLE_SET_ENABLED_TABLE_STATE\020\005" + + "\022\037\n\033ENABLE_TABLE_POST_OPERATION\020\006*\362\001\n\021Di" + + "sableTableState\022\031\n\025DISABLE_TABLE_PREPARE" + + "\020\001\022\037\n\033DISABLE_TABLE_PRE_OPERATION\020\002\022+\n\'D" + + "ISABLE_TABLE_SET_DISABLING_TABLE_STATE\020\003", + "\022&\n\"DISABLE_TABLE_MARK_REGIONS_OFFLINE\020\004" + + "\022*\n&DISABLE_TABLE_SET_DISABLED_TABLE_STA" + + "TE\020\005\022 \n\034DISABLE_TABLE_POST_OPERATION\020\006*\346" + + "\001\n\022CloneSnapshotState\022 \n\034CLONE_SNAPSHOT_" + + "PRE_OPERATION\020\001\022\"\n\036CLONE_SNAPSHOT_WRITE_" + + "FS_LAYOUT\020\002\022\036\n\032CLONE_SNAPSHOT_ADD_TO_MET" + + "A\020\003\022!\n\035CLONE_SNAPSHOT_ASSIGN_REGIONS\020\004\022$" + + "\n CLONE_SNAPSHOT_UPDATE_DESC_CACHE\020\005\022!\n\035" + + "CLONE_SNAPSHOT_POST_OPERATION\020\006*\260\001\n\024Rest" + + "oreSnapshotState\022\"\n\036RESTORE_SNAPSHOT_PRE", + "_OPERATION\020\001\022,\n(RESTORE_SNAPSHOT_UPDATE_" + + "TABLE_DESCRIPTOR\020\002\022$\n RESTORE_SNAPSHOT_W" + + "RITE_FS_LAYOUT\020\003\022 \n\034RESTORE_SNAPSHOT_UPD" + + "ATE_META\020\004*\207\003\n\026AssignTableRegionState\022\037\n" + + "\033ASSIGN_TABLE_REGION_PREPARE\020\001\022<\n8ASSIGN" + + "_TABLE_REGION_UNASSIGN_FAILED_OPEN_OR_CL" + + "OSE_REGION\020\002\022%\n!ASSIGN_TABLE_REGION_CHEC" + + "K_OFFLINE\020\003\022%\n!ASSIGN_TABLE_REGION_GENER" + + "ATE_PLAN\020\004\022!\n\035ASSIGN_TABLE_REGION_DO_ASS" + + "IGN\020\005\022 \n\034ASSIGN_TABLE_REGION_VALIDATE\020\006\022", + ")\n%ASSIGN_TABLE_REGION_GENERATE_NEW_PLAN" + + "\020\007\022(\n$ASSIGN_TABLE_REGION_MARK_FAILED_OP" + + "EN\020\010\022&\n\"ASSIGN_TABLE_REGION_POST_OPERATI" + + "ON\020\t*\271\001\n\031AssignRegionToServerState\022\031\n\025AS" + + "SIGN_REGION_PREPARE\020\001\022\033\n\027ASSIGN_REGION_T" + + "O_SERVER\020\002\022$\n ASSIGN_REGION_CHECK_REGION" + + "_STATE\020\003\022!\n\035ASSIGN_REGION_SET_FAILED_OPE" + + "N\020\004\022\033\n\027ASSIGN_REGION_MARK_OPEN\020\005*\313\001\n\035Una" + + "ssignRegionFromServerState\022\033\n\027UNASSIGN_R" + + "EGION_PREPARE\020\001\022\037\n\033UNASSIGN_REGION_FROM_", + "SERVER\020\002\022&\n\"UNASSIGN_REGION_CHECK_REGION" + + 
"_STATE\020\003\022$\n UNASSIGN_REGION_SET_FAILED_C" + + "LOSE\020\004\022\036\n\032UNASSIGN_REGION_MARK_CLOSE\020\005*\234" + + "\002\n\020ServerCrashState\022\026\n\022SERVER_CRASH_STAR" + + "T\020\001\022\035\n\031SERVER_CRASH_PROCESS_META\020\002\022\034\n\030SE" + + "RVER_CRASH_GET_REGIONS\020\003\022\036\n\032SERVER_CRASH" + + "_NO_SPLIT_LOGS\020\004\022\033\n\027SERVER_CRASH_SPLIT_L" + + "OGS\020\005\022#\n\037SERVER_CRASH_PREPARE_LOG_REPLAY" + + "\020\006\022\027\n\023SERVER_CRASH_ASSIGN\020\010\022\037\n\033SERVER_CR" + + "ASH_WAIT_ON_ASSIGN\020\t\022\027\n\023SERVER_CRASH_FIN", + "ISH\020dBK\n*org.apache.hadoop.hbase.protobu" + + "f.generatedB\025MasterProcedureProtosH\001\210\001\001\240" + + "\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -21024,8 +24464,26 @@ public final class MasterProcedureProtos { com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_RestoreSnapshotStateData_descriptor, new java.lang.String[] { "UserInfo", "Snapshot", "ModifiedTableSchema", "RegionInfoForRestore", "RegionInfoForRemove", "RegionInfoForAdd", "ParentToChildRegionsPairList", }); - internal_static_hbase_pb_ServerCrashStateData_descriptor = + internal_static_hbase_pb_AssignTableRegionStateData_descriptor = getDescriptor().getMessageTypes().get(15); + internal_static_hbase_pb_AssignTableRegionStateData_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_AssignTableRegionStateData_descriptor, + new java.lang.String[] { "TableName", "RegionInfo", "ForceNewPlan", "DestinationServer", "RetryCount", "StartTime", }); + internal_static_hbase_pb_AssignRegionToServerData_descriptor = + getDescriptor().getMessageTypes().get(16); + internal_static_hbase_pb_AssignRegionToServerData_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_AssignRegionToServerData_descriptor, + new java.lang.String[] { "ServerName", "RegionToAssign", "RetryCount", }); + internal_static_hbase_pb_UnassignRegionFromServerData_descriptor = + getDescriptor().getMessageTypes().get(17); + internal_static_hbase_pb_UnassignRegionFromServerData_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_UnassignRegionFromServerData_descriptor, + new java.lang.String[] { "ServerName", "RegionToUnassign", "RetryCount", }); + internal_static_hbase_pb_ServerCrashStateData_descriptor = + getDescriptor().getMessageTypes().get(18); internal_static_hbase_pb_ServerCrashStateData_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_ServerCrashStateData_descriptor, diff --git hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ProcedureProtos.java hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ProcedureProtos.java index 9b368a2..2aa7136 100644 --- hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ProcedureProtos.java +++ hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ProcedureProtos.java @@ -6295,6 +6295,20 @@ public final class ProcedureProtos { * optional uint64 proc_id = 3; */ long getProcId(); + + // repeated uint64 child_id = 4; + /** + * repeated uint64 child_id = 4; + */ + java.util.List getChildIdList(); + /** + * repeated uint64 child_id = 4; + */ + int getChildIdCount(); + 
/** + * repeated uint64 child_id = 4; + */ + long getChildId(int index); } /** * Protobuf type {@code hbase.pb.ProcedureWALEntry} @@ -6371,6 +6385,27 @@ public final class ProcedureProtos { procId_ = input.readUInt64(); break; } + case 32: { + if (!((mutable_bitField0_ & 0x00000008) == 0x00000008)) { + childId_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000008; + } + childId_.add(input.readUInt64()); + break; + } + case 34: { + int length = input.readRawVarint32(); + int limit = input.pushLimit(length); + if (!((mutable_bitField0_ & 0x00000008) == 0x00000008) && input.getBytesUntilLimit() > 0) { + childId_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000008; + } + while (input.getBytesUntilLimit() > 0) { + childId_.add(input.readUInt64()); + } + input.popLimit(limit); + break; + } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -6382,6 +6417,9 @@ public final class ProcedureProtos { if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { procedure_ = java.util.Collections.unmodifiableList(procedure_); } + if (((mutable_bitField0_ & 0x00000008) == 0x00000008)) { + childId_ = java.util.Collections.unmodifiableList(childId_); + } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } @@ -6600,10 +6638,34 @@ public final class ProcedureProtos { return procId_; } + // repeated uint64 child_id = 4; + public static final int CHILD_ID_FIELD_NUMBER = 4; + private java.util.List childId_; + /** + * repeated uint64 child_id = 4; + */ + public java.util.List + getChildIdList() { + return childId_; + } + /** + * repeated uint64 child_id = 4; + */ + public int getChildIdCount() { + return childId_.size(); + } + /** + * repeated uint64 child_id = 4; + */ + public long getChildId(int index) { + return childId_.get(index); + } + private void initFields() { type_ = org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry.Type.PROCEDURE_WAL_EOF; procedure_ = java.util.Collections.emptyList(); procId_ = 0L; + childId_ = java.util.Collections.emptyList(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -6636,6 +6698,9 @@ public final class ProcedureProtos { if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeUInt64(3, procId_); } + for (int i = 0; i < childId_.size(); i++) { + output.writeUInt64(4, childId_.get(i)); + } getUnknownFields().writeTo(output); } @@ -6657,6 +6722,15 @@ public final class ProcedureProtos { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(3, procId_); } + { + int dataSize = 0; + for (int i = 0; i < childId_.size(); i++) { + dataSize += com.google.protobuf.CodedOutputStream + .computeUInt64SizeNoTag(childId_.get(i)); + } + size += dataSize; + size += 1 * getChildIdList().size(); + } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -6692,6 +6766,8 @@ public final class ProcedureProtos { result = result && (getProcId() == other.getProcId()); } + result = result && getChildIdList() + .equals(other.getChildIdList()); result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -6717,6 +6793,10 @@ public final class ProcedureProtos { hash = (37 * hash) + PROC_ID_FIELD_NUMBER; hash = (53 * hash) + hashLong(getProcId()); } + if (getChildIdCount() > 0) { + hash = (37 * hash) + CHILD_ID_FIELD_NUMBER; + hash = (53 * hash) + getChildIdList().hashCode(); + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; @@ -6837,6 +6917,8 @@ public 
final class ProcedureProtos { } procId_ = 0L; bitField0_ = (bitField0_ & ~0x00000004); + childId_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000008); return this; } @@ -6882,6 +6964,11 @@ public final class ProcedureProtos { to_bitField0_ |= 0x00000002; } result.procId_ = procId_; + if (((bitField0_ & 0x00000008) == 0x00000008)) { + childId_ = java.util.Collections.unmodifiableList(childId_); + bitField0_ = (bitField0_ & ~0x00000008); + } + result.childId_ = childId_; result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -6930,6 +7017,16 @@ public final class ProcedureProtos { if (other.hasProcId()) { setProcId(other.getProcId()); } + if (!other.childId_.isEmpty()) { + if (childId_.isEmpty()) { + childId_ = other.childId_; + bitField0_ = (bitField0_ & ~0x00000008); + } else { + ensureChildIdIsMutable(); + childId_.addAll(other.childId_); + } + onChanged(); + } this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -7276,6 +7373,72 @@ public final class ProcedureProtos { return this; } + // repeated uint64 child_id = 4; + private java.util.List childId_ = java.util.Collections.emptyList(); + private void ensureChildIdIsMutable() { + if (!((bitField0_ & 0x00000008) == 0x00000008)) { + childId_ = new java.util.ArrayList(childId_); + bitField0_ |= 0x00000008; + } + } + /** + * repeated uint64 child_id = 4; + */ + public java.util.List + getChildIdList() { + return java.util.Collections.unmodifiableList(childId_); + } + /** + * repeated uint64 child_id = 4; + */ + public int getChildIdCount() { + return childId_.size(); + } + /** + * repeated uint64 child_id = 4; + */ + public long getChildId(int index) { + return childId_.get(index); + } + /** + * repeated uint64 child_id = 4; + */ + public Builder setChildId( + int index, long value) { + ensureChildIdIsMutable(); + childId_.set(index, value); + onChanged(); + return this; + } + /** + * repeated uint64 child_id = 4; + */ + public Builder addChildId(long value) { + ensureChildIdIsMutable(); + childId_.add(value); + onChanged(); + return this; + } + /** + * repeated uint64 child_id = 4; + */ + public Builder addAllChildId( + java.lang.Iterable values) { + ensureChildIdIsMutable(); + super.addAll(values, childId_); + onChanged(); + return this; + } + /** + * repeated uint64 child_id = 4; + */ + public Builder clearChildId() { + childId_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + return this; + } + // @@protoc_insertion_point(builder_scope:hbase.pb.ProcedureWALEntry) } @@ -7355,19 +7518,19 @@ public final class ProcedureProtos { "eTracker\0229\n\004node\030\001 \003(\0132+.hbase.pb.Proced" + "ureStoreTracker.TrackerNode\032A\n\013TrackerNo" + "de\022\020\n\010start_id\030\001 \002(\004\022\017\n\007updated\030\002 \003(\004\022\017\n" + - "\007deleted\030\003 \003(\004\"\235\002\n\021ProcedureWALEntry\022.\n\004", + "\007deleted\030\003 \003(\004\"\257\002\n\021ProcedureWALEntry\022.\n\004", "type\030\001 \002(\0162 .hbase.pb.ProcedureWALEntry." 
+ "Type\022&\n\tprocedure\030\002 \003(\0132\023.hbase.pb.Proce" + - "dure\022\017\n\007proc_id\030\003 \001(\004\"\236\001\n\004Type\022\025\n\021PROCED" + - "URE_WAL_EOF\020\001\022\026\n\022PROCEDURE_WAL_INIT\020\002\022\030\n" + - "\024PROCEDURE_WAL_INSERT\020\003\022\030\n\024PROCEDURE_WAL" + - "_UPDATE\020\004\022\030\n\024PROCEDURE_WAL_DELETE\020\005\022\031\n\025P" + - "ROCEDURE_WAL_COMPACT\020\006*p\n\016ProcedureState" + - "\022\020\n\014INITIALIZING\020\001\022\014\n\010RUNNABLE\020\002\022\013\n\007WAIT" + - "ING\020\003\022\023\n\017WAITING_TIMEOUT\020\004\022\016\n\nROLLEDBACK" + - "\020\005\022\014\n\010FINISHED\020\006BE\n*org.apache.hadoop.hb", - "ase.protobuf.generatedB\017ProcedureProtosH" + - "\001\210\001\001\240\001\001" + "dure\022\017\n\007proc_id\030\003 \001(\004\022\020\n\010child_id\030\004 \003(\004\"" + + "\236\001\n\004Type\022\025\n\021PROCEDURE_WAL_EOF\020\001\022\026\n\022PROCE" + + "DURE_WAL_INIT\020\002\022\030\n\024PROCEDURE_WAL_INSERT\020" + + "\003\022\030\n\024PROCEDURE_WAL_UPDATE\020\004\022\030\n\024PROCEDURE" + + "_WAL_DELETE\020\005\022\031\n\025PROCEDURE_WAL_COMPACT\020\006" + + "*p\n\016ProcedureState\022\020\n\014INITIALIZING\020\001\022\014\n\010" + + "RUNNABLE\020\002\022\013\n\007WAITING\020\003\022\023\n\017WAITING_TIMEO" + + "UT\020\004\022\016\n\nROLLEDBACK\020\005\022\014\n\010FINISHED\020\006BE\n*or", + "g.apache.hadoop.hbase.protobuf.generated" + + "B\017ProcedureProtosH\001\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -7421,7 +7584,7 @@ public final class ProcedureProtos { internal_static_hbase_pb_ProcedureWALEntry_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_ProcedureWALEntry_descriptor, - new java.lang.String[] { "Type", "Procedure", "ProcId", }); + new java.lang.String[] { "Type", "Procedure", "ProcId", "ChildId", }); return null; } }; diff --git hbase-protocol/src/main/protobuf/MasterProcedure.proto hbase-protocol/src/main/protobuf/MasterProcedure.proto index 87aae6a..2236be3 100644 --- hbase-protocol/src/main/protobuf/MasterProcedure.proto +++ hbase-protocol/src/main/protobuf/MasterProcedure.proto @@ -262,6 +262,55 @@ message RestoreSnapshotStateData { repeated RestoreParentToChildRegionsPair parent_to_child_regions_pair_list = 7; } +enum AssignTableRegionState { + ASSIGN_TABLE_REGION_PREPARE = 1; + ASSIGN_TABLE_REGION_UNASSIGN_FAILED_OPEN_OR_CLOSE_REGION = 2; + ASSIGN_TABLE_REGION_CHECK_OFFLINE = 3; + ASSIGN_TABLE_REGION_GENERATE_PLAN = 4; + ASSIGN_TABLE_REGION_DO_ASSIGN = 5; + ASSIGN_TABLE_REGION_VALIDATE = 6; + ASSIGN_TABLE_REGION_GENERATE_NEW_PLAN = 7; + ASSIGN_TABLE_REGION_MARK_FAILED_OPEN = 8; + ASSIGN_TABLE_REGION_POST_OPERATION = 9; +} + +message AssignTableRegionStateData { + required TableName table_name = 1; + required RegionInfo region_info = 2; + optional bool force_new_plan = 3 [default = false]; + optional ServerName destination_server = 4; + required uint32 retry_count = 5; + required uint64 start_time = 6; +} + +enum AssignRegionToServerState { + ASSIGN_REGION_PREPARE = 1; + ASSIGN_REGION_TO_SERVER = 2; + ASSIGN_REGION_CHECK_REGION_STATE = 3; + ASSIGN_REGION_SET_FAILED_OPEN = 4; + ASSIGN_REGION_MARK_OPEN = 5; +} + +message AssignRegionToServerData { + required ServerName server_name = 1; + required RegionInfo region_to_assign = 2; + required uint32 retry_count = 3; +} + +enum 
UnassignRegionFromServerState { + UNASSIGN_REGION_PREPARE = 1; + UNASSIGN_REGION_FROM_SERVER = 2; + UNASSIGN_REGION_CHECK_REGION_STATE = 3; + UNASSIGN_REGION_SET_FAILED_CLOSE = 4; + UNASSIGN_REGION_MARK_CLOSE = 5; +} + +message UnassignRegionFromServerData { + required ServerName server_name = 1; + required RegionInfo region_to_unassign = 2; + required uint32 retry_count = 3; +} + message ServerCrashStateData { required ServerName server_name = 1; optional bool distributed_log_replay = 2; diff --git hbase-protocol/src/main/protobuf/Procedure.proto hbase-protocol/src/main/protobuf/Procedure.proto index 55e44a4..ae1cf38 100644 --- hbase-protocol/src/main/protobuf/Procedure.proto +++ hbase-protocol/src/main/protobuf/Procedure.proto @@ -116,4 +116,5 @@ message ProcedureWALEntry { required Type type = 1; repeated Procedure procedure = 2; optional uint64 proc_id = 3; + repeated uint64 child_id = 4; } diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java index 55a15ee..7635c66 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java @@ -56,6 +56,7 @@ import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.NotServingRegionException; +import org.apache.hadoop.hbase.ProcedureInfo; import org.apache.hadoop.hbase.RegionLocations; import org.apache.hadoop.hbase.RegionStateListener; import org.apache.hadoop.hbase.ServerName; @@ -78,6 +79,10 @@ import org.apache.hadoop.hbase.master.balancer.FavoredNodeLoadBalancer; import org.apache.hadoop.hbase.master.handler.DisableTableHandler; import org.apache.hadoop.hbase.master.handler.EnableTableHandler; import org.apache.hadoop.hbase.master.normalizer.NormalizationPlan.PlanType; +import org.apache.hadoop.hbase.master.procedure.AssignTableRegionProcedure; +import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; +import org.apache.hadoop.hbase.master.procedure.ProcedureSyncWait; +import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition; import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode; import org.apache.hadoop.hbase.quotas.QuotaExceededException; @@ -106,13 +111,13 @@ public class AssignmentManager { protected final MasterServices server; - private ServerManager serverManager; + public ServerManager serverManager; - private boolean shouldAssignRegionsWithFavoredNodes; + public boolean shouldAssignRegionsWithFavoredNodes; private LoadBalancer balancer; - private final MetricsAssignmentManager metricsAssignmentManager; + public final MetricsAssignmentManager metricsAssignmentManager; private final TableLockManager tableLockManager; @@ -613,7 +618,7 @@ public class AssignmentManager { // this problem as. Should we fail the current assignment. We should be able // to recover from this problem eventually (if the meta couldn't be updated // things should work normally and eventually get fixed up). 
- void processFavoredNodes(List regions) throws IOException { + public void processFavoredNodes(List regions) throws IOException { if (!shouldAssignRegionsWithFavoredNodes) return; // The AM gets the favored nodes info for each region and updates the meta // table with that info @@ -691,7 +696,20 @@ public class AssignmentManager { /** * Use care with forceNewPlan. It could cause double assignment. */ + //TODO: cleanup - logic duplicated in AssignTableRegionProcedure public void assign(HRegionInfo region, boolean forceNewPlan) { + boolean usingProcedureBasedAssignment = server.isInitialized() && + server.getConfiguration().getBoolean("hbase.master.assignment.procedure", false); + if (usingProcedureBasedAssignment) { + try { + LOG.info("Assigning region " + region + " using procedure"); + assignRegion(region, forceNewPlan); + } catch (Exception e) { + // TODO: ignore for now + LOG.warn("Assigning region " + region + " threw exception " + e); + } + return; + } if (isDisabledorDisablingRegionInRIT(region)) { return; } @@ -713,6 +731,18 @@ public class AssignmentManager { } } + public void assignRegion( + final HRegionInfo regionInfo, + final boolean forceNewPlan) throws IOException { + ProcedureExecutor procedureExecutor = server.getMasterProcedureExecutor(); + long procId = procedureExecutor.submitProcedure(new AssignTableRegionProcedure( + procedureExecutor.getEnvironment(), + regionInfo.getTable(), + regionInfo, + forceNewPlan)); + ProcedureSyncWait.waitForProcedureToComplete(procedureExecutor, procId); + } + /** * Bulk assign regions to destination. * @param destination @@ -915,6 +945,7 @@ public class AssignmentManager { * messing around the region states (see handleRegion, on region opened * on an unexpected server scenario, for an example) */ + // TODO: cleanup, logic dupe in UnassignRegionFromServerProcedure private void unassign(final HRegionInfo region, final ServerName server, final ServerName dest) { for (int i = 1; i <= this.maximumAttempts; i++) { @@ -989,6 +1020,7 @@ public class AssignmentManager { /** * Set region to OFFLINE unless it is opening and forceNewPlan is false. */ + //TODO: cleanup - logic duplicated in AssignTableRegionProcedure private RegionState forceRegionStateToOffline( final HRegionInfo region, final boolean forceNewPlan) { RegionState state = regionStates.getRegionState(region); @@ -1039,6 +1071,7 @@ public class AssignmentManager { * @param state * @param forceNewPlan */ + //TODO: cleanup - logic duplicated in AssignTableRegionProcedure and AssignRegionToServerProcedure private void assign(RegionState state, boolean forceNewPlan) { long startTime = EnvironmentEdgeManager.currentTime(); try { @@ -1233,6 +1266,21 @@ public class AssignmentManager { return false; } + public Map> generateAssignmentPlan( + final List regions) throws IOException { + List servers = serverManager.createDestinationServersList(); + if (servers == null || servers.isEmpty()) { + throw new IOException("Found no destination server to assign region(s)"); + } + + // Generate a round-robin bulk assignment plan + Map> bulkPlan = balancer.roundRobinAssignment(regions, servers); + if (bulkPlan == null) { + throw new IOException("Unable to determine a plan to assign region(s)"); + } + return bulkPlan; + } + /** * @param region the region to assign * @param forceNewPlan If true, then if an existing plan exists, a new plan * @return Plan for passed region (If none currently, it creates one or * if no servers to assign, it returns null). 
*/ - private RegionPlan getRegionPlan(final HRegionInfo region, + public RegionPlan getRegionPlan(final HRegionInfo region, final boolean forceNewPlan) throws HBaseIOException { // Pickup existing plan or make a new one final String encodedName = region.getEncodedName(); diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java index 9da8033..a404c8a 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java @@ -1026,7 +1026,7 @@ public class RegionStates { return result; } - protected RegionState getRegionState(final HRegionInfo hri) { + public RegionState getRegionState(final HRegionInfo hri) { return getRegionState(hri.getEncodedName()); } diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AssignRegionToServerProcedure.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AssignRegionToServerProcedure.java new file mode 100644 index 0000000..44b856e --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AssignRegionToServerProcedure.java @@ -0,0 +1,364 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.master.procedure; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException; +import org.apache.hadoop.hbase.master.AssignmentManager; +import org.apache.hadoop.hbase.master.RegionState; +import org.apache.hadoop.hbase.master.RegionState.State; +import org.apache.hadoop.hbase.master.balancer.FavoredNodeLoadBalancer; +import org.apache.hadoop.hbase.procedure2.StateMachineProcedure; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos; +import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AssignRegionToServerState; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.hadoop.ipc.RemoteException; + +/** + * Assign one region to a server + */ +public class AssignRegionToServerProcedure +extends StateMachineProcedure +implements ServerProcedureInterface { + private static final Log LOG = LogFactory.getLog(AssignRegionToServerProcedure.class); + + private final AtomicBoolean aborted = new AtomicBoolean(false); + + /** + * Name of the target server to process. 
+ */ + private ServerName serverName; + + /** + * The region to assign to the target server. + */ + private HRegionInfo regionToAssign; + + /** + * Number of retries. + */ + private int numberOfAttemptsSoFar; + + /** + * Deadline (ms) for waiting on a server that is not yet up. + */ + private Long maxWaitTime = null; + + /** + * Used when deserializing from a procedure store; we'll construct one of these then call + * {@link #deserializeStateData(InputStream)}. Do not use directly. + */ + public AssignRegionToServerProcedure() { + super(); + } + + /** + * Call this constructor queuing up a Procedure. + * @param env MasterProcedureEnv + * @param serverName Name of the server that the region is assigned to. + * @param regionToAssign target region to assign. + * @throws IOException + */ + public AssignRegionToServerProcedure( + final MasterProcedureEnv env, + final ServerName serverName, + final HRegionInfo regionToAssign) throws IOException { + this.serverName = serverName; + this.regionToAssign = regionToAssign; + this.numberOfAttemptsSoFar = 1; + this.setOwner(env.getRequestUser().getUGI().getShortUserName()); + } + + @Override + protected Flow executeFromState(MasterProcedureEnv env, AssignRegionToServerState state) { + if (LOG.isTraceEnabled()) { + LOG.trace(state); + } + + try { + switch (state) { + case ASSIGN_REGION_PREPARE: + prepareAssign(env); + setNextState(AssignRegionToServerState.ASSIGN_REGION_TO_SERVER); + break; + case ASSIGN_REGION_TO_SERVER: + LOG.info("Assign region to server " + this.serverName); + if (assignRegionToServer(env)) { + setNextState(AssignRegionToServerState.ASSIGN_REGION_MARK_OPEN); + } else { + setNextState(AssignRegionToServerState.ASSIGN_REGION_SET_FAILED_OPEN); + } + break; + case ASSIGN_REGION_CHECK_REGION_STATE: + // UNUSED NOW - TODO: implement WAIT_TIMEOUT retry for async assign RPC + // Moving the logic of reportRegionStateTransition from RS to here. + // If the assign RPC succeeds, update the region state to online; otherwise, retry + // until failure. + break; + case ASSIGN_REGION_SET_FAILED_OPEN: + LOG.info("Region " + regionToAssign + " assignment to server " + serverName + " failed."); + AssignmentManagerProcedureHelper.updateRegionState(env, regionToAssign, State.FAILED_OPEN); + return Flow.NO_MORE_STATE; + case ASSIGN_REGION_MARK_OPEN: + LOG.info("Region " + regionToAssign + " assignment to server " + serverName + " is done."); + //AssignmentManagerProcedureHelper.updateRegionState(env, regionToAssign, State.OPEN); + return Flow.NO_MORE_STATE; + default: + throw new UnsupportedOperationException("unhandled state=" + state); + } + } catch (IOException e) { + LOG.warn("Error trying to assign region " + regionToAssign.getEncodedName() + " to server " + + this.serverName.getServerName() + " (in state=" + state + ")", e); + + setFailure("master-assign-region-to-server", e); + } catch (InterruptedException e) { + LOG.warn("Interrupted assigning region " + regionToAssign.getEncodedName() + " to server " + + this.serverName.getServerName() + " (in state=" + state + ")", e); + setFailure("master-assign-region-to-server", e); + Thread.currentThread().interrupt(); + // TODO: should I return Flow.NO_MORE_STATE? 
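+ // Note: once setFailure() has been called the executor should treat this procedure as
+ // failed regardless of the returned Flow, so falling through to HAS_MORE_STATE is likely
+ // harmless here (assuming the usual StateMachineProcedure/ProcedureExecutor failure handling).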
+ } + return Flow.HAS_MORE_STATE; + } + + @Override + protected void rollbackState(MasterProcedureEnv env, AssignRegionToServerState state) + throws IOException { + // TODO + throw new UnsupportedOperationException("unhandled state=" + state); + } + + @Override + protected AssignRegionToServerState getState(int stateId) { + return AssignRegionToServerState.valueOf(stateId); + } + + @Override + protected int getStateId(AssignRegionToServerState state) { + return state.getNumber(); + } + + @Override + protected AssignRegionToServerState getInitialState() { + return AssignRegionToServerState.ASSIGN_REGION_PREPARE; + } + + @Override + protected void setNextState(AssignRegionToServerState state) { + if (aborted.get()) { + setAbortFailure("assign-region-to-server", "abort requested"); + } else { + super.setNextState(state); + } + } + + @Override + protected boolean abort(MasterProcedureEnv env) { + aborted.set(true); + return true; + } + + @Override + protected boolean acquireLock(final MasterProcedureEnv env) { + return env.getProcedureQueue().waitRegions(this, regionToAssign.getTable(), regionToAssign); + } + + @Override + protected void releaseLock(final MasterProcedureEnv env) { + env.getProcedureQueue().wakeRegions(this, regionToAssign.getTable(), regionToAssign); + } + + @Override + public void toStringClassDetails(StringBuilder sb) { + sb.append(getClass().getSimpleName()); + sb.append(" serverName="); + sb.append(this.serverName); + sb.append(", region="); + sb.append(regionToAssign); + } + + @Override + public void serializeStateData(final OutputStream stream) throws IOException { + super.serializeStateData(stream); + + MasterProcedureProtos.AssignRegionToServerData.Builder assignRegionToServerData = + MasterProcedureProtos.AssignRegionToServerData.newBuilder() + .setServerName(ProtobufUtil.toServerName(this.serverName)) + .setRegionToAssign(HRegionInfo.convert(this.regionToAssign)) + .setRetryCount(numberOfAttemptsSoFar); + + assignRegionToServerData.build().writeDelimitedTo(stream); + } + + @Override + public void deserializeStateData(final InputStream stream) throws IOException { + super.deserializeStateData(stream); + + MasterProcedureProtos.AssignRegionToServerData assignRegionToServerData = + MasterProcedureProtos.AssignRegionToServerData.parseDelimitedFrom(stream); + this.serverName = ProtobufUtil.toServerName(assignRegionToServerData.getServerName()); + this.regionToAssign = HRegionInfo.convert(assignRegionToServerData.getRegionToAssign()); + this.numberOfAttemptsSoFar = assignRegionToServerData.getRetryCount(); + } + + @Override + public ServerName getServerName() { + return this.serverName; + } + + @Override + public boolean hasMetaTableRegion() { + return this.regionToAssign.isMetaRegion(); + } + + @Override + public ServerOperationType getServerOperationType() { + return ServerOperationType.ASSIGN_REGION; + } + + /** + * Whether to yield at end of each successful flow step + */ + @Override + protected boolean isYieldBeforeExecuteFromState( + MasterProcedureEnv env, + AssignRegionToServerState state) { + return false; + } + + /** + * Prepare to assign the region and make sure that the region is in the right state. 
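+ * The region is expected to already be in PENDING_OPEN; any other state makes this step
+ * throw and fail the procedure.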
+ * @param env MasterProcedureEnv + * @throws IOException + */ + private void prepareAssign(final MasterProcedureEnv env) throws IOException { + RegionState regionState = + AssignmentManagerProcedureHelper.getOrCreateRegionState(env, regionToAssign); + if (regionState.getState() != RegionState.State.PENDING_OPEN) { + String msg = "Skip assigning region " + regionToAssign.getEncodedName() + + ", it is in " + regionState; + LOG.warn(msg); + throw new IOException(msg); + } + } + + /** + * Send RPC to the target server to assign the region + * @param env MasterProcedureEnv + * @throws IOException + * @throws InterruptedException + */ + private boolean assignRegionToServer( + final MasterProcedureEnv env) throws IOException, InterruptedException { + final String assignMsg = + "Failed assignment of " + regionToAssign.getRegionNameAsString() + " to " + serverName; + AssignmentManager am = env.getMasterServices().getAssignmentManager(); + while (true) { + try { + List favoredNodes = ServerName.EMPTY_SERVER_LIST; + if (am.shouldAssignRegionsWithFavoredNodes) { + favoredNodes = + ((FavoredNodeLoadBalancer)am.getBalancer()).getFavoredNodes(regionToAssign); + } + + // TODO: call dispatch to batch RPC calls + // TODO: add procId as parameter??? + // TODO: change the RPC to async + am.serverManager.sendRegionOpen( + serverName, + regionToAssign, + favoredNodes); + return true; // we're done + } catch (Throwable t) { + if (t instanceof RemoteException) { + t = ((RemoteException) t).unwrapRemoteException(); + } + + // Should we wait a little before retrying? If the server is starting it's yes. + boolean hold = (t instanceof ServerNotRunningYetException); + + // In case socket is timed out and the region server is still online, + // the openRegion RPC could have been accepted by the server and + // just the response didn't go through. So we will retry to + // open the region on the same server. + boolean retry = !hold && (t instanceof java.net.SocketTimeoutException + && am.serverManager.isServerOnline(serverName)); + + if (hold) { + LOG.warn(assignMsg + ", waiting a little before trying on the same region server " + + "try=" + numberOfAttemptsSoFar, t); + + try { + long now = EnvironmentEdgeManager.currentTime(); + if (now < getMaxWaitTime(env)) { + if (LOG.isDebugEnabled()) { + LOG.debug("Server is not yet up; waiting up to " + + (getMaxWaitTime(env) - now) + "ms", t); + } + Thread.sleep(100); + } else { + LOG.warn("Server " + serverName + " is not up for a while; try a new one", t); + break; + } + } catch (InterruptedException ie) { + String msg = "Failed to assign " + regionToAssign.getRegionNameAsString() + + " since interrupted"; + LOG.warn(msg); + Thread.currentThread().interrupt(); + throw new InterruptedException(msg); //???TODO + } + } else if (retry) { + // we want to retry as many times as needed as long as the RS is not dead. + if (LOG.isDebugEnabled()) { + LOG.debug(assignMsg + ", trying to assign to the same region server due ", t); + } + } else { + LOG.warn(assignMsg + ", trying to assign elsewhere instead;" + + " try=" + numberOfAttemptsSoFar, t); + break; + } + } + numberOfAttemptsSoFar++; + } + return false; + } + + /** + * The procedure could be restarted from a different machine. If the variable is null, we need to + * retrieve it. + * @return maxWaitTime + */ + private long getMaxWaitTime(final MasterProcedureEnv env) { + if (this.maxWaitTime == null) { + // This is the max attempts, not retries, so it should be at least 1. 
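+ // The wait is bounded by an absolute deadline: now + hbase.regionserver.rpc.startup.waittime
+ // (default 60000 ms), computed once and cached in maxWaitTime.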
+ this.maxWaitTime = EnvironmentEdgeManager.currentTime() + + env.getMasterConfiguration().getLong("hbase.regionserver.rpc.startup.waittime", 60000); + } + return this.maxWaitTime; + } +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AssignTableRegionProcedure.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AssignTableRegionProcedure.java new file mode 100644 index 0000000..8c6c0da --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AssignTableRegionProcedure.java @@ -0,0 +1,665 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.master.procedure; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.util.concurrent.atomic.AtomicBoolean; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.HBaseIOException; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.MetaTableAccessor; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.TableNotEnabledException; +import org.apache.hadoop.hbase.TableNotFoundException; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.TableState; +import org.apache.hadoop.hbase.ipc.FailedServerException; +import org.apache.hadoop.hbase.ipc.RpcClient; +import org.apache.hadoop.hbase.master.AssignmentManager; +import org.apache.hadoop.hbase.master.RegionPlan; +import org.apache.hadoop.hbase.master.RegionState; +import org.apache.hadoop.hbase.master.RegionStates; +import org.apache.hadoop.hbase.master.TableStateManager; +import org.apache.hadoop.hbase.master.RegionState.State; +import org.apache.hadoop.hbase.procedure2.StateMachineProcedure; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos; +import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AssignTableRegionState; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; + +/** + * The procedure to assign a region. 
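+ * It drives the AssignTableRegionState machine: PREPARE, unassign any leftover
+ * FAILED_OPEN/FAILED_CLOSE region, CHECK_OFFLINE, GENERATE_PLAN, DO_ASSIGN, then VALIDATE,
+ * retrying through GENERATE_NEW_PLAN or giving up via MARK_FAILED_OPEN, and finishing with
+ * POST_OPERATION.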
+ */ +@InterfaceAudience.Private +public class AssignTableRegionProcedure + extends StateMachineProcedure + implements TableProcedureInterface { + private static final Log LOG = LogFactory.getLog(AssignTableRegionProcedure.class); + + private final AtomicBoolean aborted = new AtomicBoolean(false); + private Boolean traceEnabled; + + private AssignmentManager assignmentManager; + + private TableName tableName; + + private HRegionInfo regionInfo; + + private boolean forceNewPlan; + + private ServerName destinationServer; + + private int numberOfAttemptsSoFar; + + // TODO: maybe we don't need this + private Throwable previousException; + + /* + * Maximum attempts we recurse an assignment. + */ + private Integer maximumAttempts; + + private long startTime; + + /** + * The sleep time for which the assignment will wait before retrying in case of + * hbase:meta assignment failure due to lack of availability of region plan or bad region plan + */ + private Long sleepTimeBeforeRetryingSystemTableAssignment; + + /** + * Used when deserializing from a procedure store; we'll construct one of these then call + * {@link #deserializeStateData(InputStream)}. Do not use directly. + */ + public AssignTableRegionProcedure() { + this.traceEnabled = null; + this.assignmentManager = null; + this.maximumAttempts = null; + this.sleepTimeBeforeRetryingSystemTableAssignment = null; + } + + /** + * Call this constructor queuing up a Procedure. + * @param env MasterProcedureEnv + * @param tableName Name of the table which the region belongs to. + * @param regionInfo target region to assign + * @throws IOException + */ + public AssignTableRegionProcedure( + final MasterProcedureEnv env, + final TableName tableName, + final HRegionInfo regionInfo) throws IOException { + this(env, tableName, regionInfo, false); + } + + /** + * Call this constructor queuing up a Procedure. + * @param env MasterProcedureEnv + * @param tableName Name of the table which the region belongs to. 
+ * @param regionInfo target region to assign + * @param forceNewPlan whether to force a new assignment plan even there is one existing + * @throws IOException + */ + public AssignTableRegionProcedure( + final MasterProcedureEnv env, + final TableName tableName, + final HRegionInfo regionInfo, + final boolean forceNewPlan) throws IOException { + this.traceEnabled = null; + this.assignmentManager = getAssignmentManager(env); + this.tableName = tableName; + this.regionInfo = regionInfo; + this.forceNewPlan = forceNewPlan; + this.destinationServer = null; + this.maximumAttempts = getMaximumAttempts(env); + this.sleepTimeBeforeRetryingSystemTableAssignment = + getSleepTimeBeforeRetryingSystemTableAssignment(env); + this.numberOfAttemptsSoFar = 1; + this.previousException = null; + this.startTime = EnvironmentEdgeManager.currentTime(); + this.setOwner(env.getRequestUser().getUGI().getShortUserName()); + } + + @Override + protected Flow executeFromState( + final MasterProcedureEnv env, + final AssignTableRegionState state) throws InterruptedException { + if (isTraceEnabled()) { + LOG.trace(this + " execute state=" + state); + } + + try { + switch (state) { + case ASSIGN_TABLE_REGION_PREPARE: + prepareAssign(env); + setNextState( + AssignTableRegionState.ASSIGN_TABLE_REGION_UNASSIGN_FAILED_OPEN_OR_CLOSE_REGION); + break; + case ASSIGN_TABLE_REGION_UNASSIGN_FAILED_OPEN_OR_CLOSE_REGION: + unAssignFailedOpenOrCloseRegion(env); + setNextState(AssignTableRegionState.ASSIGN_TABLE_REGION_CHECK_OFFLINE); + break; + case ASSIGN_TABLE_REGION_CHECK_OFFLINE: + if (checkRegionState(env)) { + setNextState(AssignTableRegionState.ASSIGN_TABLE_REGION_GENERATE_PLAN); + } else { + setNextState(AssignTableRegionState.ASSIGN_TABLE_REGION_POST_OPERATION); + } + break; + case ASSIGN_TABLE_REGION_GENERATE_PLAN: + if (generateInitialAssignmentPlan(env)) { + setNextState(AssignTableRegionState.ASSIGN_TABLE_REGION_DO_ASSIGN); + } else { + setNextState(AssignTableRegionState.ASSIGN_TABLE_REGION_MARK_FAILED_OPEN); + } + break; + case ASSIGN_TABLE_REGION_DO_ASSIGN: + assignRegion(env); + setNextState(AssignTableRegionState.ASSIGN_TABLE_REGION_VALIDATE); + break; + case ASSIGN_TABLE_REGION_VALIDATE: + if (verifyRegionIsOnline(env)) { + setNextState(AssignTableRegionState.ASSIGN_TABLE_REGION_POST_OPERATION); + } else { + setNextState(AssignTableRegionState.ASSIGN_TABLE_REGION_GENERATE_NEW_PLAN); + } + break; + case ASSIGN_TABLE_REGION_GENERATE_NEW_PLAN: + if (generateNewAssignmentPlan(env)) { + setNextState(AssignTableRegionState.ASSIGN_TABLE_REGION_DO_ASSIGN); + } else { + setNextState(AssignTableRegionState.ASSIGN_TABLE_REGION_MARK_FAILED_OPEN); + } + break; + case ASSIGN_TABLE_REGION_MARK_FAILED_OPEN: + AssignmentManagerProcedureHelper.updateRegionState(env, regionInfo, State.FAILED_OPEN); + setNextState(AssignTableRegionState.ASSIGN_TABLE_REGION_POST_OPERATION); + break; + case ASSIGN_TABLE_REGION_POST_OPERATION: + postAssignTableRegion(env); + return Flow.NO_MORE_STATE; + default: + throw new UnsupportedOperationException(this + " unhandled state=" + state); + } + } catch (IOException e) { + LOG.warn("Error trying to assign region in the table " + tableName + + " (in state=" + state + ")", e); + + setFailure("master-assign-table-region", e); + } + return Flow.HAS_MORE_STATE; + } + + @Override + protected void rollbackState(final MasterProcedureEnv env, final AssignTableRegionState state) + throws IOException { + if (isTraceEnabled()) { + LOG.trace(this + " rollback state=" + state); + } + + try { + switch (state) { + case 
ASSIGN_TABLE_REGION_POST_OPERATION: + // Do nothing + // TODO: if region assignment reaches to this stage, it is done. Maybe we should fail + // the rollback. + break; + case ASSIGN_TABLE_REGION_MARK_FAILED_OPEN: + case ASSIGN_TABLE_REGION_GENERATE_NEW_PLAN: + case ASSIGN_TABLE_REGION_DO_ASSIGN: + // Mark the region assignment failed + AssignmentManagerProcedureHelper.updateRegionState(env, regionInfo, State.FAILED_OPEN); + break; + case ASSIGN_TABLE_REGION_GENERATE_PLAN: + case ASSIGN_TABLE_REGION_CHECK_OFFLINE: + case ASSIGN_TABLE_REGION_UNASSIGN_FAILED_OPEN_OR_CLOSE_REGION: + case ASSIGN_TABLE_REGION_PREPARE: + // Do nothing + break; + default: + throw new UnsupportedOperationException(this + " unhandled state=" + state); + } + //} catch (IOException e) { + } catch (Exception e) { + // This will be retried. Unless there is a bug in the code, + // this should be just a "temporary error" (e.g. network down) + LOG.warn("Failed rollback attempt step " + state + " for assigning the region in table " + + tableName, e); + throw e; + } + } + + @Override + protected AssignTableRegionState getState(final int stateId) { + return AssignTableRegionState.valueOf(stateId); + } + + @Override + protected int getStateId(final AssignTableRegionState state) { + return state.getNumber(); + } + + @Override + protected AssignTableRegionState getInitialState() { + return AssignTableRegionState.ASSIGN_TABLE_REGION_PREPARE; + } + + @Override + protected void setNextState(AssignTableRegionState state) { + if (aborted.get()) { + setAbortFailure("assign-table-region", "abort requested"); + } else { + super.setNextState(state); + } + } + + @Override + public boolean abort(final MasterProcedureEnv env) { + aborted.set(true); + return true; + } + + @Override + public void serializeStateData(final OutputStream stream) throws IOException { + super.serializeStateData(stream); + + MasterProcedureProtos.AssignTableRegionStateData.Builder assignTableRegionMsg = + MasterProcedureProtos.AssignTableRegionStateData.newBuilder() + .setTableName(ProtobufUtil.toProtoTableName(tableName)) + .setRegionInfo(HRegionInfo.convert(regionInfo)) + .setForceNewPlan(forceNewPlan) + .setRetryCount(numberOfAttemptsSoFar) + .setStartTime(startTime); + if (destinationServer != null) { + assignTableRegionMsg.setDestinationServer(ProtobufUtil.toServerName(destinationServer)); + } + + assignTableRegionMsg.build().writeDelimitedTo(stream); + } + + @Override + public void deserializeStateData(final InputStream stream) throws IOException { + super.deserializeStateData(stream); + + MasterProcedureProtos.AssignTableRegionStateData assignTableRegionMsg = + MasterProcedureProtos.AssignTableRegionStateData.parseDelimitedFrom(stream); + tableName = ProtobufUtil.toTableName(assignTableRegionMsg.getTableName()); + regionInfo = HRegionInfo.convert(assignTableRegionMsg.getRegionInfo()); + forceNewPlan = assignTableRegionMsg.getForceNewPlan(); + startTime = assignTableRegionMsg.getStartTime(); + if (assignTableRegionMsg.hasDestinationServer()) { + destinationServer = ProtobufUtil.toServerName(assignTableRegionMsg.getDestinationServer()); + } + else { + destinationServer = null; + } + } + + @Override + public void toStringClassDetails(StringBuilder sb) { + sb.append(getClass().getSimpleName()); + sb.append(" (table="); + sb.append(tableName); + sb.append(" region="); + sb.append(regionInfo); + sb.append(" ForceNewPlan="); + sb.append(forceNewPlan); + sb.append(")"); + } + + @Override + protected boolean acquireLock(final MasterProcedureEnv env) { + return 
env.getProcedureQueue().waitRegions(this, getTableName(), regionInfo); + } + + @Override + protected void releaseLock(final MasterProcedureEnv env) { + env.getProcedureQueue().wakeRegions(this, getTableName(), regionInfo); + } + + @Override + public TableName getTableName() { + return tableName; + } + + @Override + public TableOperationType getTableOperationType() { + return TableOperationType.ASSIGN; + } + + /** + * Prepare to assign the region and make sure that the table exists and is not disabled. + * @param env MasterProcedureEnv + * @throws IOException + */ + private void prepareAssign(final MasterProcedureEnv env) throws IOException { + // Check whether table exists + if (!MetaTableAccessor.tableExists(env.getMasterServices().getConnection(), tableName)) { + throw new TableNotFoundException(tableName); + } + + TableStateManager tsm = getAssignmentManager(env).getTableStateManager(); + TableState.State state = tsm.getTableState(tableName); + if(state.equals(TableState.State.DISABLING) || state.equals(TableState.State.DISABLED)){ + LOG.warn("Table " + tableName + " is " + + (state.equals(TableState.State.DISABLED) ? "DISABLED" : "DISABLING") + " skip assigning"); + throw new TableNotEnabledException("Table " + tableName); + } + } + + /** + * If the target region is in FAILED_CLOSE/FAILED_OPEN state from previous assignment, try + * to close it. + * @param env MasterProcedureEnv + * @throws IOException + */ + private void unAssignFailedOpenOrCloseRegion(final MasterProcedureEnv env) throws IOException { + RegionState regionState = + AssignmentManagerProcedureHelper.getOrCreateRegionState(env, regionInfo); + + if (regionState.getState() == RegionState.State.FAILED_CLOSE || + regionState.getState() == RegionState.State.FAILED_OPEN) { + if (regionState.getServerName() == null) { + // We don't know where the region is, offline it. + // No need to send CLOSE RPC + LOG.warn("Attempting to unassign a region not in RegionStates " + + regionInfo.getRegionNameAsString() + ", offlined"); + getAssignmentManager(env).regionOffline(regionInfo); + } else { + LOG.info( + "Region " + regionInfo.getEncodedName() + " is in " + regionState + ", unassign it."); + AssignmentManagerProcedureHelper.updateRegionState(env, regionInfo, State.PENDING_CLOSE); + + addChildProcedure( + new UnassignRegionFromServerProcedure(env, regionState.getServerName(), regionInfo)); + } + } + } + + /** + * Check whether the region is in a state for assignment + * @param env MasterProcedureEnv + * @return true if the region is in the right state; false otherwise + * @throws IOException + */ + private boolean checkRegionState(final MasterProcedureEnv env) throws IOException { + Boolean result = true; + RegionState regionState = + AssignmentManagerProcedureHelper.getOrCreateRegionState(env, regionInfo); + + switch (regionState.getState()) { + case OPEN: + case OPENING: + case PENDING_OPEN: + case CLOSING: + case PENDING_CLOSE: + if (!forceNewPlan) { + if (LOG.isDebugEnabled()) { + LOG.debug("Skip assigning region " + regionInfo + ", it is already in " + regionState); + } + result = false; + break; + } + // fall through + case OFFLINE: + case CLOSED: + // Perfect! + if (forceNewPlan && LOG.isDebugEnabled()) { + LOG.debug("Force region state offline " + regionState); + } + break; + case FAILED_CLOSE: + case FAILED_OPEN: + default: + LOG.error("Skip assigning region " + regionInfo + ", which is in " + regionState); + result = false; + break; + } + return result; + } + + /** + * Generate an assignment plan for this procedure. 
We could reuse the previous plan if a new plan is not forced.
+   * @param env MasterProcedureEnv
+   * @return true if a plan is found; false otherwise
+   * @throws IOException
+   */
+  private boolean generateInitialAssignmentPlan(final MasterProcedureEnv env) throws IOException {
+    if (destinationServer == null) {
+      RegionPlan regionPlan = generateAssignmentPlan(env, forceNewPlan);
+      if (regionPlan != null) {
+        destinationServer = regionPlan.getDestination();
+      }
+    }
+    return (destinationServer != null);
+  }
+
+  /**
+   * Generate a new assignment plan on retry, after the previous assign request failed.
+   * @param env MasterProcedureEnv
+   * @return true if a new plan is found; false otherwise
+   */
+  private boolean generateNewAssignmentPlan(final MasterProcedureEnv env) {
+    // Force a new plan and reassign. Will return null if there are no servers.
+    // The new plan could be the same as the existing plan since we don't
+    // exclude the server of the original plan, which should not be
+    // excluded since it could be the only server up now.
+    RegionPlan newRegionPlan = generateAssignmentPlan(env, true);
+
+    if (newRegionPlan == null) {
+      destinationServer = null;
+      return false;
+    }
+
+    if (!destinationServer.equals(newRegionPlan.getDestination())) {
+      // Clean out the plan we failed to execute; it doesn't look like it'll
+      // succeed anyway, so we need a new plan.
+      // Transition back to OFFLINE
+      AssignmentManagerProcedureHelper.updateRegionState(env, regionInfo, State.OFFLINE);
+      destinationServer = newRegionPlan.getDestination();
+    } else if (previousException instanceof FailedServerException) {
+      // TODO: figure out how to carry the previous exception over to this step
+      // TODO: maybe move this logic to AssignRegionToServerProcedure
+      try {
+        LOG.info("Trying to re-assign " + regionInfo.getRegionNameAsString() +
+          " to the same failed server.");
+        Thread.sleep(1 + env.getMasterConfiguration().getInt(RpcClient.FAILED_SERVER_EXPIRY_KEY,
+          RpcClient.FAILED_SERVER_EXPIRY_DEFAULT));
+      } catch (InterruptedException ie) {
+        LOG.warn("Failed to assign "
+          + regionInfo.getRegionNameAsString() + " since interrupted", ie);
+        Thread.currentThread().interrupt();
+        destinationServer = null;
+        return false;
+      }
+    }
+    return true;
+  }
+
+  /**
+   * Helper function to generate an assignment plan.
+   * @param env MasterProcedureEnv
+   * @param forceNewPlan whether to force a new assignment plan
+   * @return the RegionPlan, or null if no plan is found
+   */
+  private RegionPlan generateAssignmentPlan(
+      final MasterProcedureEnv env,
+      final boolean forceNewPlan) {
+    if (numberOfAttemptsSoFar > getMaximumAttempts(env)) {
+      return null;
+    }
+
+    RegionPlan currentRegionPlan = null;
+    do {
+      try {
+        currentRegionPlan = getAssignmentManager(env).getRegionPlan(regionInfo, forceNewPlan);
+      } catch (HBaseIOException e) {
+        LOG.warn("Failed to get region plan", e);
+      }
+      if (currentRegionPlan == null) {
+        LOG.warn("Unable to find a viable location to assign region "
+          + regionInfo.getRegionNameAsString());
+
+        // For a system table region, we have to keep retrying until we succeed
+        if (regionInfo.isSystemTable()) {
+          if (numberOfAttemptsSoFar % 10 == 0) {
+            LOG.warn("Unable to determine a plan to assign a system table region " + regionInfo
+              + " after " + this.numberOfAttemptsSoFar + " attempts. Continue retrying.");
+          }
+          waitForRetryingSystemTableRegionAssignment(env); // back off a little and then retry
+        }
+        numberOfAttemptsSoFar++;
+      }
+    } while (numberOfAttemptsSoFar <= getMaximumAttempts(env) && currentRegionPlan == null);
+
+    return currentRegionPlan;
+  }
+
+  /**
+   * Assign the region.
+   * @param env MasterProcedureEnv
+   * @throws IOException
+   */
+  private void assignRegion(final MasterProcedureEnv env)
+      throws IOException, InterruptedException {
+    RegionStates regionStates = getAssignmentManager(env).getRegionStates();
+
+    if (env.getMasterServices().isStopped() || env.getMasterServices().isAborted()) {
+      LOG.info("Skip assigning " + regionInfo.getRegionNameAsString()
+        + ", the server is stopped/aborted");
+      return;
+    }
+
+    LOG.info("Assigning " + regionInfo.getRegionNameAsString()
+      + " to " + destinationServer.toString());
+    // Transition RegionState to PENDING_OPEN
+    regionStates.updateRegionState(regionInfo, State.PENDING_OPEN, destinationServer);
+
+    addChildProcedure(new AssignRegionToServerProcedure(env, destinationServer, regionInfo));
+  }
+
+  /**
+   * Check whether the target region is online.
+   * @param env MasterProcedureEnv
+   * @return true if the region state is OPEN; false otherwise
+   */
+  private boolean verifyRegionIsOnline(final MasterProcedureEnv env) {
+    RegionState regionState =
+      AssignmentManagerProcedureHelper.getOrCreateRegionState(env, regionInfo);
+    return (regionState.getState() == RegionState.State.OPEN);
+  }
+
+  /**
+   * Post assignment actions.
+   * @param env MasterProcedureEnv
+   **/
+  private void postAssignTableRegion(final MasterProcedureEnv env) {
+    getAssignmentManager(env).metricsAssignmentManager.updateAssignmentTime(
+      EnvironmentEdgeManager.currentTime() - startTime);
+  }
+
+  /**
+   * Wait for some time before retrying a system table region assignment.
+   * @param env MasterProcedureEnv
+   */
+  private void waitForRetryingSystemTableRegionAssignment(final MasterProcedureEnv env) {
+    try {
+      Thread.sleep(getSleepTimeBeforeRetryingSystemTableAssignment(env));
+    } catch (InterruptedException e) {
+      LOG.error("Interrupted while waiting to retry system table region assignment", e);
+      Thread.currentThread().interrupt();
+    }
+  }
+
+  /**
+   * The procedure could be restarted from a different machine. If the variable is null, we need to
+   * retrieve it.
+   * @param env MasterProcedureEnv
+   * @return maximumAttempts
+   */
+  private int getMaximumAttempts(final MasterProcedureEnv env) {
+    if (this.maximumAttempts == null) {
+      // This is the max attempts, not retries, so it should be at least 1.
+      // For system tables, we should keep retrying until we succeed.
+      this.maximumAttempts = tableName.isSystemTable() ?
+        Integer.MAX_VALUE :
+        Math.max(1, env.getMasterConfiguration().getInt("hbase.assignment.maximum.attempts", 10));
+    }
+    return this.maximumAttempts;
+  }
+
+  /**
+   * The procedure could be restarted from a different machine. If the variable is null, we need to
+   * retrieve it.
+   * @param env MasterProcedureEnv
+   * @return the sleep time between retries of a system table region assignment
+   */
+  private long getSleepTimeBeforeRetryingSystemTableAssignment(final MasterProcedureEnv env) {
+    if (this.sleepTimeBeforeRetryingSystemTableAssignment == null) {
+      // We still honor the old meta table sleep time if the system table config does not exist.
+ this.sleepTimeBeforeRetryingSystemTableAssignment = + env.getMasterConfiguration().getLong("hbase.systemtable.assignment.retry.sleeptime", -1); + if (this.sleepTimeBeforeRetryingSystemTableAssignment == -1) { + // Wait for 1 second by default + this.sleepTimeBeforeRetryingSystemTableAssignment = env.getMasterConfiguration().getLong( + "hbase.meta.assignment.retry.sleeptime", 1000l); + } + } + return this.sleepTimeBeforeRetryingSystemTableAssignment; + } + + /** + * The procedure could be restarted from a different machine. If the variable is null, we need to + * retrieve it. + * @param env MasterProcedureEnv + * @return assignmentManager + */ + private AssignmentManager getAssignmentManager(final MasterProcedureEnv env) { + if (assignmentManager == null) { + assignmentManager = env.getMasterServices().getAssignmentManager(); + } + return assignmentManager; + } + + /** + * The procedure could be restarted from a different machine. If the variable is null, we need to + * retrieve it. + * @return traceEnabled + */ + private Boolean isTraceEnabled() { + if (traceEnabled == null) { + traceEnabled = LOG.isTraceEnabled(); + } + return traceEnabled; + } +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AssignmentManagerProcedureHelper.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AssignmentManagerProcedureHelper.java new file mode 100644 index 0000000..ed461a9 --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AssignmentManagerProcedureHelper.java @@ -0,0 +1,59 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.master.procedure; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.master.RegionState; +import org.apache.hadoop.hbase.master.RegionStates; +import org.apache.hadoop.hbase.master.RegionState.State; + +@InterfaceAudience.Private +@InterfaceStability.Evolving +public final class AssignmentManagerProcedureHelper { + private static final Log LOG = LogFactory.getLog(AssignmentManagerProcedureHelper.class); + + private AssignmentManagerProcedureHelper() {} + + /** + * Get region state. 
If it does not exist, create one + **/ + public static RegionState getOrCreateRegionState( + final MasterProcedureEnv env, + final HRegionInfo regionInfo) { + RegionStates regionStates = env.getMasterServices().getAssignmentManager().getRegionStates(); + RegionState state = regionStates.getRegionState(regionInfo); + if (state == null) { + LOG.warn("Assigning but not in region states: " + regionInfo); + state = regionStates.createRegionState(regionInfo); + } + return state; + } + public static void updateRegionState( + final MasterProcedureEnv env, + final HRegionInfo regionInfo, + final State newRegionState) { + env.getMasterServices().getAssignmentManager().getRegionStates().updateRegionState( + regionInfo, + newRegionState); + } +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java index 5518b8b..c84667a 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java @@ -505,7 +505,7 @@ public class DisableTableProcedure && !regionStates.isRegionInState(region, RegionState.State.FAILED_CLOSE)) { continue; } - pool.execute(Trace.wrap("DisableTableHandler.BulkDisabler", new Runnable() { + pool.execute(Trace.wrap("DisableTableProcedure.BulkDisabler", new Runnable() { @Override public void run() { assignmentManager.unassign(region); diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerProcedureInterface.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerProcedureInterface.java index b5c24ff..2943bc3 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerProcedureInterface.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerProcedureInterface.java @@ -29,7 +29,7 @@ import org.apache.hadoop.hbase.classification.InterfaceStability; @InterfaceStability.Evolving public interface ServerProcedureInterface { public enum ServerOperationType { - CRASH_HANDLER + ASSIGN_REGION, UNASSIGN_REGION, CRASH_HANDLER }; /** diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/UnassignRegionFromServerProcedure.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/UnassignRegionFromServerProcedure.java new file mode 100644 index 0000000..af9f2b3 --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/UnassignRegionFromServerProcedure.java @@ -0,0 +1,369 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+package org.apache.hadoop.hbase.master.procedure;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.NotServingRegionException;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.ipc.FailedServerException;
+import org.apache.hadoop.hbase.ipc.RpcClient;
+import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
+import org.apache.hadoop.hbase.master.AssignmentManager;
+import org.apache.hadoop.hbase.master.RegionState;
+import org.apache.hadoop.hbase.master.RegionState.State;
+import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.UnassignRegionFromServerState;
+import org.apache.hadoop.hbase.regionserver.RegionServerAbortedException;
+import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
+import org.apache.hadoop.ipc.RemoteException;
+
+/**
+ * Unassign a region from a region server.
+ */
+public class UnassignRegionFromServerProcedure
+    extends StateMachineProcedure<MasterProcedureEnv, UnassignRegionFromServerState>
+    implements ServerProcedureInterface {
+  private static final Log LOG = LogFactory.getLog(UnassignRegionFromServerProcedure.class);
+
+  private final AtomicBoolean aborted = new AtomicBoolean(false);
+
+  /**
+   * Name of the server the region is unassigned from.
+   */
+  private ServerName serverName;
+
+  /**
+   * The region to unassign.
+   */
+  private HRegionInfo regionToUnassign;
+
+  /**
+   * Maximum number of attempts for the unassignment.
+   */
+  private Integer maximumAttempts;
+
+  /**
+   * Number of attempts so far.
+   */
+  private int numberOfAttemptsSoFar;
+
+  /**
+   * Used when deserializing from a procedure store; we'll construct one of these then call
+   * {@link #deserializeStateData(InputStream)}. Do not use directly.
+   */
+  public UnassignRegionFromServerProcedure() {
+    super();
+  }
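The no-argument constructor above exists only for the recovery path. As a rough sketch (the stateStream input stream is assumed, and the executor wiring is paraphrased rather than quoted from the framework): after a master restart the procedure store rebuilds the procedure through that constructor and replays its persisted fields before execution resumes.

    UnassignRegionFromServerProcedure proc = new UnassignRegionFromServerProcedure();
    proc.deserializeStateData(stateStream); // restores serverName, regionToUnassign and the retry count
    // The ProcedureExecutor then resumes executeFromState() from the last persisted state.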
+
+  /**
+   * Call this constructor to queue up a new Procedure.
+   * @param env MasterProcedureEnv
+   * @param serverName name of the server the region is unassigned from
+   * @param regionToUnassign the region to unassign
+   * @throws IOException
+   */
+  public UnassignRegionFromServerProcedure(
+      final MasterProcedureEnv env,
+      final ServerName serverName,
+      final HRegionInfo regionToUnassign) throws IOException {
+    this.serverName = serverName;
+    this.regionToUnassign = regionToUnassign;
+    this.numberOfAttemptsSoFar = 1;
+    this.maximumAttempts = getMaximumAttempts(env);
+    this.setOwner(env.getRequestUser().getUGI().getShortUserName());
+  }
+
+  @Override
+  protected Flow executeFromState(MasterProcedureEnv env, UnassignRegionFromServerState state) {
+    if (LOG.isTraceEnabled()) {
+      LOG.trace(state);
+    }
+
+    try {
+      switch (state) {
+      case UNASSIGN_REGION_PREPARE:
+        prepareUnassign(env);
+        setNextState(UnassignRegionFromServerState.UNASSIGN_REGION_FROM_SERVER);
+        break;
+      case UNASSIGN_REGION_FROM_SERVER:
+        LOG.info("Unassigning region from server " + this.serverName);
+        if (sendRegionToClose(env)) {
+          setNextState(UnassignRegionFromServerState.UNASSIGN_REGION_MARK_CLOSE);
+        } else {
+          setNextState(UnassignRegionFromServerState.UNASSIGN_REGION_SET_FAILED_CLOSE);
+        }
+        break;
+      case UNASSIGN_REGION_CHECK_REGION_STATE:
+        // TODO: Unused state for now.
+        // Implement WAIT_TIMEOUT retry for the async unassign RPC.
+        break;
+      case UNASSIGN_REGION_SET_FAILED_CLOSE:
+        LOG.info("Unassigning region " + regionToUnassign.getEncodedName()
+          + " from server " + serverName + " failed.");
+        AssignmentManagerProcedureHelper.updateRegionState(
+          env, regionToUnassign, State.FAILED_CLOSE);
+        return Flow.NO_MORE_STATE;
+      case UNASSIGN_REGION_MARK_CLOSE:
+        LOG.info("Unassigning region " + regionToUnassign.getEncodedName()
+          + " from server " + serverName + " is done.");
+        AssignmentManagerProcedureHelper.updateRegionState(env, regionToUnassign, State.OFFLINE);
+        return Flow.NO_MORE_STATE;
+
+      default:
+        throw new UnsupportedOperationException("unhandled state=" + state);
+      }
+    } catch (IOException e) {
+      LOG.warn("Error trying to unassign region " + regionToUnassign.getEncodedName()
+        + " from server " + this.serverName.getServerName() + " (in state=" + state + ")", e);
+
+      setFailure("master-unassign-region-from-server", e);
+    }
+    return Flow.HAS_MORE_STATE;
+  }
+
+  @Override
+  protected void rollbackState(MasterProcedureEnv env, UnassignRegionFromServerState state)
+      throws IOException {
+    // TODO
+    throw new UnsupportedOperationException("unhandled state=" + state);
+  }
+
+  @Override
+  protected UnassignRegionFromServerState getState(int stateId) {
+    return UnassignRegionFromServerState.valueOf(stateId);
+  }
+
+  @Override
+  protected int getStateId(UnassignRegionFromServerState state) {
+    return state.getNumber();
+  }
+
+  @Override
+  protected UnassignRegionFromServerState getInitialState() {
+    return UnassignRegionFromServerState.UNASSIGN_REGION_PREPARE;
+  }
+
+  @Override
+  protected void setNextState(UnassignRegionFromServerState state) {
+    if (aborted.get()) {
+      setAbortFailure("unassign-region-from-server", "abort requested");
+    } else {
+      super.setNextState(state);
+    }
+  }
+
+  @Override
+  protected boolean abort(MasterProcedureEnv env) {
+    aborted.set(true);
+    return true;
+  }
+
+  @Override
+  protected boolean acquireLock(final MasterProcedureEnv env) {
+    // TODO: if this is a child procedure, transfer the parent procedure's lock to this proc
+    return env.getProcedureQueue().tryAcquireServerSharedLock(this, getServerName());
+  }
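For context, a minimal usage sketch of the procedure defined above, assuming a handle to the running master (the master, serverName and hri variables are placeholders); it mirrors how the tests later in this patch drive AssignTableRegionProcedure through the executor:

    ProcedureExecutor<MasterProcedureEnv> procExec = master.getMasterProcedureExecutor();
    long procId = procExec.submitProcedure(
        new UnassignRegionFromServerProcedure(procExec.getEnvironment(), serverName, hri));
    ProcedureTestingUtility.waitProcedure(procExec, procId);       // block until a terminal state
    ProcedureTestingUtility.assertProcNotFailed(procExec, procId); // region should now be OFFLINE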
+
+  @Override
+  protected void releaseLock(final MasterProcedureEnv env) {
+    // TODO: if this is a child procedure, transfer the lock back to the parent procedure
+    env.getProcedureQueue().releaseServerSharedLock(this, getServerName());
+  }
+
+  @Override
+  public void toStringClassDetails(StringBuilder sb) {
+    sb.append(getClass().getSimpleName());
+    sb.append(" serverName=");
+    sb.append(this.serverName);
+    sb.append(", region=");
+    sb.append(regionToUnassign);
+  }
+
+  @Override
+  public void serializeStateData(final OutputStream stream) throws IOException {
+    super.serializeStateData(stream);
+
+    // Reuses the AssignRegionToServerData message; the unassign procedure persists the same
+    // fields (server name, region and retry count).
+    MasterProcedureProtos.AssignRegionToServerData.Builder unassignRegionFromServerData =
+      MasterProcedureProtos.AssignRegionToServerData.newBuilder()
+        .setServerName(ProtobufUtil.toServerName(this.serverName))
+        .setRegionToAssign(HRegionInfo.convert(this.regionToUnassign))
+        .setRetryCount(numberOfAttemptsSoFar);
+
+    unassignRegionFromServerData.build().writeDelimitedTo(stream);
+  }
+
+  @Override
+  public void deserializeStateData(final InputStream stream) throws IOException {
+    super.deserializeStateData(stream);
+
+    MasterProcedureProtos.AssignRegionToServerData unassignRegionFromServerData =
+      MasterProcedureProtos.AssignRegionToServerData.parseDelimitedFrom(stream);
+    this.serverName = ProtobufUtil.toServerName(unassignRegionFromServerData.getServerName());
+    this.regionToUnassign = HRegionInfo.convert(unassignRegionFromServerData.getRegionToAssign());
+    this.numberOfAttemptsSoFar = unassignRegionFromServerData.getRetryCount();
+  }
+
+  @Override
+  public ServerName getServerName() {
+    return this.serverName;
+  }
+
+  @Override
+  public boolean hasMetaTableRegion() {
+    return this.regionToUnassign.isMetaRegion();
+  }
+
+  @Override
+  public ServerOperationType getServerOperationType() {
+    return ServerOperationType.UNASSIGN_REGION;
+  }
+
+  /**
+   * Whether to yield at the end of each successful flow step.
+   */
+  @Override
+  protected boolean isYieldBeforeExecuteFromState(
+      MasterProcedureEnv env,
+      UnassignRegionFromServerState state) {
+    return false;
+  }
+
+  /**
+   * Prepare to unassign the region and make sure that the region is in the right state.
+ * @param env MasterProcedureEnv + * @throws IOException + */ + private void prepareUnassign(final MasterProcedureEnv env) throws IOException { + RegionState regionState = + AssignmentManagerProcedureHelper.getOrCreateRegionState(env, regionToUnassign); + if (regionState.isUnassignable() && + regionState.getState() != RegionState.State.PENDING_CLOSE && + regionState.getState() != RegionState.State.FAILED_CLOSE && + regionState.getState() != RegionState.State.FAILED_OPEN) { + String msg = "Skip unassigning region " + regionToUnassign.getEncodedName() + + ", it is in " + regionState; + LOG.warn(msg); + throw new IOException(msg); + } + } + + /** + * Send RPC to the target server to close the region + * @param env MasterProcedureEnv + * @throws IOException + */ + private boolean sendRegionToClose(final MasterProcedureEnv env) throws IOException { + AssignmentManager am = env.getMasterServices().getAssignmentManager(); + + while(numberOfAttemptsSoFar <= getMaximumAttempts(env)) { + if (env.getMasterServices().isStopped() || env.getMasterServices().isAborted()) { + LOG.debug("Server stopped/aborted; skipping unassign of " + regionToUnassign); + return false; + } + if (!am.serverManager.isServerOnline(serverName)) { + LOG.debug("Offline " + regionToUnassign.getRegionNameAsString() + + ", no need to unassign since it's on a dead server: " + regionToUnassign); + return true; + } + try { + if (LOG.isDebugEnabled()) { + LOG.debug("Sent CLOSE to " + serverName + " for region " + + regionToUnassign.getRegionNameAsString()); + } + // Send CLOSE RPC + // TODO: call dispatch to batch RPC calls + // TODO: add procId as parameter??? + // TODO: change the RPC to async + am.serverManager.sendRegionClose(serverName, regionToUnassign); + + return true; + } catch (Throwable t) { + long sleepTime = 0; + Configuration conf = env.getMasterConfiguration(); + if (t instanceof RemoteException) { + t = ((RemoteException)t).unwrapRemoteException(); + } + if (t instanceof RegionServerAbortedException + || t instanceof RegionServerStoppedException + || t instanceof ServerNotRunningYetException) { + // RS is aborting, we cannot offline the region since the region may need to do WAL + // recovery. Until we see the RS expiration, we should retry. + sleepTime = 1 + conf.getInt(RpcClient.FAILED_SERVER_EXPIRY_KEY, + RpcClient.FAILED_SERVER_EXPIRY_DEFAULT); + + } else if (t instanceof NotServingRegionException) { + if (LOG.isDebugEnabled()) { + LOG.debug("Offline " + regionToUnassign.getRegionNameAsString() + + ", it's not any more on " + serverName, t); + } + return true; + } else if (t instanceof FailedServerException && numberOfAttemptsSoFar < maximumAttempts) { + // In case the server is in the failed server list, no point to + // retry too soon. 
Retry after the failed_server_expiry time + sleepTime = 1 + conf.getInt(RpcClient.FAILED_SERVER_EXPIRY_KEY, + RpcClient.FAILED_SERVER_EXPIRY_DEFAULT); + if (LOG.isDebugEnabled()) { + LOG.debug(serverName + " is on failed server list; waiting " + sleepTime + "ms", t); + } + } + try { + if (sleepTime > 0) { + Thread.sleep(sleepTime); + } + } catch (InterruptedException ie) { + LOG.warn("Interrupted unassign " + regionToUnassign.getRegionNameAsString(), ie); + Thread.currentThread().interrupt(); + return false; + } + LOG.info("Server " + serverName + " returned " + t + " for " + + regionToUnassign.getRegionNameAsString() + ", try=" + numberOfAttemptsSoFar + + " of " + this.maximumAttempts, t); + } + numberOfAttemptsSoFar++; + } + // Run out of attempts + return false; + } + + /** + * The procedure could be restarted from a different machine. If the variable is null, we need to + * retrieve it. + * @return maximumAttempts + */ + private int getMaximumAttempts(final MasterProcedureEnv env) { + if (this.maximumAttempts == null) { + // This is the max attempts, not retries, so it should be at least 1. + this.maximumAttempts = + Math.max(1, env.getMasterConfiguration().getInt("hbase.assignment.maximum.attempts", 10)); + } + return this.maximumAttempts; + } +} diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java index 2d054d8..62e7174 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java @@ -207,13 +207,31 @@ public class MasterProcedureTestingUtility { public static void testRecoveryAndDoubleExecution( final ProcedureExecutor procExec, final long procId, final int numSteps, final TState[] states) throws Exception { + testRecoveryAndDoubleExecution(procExec, procId, numSteps, numSteps, states); + } + + public static void testRecoveryAndDoubleExecution( + final ProcedureExecutor procExec, final long procId, + final int minNumSteps, final int maxNumSteps, final TState[] states) throws Exception { ProcedureTestingUtility.waitProcedure(procExec, procId); assertEquals(false, procExec.isRunning()); - for (int i = 0; i < numSteps; ++i) { - LOG.info("Restart "+ i +" exec state: " + states[i]); + int i; + for (i = 0; i < minNumSteps; ++i) { + if (i < states.length) { + LOG.info("Restart "+ i +" exec state: " + states[i]); + } else { + LOG.info("Restart "+ i +" exec state: " + states[states.length - 1]); + } ProcedureTestingUtility.assertProcNotYetCompleted(procExec, procId); + LOG.info("Restart "+ i +" exec state: " + procExec.getProcedure(procId).getState()); + ProcedureTestingUtility.restart(procExec); + ProcedureTestingUtility.waitProcedure(procExec, procId); + } + while (!procExec.isFinished(procId) && i < maxNumSteps) { + LOG.info("Restart "+ i +" exec state: " + procExec.getProcedure(procId).getState()); ProcedureTestingUtility.restart(procExec); ProcedureTestingUtility.waitProcedure(procExec, procId); + i++; } assertEquals(true, procExec.isRunning()); ProcedureTestingUtility.assertProcNotFailed(procExec, procId); diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestAssignTableRegionProcedure.java hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestAssignTableRegionProcedure.java new file mode 100644 index 0000000..f2388ec --- 
/dev/null +++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestAssignTableRegionProcedure.java @@ -0,0 +1,274 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.master.procedure; + +import java.io.IOException; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.MetaTableAccessor; +import org.apache.hadoop.hbase.ProcedureInfo; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.TableNotEnabledException; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.master.RegionState; +import org.apache.hadoop.hbase.master.RegionStates; +import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; +import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; +import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AssignTableRegionState; +import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Ignore; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import static org.junit.Assert.assertTrue; + +@Category({MasterTests.class, MediumTests.class}) +public class TestAssignTableRegionProcedure { + private static final Log LOG = LogFactory.getLog(TestAssignTableRegionProcedure.class); + + protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility(); + private final static byte[] FAMILY = Bytes.toBytes("FAMILY"); + final static Configuration conf = UTIL.getConfiguration(); + private static Admin admin; + + private static void setupConf(Configuration conf) { + // Reduce the maximum attempts to speed up the test + conf.setInt("hbase.assignment.maximum.attempts", 3); + conf.setInt("hbase.master.maximum.ping.server.attempts", 3); + conf.setInt("hbase.master.ping.server.retry.sleep.interval", 1); + + conf.setInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, 3); + } + + @BeforeClass + public static void setupCluster() throws Exception { + setupConf(conf); + UTIL.startMiniCluster(1); + admin = UTIL.getHBaseAdmin(); + } + + @AfterClass + public static void cleanupTest() throws Exception { + try { + UTIL.shutdownMiniCluster(); + } catch (Exception e) { 
+ LOG.warn("failure shutting down cluster", e); + } + } + + @Before + public void setup() throws Exception { + resetProcExecutorTestingKillFlag(); + } + + @After + public void tearDown() throws Exception { + resetProcExecutorTestingKillFlag(); + for (HTableDescriptor htd: UTIL.getHBaseAdmin().listTables()) { + LOG.info("Tear down, remove table=" + htd.getTableName()); + UTIL.deleteTable(htd.getTableName()); + } + } + + private void resetProcExecutorTestingKillFlag() { + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, false); + assertTrue("expected executor to be running", procExec.isRunning()); + } + + /** + * This tests region assignment + */ + @Test (timeout=60000) + public void testAssignOneRegion() throws Exception { + final TableName tableName = TableName.valueOf("testAssignOneRegion"); + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + + HTableDescriptor desc = new HTableDescriptor(tableName); + desc.addFamily(new HColumnDescriptor(FAMILY)); + admin.createTable(desc); + + HRegionInfo hri = generateRegionInfo(desc); + + long procId = procExec.submitProcedure(new AssignTableRegionProcedure( + procExec.getEnvironment(), tableName, hri, true)); + ProcedureTestingUtility.waitProcedure(procExec, procId); + ProcedureTestingUtility.assertProcNotFailed(procExec, procId); + + validateRegionState(hri); + } + + /** + * This tests assigning a FAILED_OPEN region to a table + */ + @Test (timeout=60000) + public void testAssignFailedOpenRegion() throws Exception { + final TableName tableName = TableName.valueOf("testAssignFailedOpenRegion"); + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + + HTableDescriptor desc = new HTableDescriptor(tableName); + desc.addFamily(new HColumnDescriptor(FAMILY)); + admin.createTable(desc); + + HRegionInfo hri = generateRegionInfo(desc); + + RegionStates regionStates = + UTIL.getHBaseCluster().getMaster().getAssignmentManager().getRegionStates(); + regionStates.updateRegionState(hri, RegionState.State.FAILED_OPEN); + RegionState oldState = regionStates.getRegionState(hri); + assertTrue(oldState.isFailedOpen()); + assertTrue(oldState.getServerName() == null); + + long procId = procExec.submitProcedure(new AssignTableRegionProcedure( + procExec.getEnvironment(), tableName, hri, true)); + ProcedureTestingUtility.waitProcedure(procExec, procId); + ProcedureTestingUtility.assertProcNotFailed(procExec, procId); + + validateRegionState(hri); + } + + /** + * This tests assigning a FAILED_CLOSE region to a table + */ + @Test (timeout=60000) + public void testAssignFailedCloseRegion() throws Exception { + final TableName tableName = TableName.valueOf("testAssignFailedCloseRegion"); + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + + HTableDescriptor desc = new HTableDescriptor(tableName); + desc.addFamily(new HColumnDescriptor(FAMILY)); + admin.createTable(desc); + + HRegionInfo hri = generateRegionInfo(desc); + + RegionStates regionStates = + UTIL.getHBaseCluster().getMaster().getAssignmentManager().getRegionStates(); + ServerName rsName = UTIL.getHBaseCluster().getRegionServer(0).getServerName(); + regionStates.updateRegionState(hri, RegionState.State.FAILED_CLOSE, rsName); + RegionState oldState = regionStates.getRegionState(hri); + assertTrue(oldState.isFailedClose()); + assertTrue(oldState.getServerName().equals(rsName)); + + long procId = procExec.submitProcedure(new AssignTableRegionProcedure( + procExec.getEnvironment(), 
tableName, hri, true)); + ProcedureTestingUtility.waitProcedure(procExec, procId); + ProcedureTestingUtility.assertProcNotFailed(procExec, procId); + + validateRegionState(hri); + } + + /** + * This tests assigning a region to a disabled table + */ + @Test (timeout=60000) + public void testAssignDisabledTableRegion() throws Exception { + final TableName tableName = TableName.valueOf("testAssignDisabledTableRegion"); + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + + HTableDescriptor desc = new HTableDescriptor(tableName); + desc.addFamily(new HColumnDescriptor(FAMILY)); + admin.createTable(desc); + + HRegionInfo hri = generateRegionInfo(desc); + + // Disable the table + admin.disableTable(tableName); + + // You can't assign a disabled table region + long procId = procExec.submitProcedure(new AssignTableRegionProcedure( + procExec.getEnvironment(), tableName, hri, true)); + ProcedureTestingUtility.waitProcedure(procExec, procId); + + ProcedureInfo result = procExec.getResult(procId); + assertTrue(result.isFailed()); + LOG.debug("Assign failed with exception: " + result.getExceptionFullMessage()); + assertTrue( + ProcedureTestingUtility.getExceptionCause(result) instanceof TableNotEnabledException); + } + + @Ignore + @Test + public void testRecoveryAndDoubleExecution() throws Exception { + final TableName tableName = TableName.valueOf("testRecoveryAndDoubleExecution"); + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + + HTableDescriptor desc = new HTableDescriptor(tableName); + desc.addFamily(new HColumnDescriptor(FAMILY)); + admin.createTable(desc); + + HRegionInfo hri = generateRegionInfo(desc); + + ProcedureTestingUtility.waitNoProcedureRunning(procExec); + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true); + + long procId = procExec.submitProcedure( + new AssignTableRegionProcedure(procExec.getEnvironment(), tableName, hri, true)); + + // Restart the executor and execute the step twice + int numberOfSteps = AssignTableRegionState.values().length; + MasterProcedureTestingUtility.testRecoveryAndDoubleExecution( + procExec, + procId, + numberOfSteps, + AssignTableRegionState.values().length + AssignTableRegionState.values().length, // testing + AssignTableRegionState.values()); + ProcedureTestingUtility.assertProcNotFailed(procExec, procId); + + validateRegionState(hri); + } + + private ProcedureExecutor getMasterProcedureExecutor() { + return UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor(); + } + + private HRegionInfo generateRegionInfo(final HTableDescriptor tableDesc) + throws IOException { + Table meta = UTIL.getConnection().getTable(TableName.META_TABLE_NAME); + HRegionInfo hri = new HRegionInfo( + tableDesc.getTableName(), Bytes.toBytes("A"), Bytes.toBytes("Z")); + MetaTableAccessor.addRegionToMeta(meta, hri); + return hri; + } + + private void validateRegionState(final HRegionInfo hri) + throws IOException, InterruptedException { + final long timeout = 200; // milliseconds + RegionStates regionStates = + UTIL.getHBaseCluster().getMaster().getAssignmentManager().getRegionStates(); + ServerName serverName = regionStates.getRegionServerOfRegion(hri); + UTIL.assertRegionOnServer(hri, serverName, timeout); + + RegionState newState = regionStates.getRegionState(hri); + assertTrue(newState.isOpened()); + } +}
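Finally, a hedged summary of the configuration knobs the new procedures read; the key names and defaults are taken from the code above, while the values shown here are only illustrative:

    Configuration conf = HBaseConfiguration.create();
    // Upper bound on assign/unassign attempts for user-table regions (system tables retry until they succeed).
    conf.setInt("hbase.assignment.maximum.attempts", 10);
    // Back-off between plan-generation retries for system table regions.
    conf.setLong("hbase.systemtable.assignment.retry.sleeptime", 1000L);
    // Legacy fallback honored when the system-table key above is not set (defaults to 1000 ms).
    conf.setLong("hbase.meta.assignment.retry.sleeptime", 1000L);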