diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java
index 4713a0a..d83ee19 100644
--- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java
@@ -707,6 +707,242 @@ public final class MasterProcedureProtos {
// @@protoc_insertion_point(enum_scope:DeleteColumnFamilyState)
}
+ /**
+ * Protobuf enum {@code EnableTableState}
+ */
+ public enum EnableTableState
+ implements com.google.protobuf.ProtocolMessageEnum {
+ /**
+ * ENABLE_TABLE_PREPARE = 1;
+ */
+ ENABLE_TABLE_PREPARE(0, 1),
+ /**
+ * ENABLE_TABLE_PRE_OPERATION = 2;
+ */
+ ENABLE_TABLE_PRE_OPERATION(1, 2),
+ /**
+ * ENABLE_TABLE_SET_ENABLING_TABLE_STATE = 3;
+ */
+ ENABLE_TABLE_SET_ENABLING_TABLE_STATE(2, 3),
+ /**
+ * ENABLE_TABLE_MARK_REGIONS_ONLINE = 4;
+ */
+ ENABLE_TABLE_MARK_REGIONS_ONLINE(3, 4),
+ /**
+ * ENABLE_TABLE_SET_ENABLED_TABLE_STATE = 5;
+ */
+ ENABLE_TABLE_SET_ENABLED_TABLE_STATE(4, 5),
+ /**
+ * ENABLE_TABLE_POST_OPERATION = 6;
+ */
+ ENABLE_TABLE_POST_OPERATION(5, 6),
+ ;
+
+ /**
+ * ENABLE_TABLE_PREPARE = 1;
+ */
+ public static final int ENABLE_TABLE_PREPARE_VALUE = 1;
+ /**
+ * ENABLE_TABLE_PRE_OPERATION = 2;
+ */
+ public static final int ENABLE_TABLE_PRE_OPERATION_VALUE = 2;
+ /**
+ * ENABLE_TABLE_SET_ENABLING_TABLE_STATE = 3;
+ */
+ public static final int ENABLE_TABLE_SET_ENABLING_TABLE_STATE_VALUE = 3;
+ /**
+ * ENABLE_TABLE_MARK_REGIONS_ONLINE = 4;
+ */
+ public static final int ENABLE_TABLE_MARK_REGIONS_ONLINE_VALUE = 4;
+ /**
+ * ENABLE_TABLE_SET_ENABLED_TABLE_STATE = 5;
+ */
+ public static final int ENABLE_TABLE_SET_ENABLED_TABLE_STATE_VALUE = 5;
+ /**
+ * ENABLE_TABLE_POST_OPERATION = 6;
+ */
+ public static final int ENABLE_TABLE_POST_OPERATION_VALUE = 6;
+
+
+ public final int getNumber() { return value; }
+
+ public static EnableTableState valueOf(int value) {
+ switch (value) {
+ case 1: return ENABLE_TABLE_PREPARE;
+ case 2: return ENABLE_TABLE_PRE_OPERATION;
+ case 3: return ENABLE_TABLE_SET_ENABLING_TABLE_STATE;
+ case 4: return ENABLE_TABLE_MARK_REGIONS_ONLINE;
+ case 5: return ENABLE_TABLE_SET_ENABLED_TABLE_STATE;
+ case 6: return ENABLE_TABLE_POST_OPERATION;
+ default: return null;
+ }
+ }
+
+    public static com.google.protobuf.Internal.EnumLiteMap<EnableTableState>
+ internalGetValueMap() {
+ return internalValueMap;
+ }
+    private static com.google.protobuf.Internal.EnumLiteMap<EnableTableState>
+        internalValueMap =
+          new com.google.protobuf.Internal.EnumLiteMap<EnableTableState>() {
+ public EnableTableState findValueByNumber(int number) {
+ return EnableTableState.valueOf(number);
+ }
+ };
+
+ public final com.google.protobuf.Descriptors.EnumValueDescriptor
+ getValueDescriptor() {
+ return getDescriptor().getValues().get(index);
+ }
+ public final com.google.protobuf.Descriptors.EnumDescriptor
+ getDescriptorForType() {
+ return getDescriptor();
+ }
+ public static final com.google.protobuf.Descriptors.EnumDescriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(6);
+ }
+
+ private static final EnableTableState[] VALUES = values();
+
+ public static EnableTableState valueOf(
+ com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
+ if (desc.getType() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "EnumValueDescriptor is not for this type.");
+ }
+ return VALUES[desc.getIndex()];
+ }
+
+ private final int index;
+ private final int value;
+
+ private EnableTableState(int index, int value) {
+ this.index = index;
+ this.value = value;
+ }
+
+ // @@protoc_insertion_point(enum_scope:EnableTableState)
+ }
+
+ /**
+ * Protobuf enum {@code DisableTableState}
+ */
+ public enum DisableTableState
+ implements com.google.protobuf.ProtocolMessageEnum {
+ /**
+ * DISABLE_TABLE_PREPARE = 1;
+ */
+ DISABLE_TABLE_PREPARE(0, 1),
+ /**
+ * DISABLE_TABLE_PRE_OPERATION = 2;
+ */
+ DISABLE_TABLE_PRE_OPERATION(1, 2),
+ /**
+ * DISABLE_TABLE_SET_DISABLING_TABLE_STATE = 3;
+ */
+ DISABLE_TABLE_SET_DISABLING_TABLE_STATE(2, 3),
+ /**
+ * DISABLE_TABLE_MARK_REGIONS_OFFLINE = 4;
+ */
+ DISABLE_TABLE_MARK_REGIONS_OFFLINE(3, 4),
+ /**
+ * DISABLE_TABLE_SET_DISABLED_TABLE_STATE = 5;
+ */
+ DISABLE_TABLE_SET_DISABLED_TABLE_STATE(4, 5),
+ /**
+ * DISABLE_TABLE_POST_OPERATION = 6;
+ */
+ DISABLE_TABLE_POST_OPERATION(5, 6),
+ ;
+
+ /**
+ * DISABLE_TABLE_PREPARE = 1;
+ */
+ public static final int DISABLE_TABLE_PREPARE_VALUE = 1;
+ /**
+ * DISABLE_TABLE_PRE_OPERATION = 2;
+ */
+ public static final int DISABLE_TABLE_PRE_OPERATION_VALUE = 2;
+ /**
+ * DISABLE_TABLE_SET_DISABLING_TABLE_STATE = 3;
+ */
+ public static final int DISABLE_TABLE_SET_DISABLING_TABLE_STATE_VALUE = 3;
+ /**
+ * DISABLE_TABLE_MARK_REGIONS_OFFLINE = 4;
+ */
+ public static final int DISABLE_TABLE_MARK_REGIONS_OFFLINE_VALUE = 4;
+ /**
+ * DISABLE_TABLE_SET_DISABLED_TABLE_STATE = 5;
+ */
+ public static final int DISABLE_TABLE_SET_DISABLED_TABLE_STATE_VALUE = 5;
+ /**
+ * DISABLE_TABLE_POST_OPERATION = 6;
+ */
+ public static final int DISABLE_TABLE_POST_OPERATION_VALUE = 6;
+
+
+ public final int getNumber() { return value; }
+
+ public static DisableTableState valueOf(int value) {
+ switch (value) {
+ case 1: return DISABLE_TABLE_PREPARE;
+ case 2: return DISABLE_TABLE_PRE_OPERATION;
+ case 3: return DISABLE_TABLE_SET_DISABLING_TABLE_STATE;
+ case 4: return DISABLE_TABLE_MARK_REGIONS_OFFLINE;
+ case 5: return DISABLE_TABLE_SET_DISABLED_TABLE_STATE;
+ case 6: return DISABLE_TABLE_POST_OPERATION;
+ default: return null;
+ }
+ }
+
+    public static com.google.protobuf.Internal.EnumLiteMap<DisableTableState>
+ internalGetValueMap() {
+ return internalValueMap;
+ }
+    private static com.google.protobuf.Internal.EnumLiteMap<DisableTableState>
+        internalValueMap =
+          new com.google.protobuf.Internal.EnumLiteMap<DisableTableState>() {
+ public DisableTableState findValueByNumber(int number) {
+ return DisableTableState.valueOf(number);
+ }
+ };
+
+ public final com.google.protobuf.Descriptors.EnumValueDescriptor
+ getValueDescriptor() {
+ return getDescriptor().getValues().get(index);
+ }
+ public final com.google.protobuf.Descriptors.EnumDescriptor
+ getDescriptorForType() {
+ return getDescriptor();
+ }
+ public static final com.google.protobuf.Descriptors.EnumDescriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(7);
+ }
+
+ private static final DisableTableState[] VALUES = values();
+
+ public static DisableTableState valueOf(
+ com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
+ if (desc.getType() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "EnumValueDescriptor is not for this type.");
+ }
+ return VALUES[desc.getIndex()];
+ }
+
+ private final int index;
+ private final int value;
+
+ private DisableTableState(int index, int value) {
+ this.index = index;
+ this.value = value;
+ }
+
+ // @@protoc_insertion_point(enum_scope:DisableTableState)
+ }
+
public interface CreateTableStateDataOrBuilder
extends com.google.protobuf.MessageOrBuilder {
@@ -7620,120 +7856,1901 @@ public final class MasterProcedureProtos {
// @@protoc_insertion_point(class_scope:DeleteColumnFamilyStateData)
}
- private static com.google.protobuf.Descriptors.Descriptor
- internal_static_CreateTableStateData_descriptor;
- private static
- com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internal_static_CreateTableStateData_fieldAccessorTable;
- private static com.google.protobuf.Descriptors.Descriptor
- internal_static_ModifyTableStateData_descriptor;
- private static
- com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internal_static_ModifyTableStateData_fieldAccessorTable;
- private static com.google.protobuf.Descriptors.Descriptor
- internal_static_DeleteTableStateData_descriptor;
- private static
- com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internal_static_DeleteTableStateData_fieldAccessorTable;
- private static com.google.protobuf.Descriptors.Descriptor
- internal_static_AddColumnFamilyStateData_descriptor;
- private static
- com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internal_static_AddColumnFamilyStateData_fieldAccessorTable;
- private static com.google.protobuf.Descriptors.Descriptor
- internal_static_ModifyColumnFamilyStateData_descriptor;
- private static
- com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internal_static_ModifyColumnFamilyStateData_fieldAccessorTable;
- private static com.google.protobuf.Descriptors.Descriptor
- internal_static_DeleteColumnFamilyStateData_descriptor;
- private static
- com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internal_static_DeleteColumnFamilyStateData_fieldAccessorTable;
+ public interface EnableTableStateDataOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
- public static com.google.protobuf.Descriptors.FileDescriptor
- getDescriptor() {
- return descriptor;
+ // required .UserInformation user_info = 1;
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ boolean hasUserInfo();
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo();
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder();
+
+ // required .TableName table_name = 2;
+ /**
+ * required .TableName table_name = 2;
+ */
+ boolean hasTableName();
+ /**
+ * required .TableName table_name = 2;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName();
+ /**
+ * required .TableName table_name = 2;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder();
+
+ // required bool skip_table_state_check = 3;
+ /**
+ * required bool skip_table_state_check = 3;
+ */
+ boolean hasSkipTableStateCheck();
+ /**
+ * required bool skip_table_state_check = 3;
+ */
+ boolean getSkipTableStateCheck();
}
- private static com.google.protobuf.Descriptors.FileDescriptor
- descriptor;
- static {
- java.lang.String[] descriptorData = {
- "\n\025MasterProcedure.proto\032\013HBase.proto\032\tRP" +
- "C.proto\"\201\001\n\024CreateTableStateData\022#\n\tuser" +
- "_info\030\001 \002(\0132\020.UserInformation\022\"\n\014table_s" +
- "chema\030\002 \002(\0132\014.TableSchema\022 \n\013region_info" +
- "\030\003 \003(\0132\013.RegionInfo\"\277\001\n\024ModifyTableState" +
- "Data\022#\n\tuser_info\030\001 \002(\0132\020.UserInformatio" +
- "n\022-\n\027unmodified_table_schema\030\002 \001(\0132\014.Tab" +
- "leSchema\022+\n\025modified_table_schema\030\003 \002(\0132" +
- "\014.TableSchema\022&\n\036delete_column_family_in" +
- "_modify\030\004 \002(\010\"}\n\024DeleteTableStateData\022#\n",
- "\tuser_info\030\001 \002(\0132\020.UserInformation\022\036\n\nta" +
- "ble_name\030\002 \002(\0132\n.TableName\022 \n\013region_inf" +
- "o\030\003 \003(\0132\013.RegionInfo\"\300\001\n\030AddColumnFamily" +
- "StateData\022#\n\tuser_info\030\001 \002(\0132\020.UserInfor" +
- "mation\022\036\n\ntable_name\030\002 \002(\0132\n.TableName\0220" +
- "\n\023columnfamily_schema\030\003 \002(\0132\023.ColumnFami" +
- "lySchema\022-\n\027unmodified_table_schema\030\004 \001(" +
- "\0132\014.TableSchema\"\303\001\n\033ModifyColumnFamilySt" +
- "ateData\022#\n\tuser_info\030\001 \002(\0132\020.UserInforma" +
- "tion\022\036\n\ntable_name\030\002 \002(\0132\n.TableName\0220\n\023",
- "columnfamily_schema\030\003 \002(\0132\023.ColumnFamily" +
- "Schema\022-\n\027unmodified_table_schema\030\004 \001(\0132" +
- "\014.TableSchema\"\254\001\n\033DeleteColumnFamilyStat" +
- "eData\022#\n\tuser_info\030\001 \002(\0132\020.UserInformati" +
- "on\022\036\n\ntable_name\030\002 \002(\0132\n.TableName\022\031\n\021co" +
- "lumnfamily_name\030\003 \002(\014\022-\n\027unmodified_tabl" +
- "e_schema\030\004 \001(\0132\014.TableSchema*\330\001\n\020CreateT" +
- "ableState\022\036\n\032CREATE_TABLE_PRE_OPERATION\020" +
- "\001\022 \n\034CREATE_TABLE_WRITE_FS_LAYOUT\020\002\022\034\n\030C" +
- "REATE_TABLE_ADD_TO_META\020\003\022\037\n\033CREATE_TABL",
- "E_ASSIGN_REGIONS\020\004\022\"\n\036CREATE_TABLE_UPDAT" +
- "E_DESC_CACHE\020\005\022\037\n\033CREATE_TABLE_POST_OPER" +
- "ATION\020\006*\207\002\n\020ModifyTableState\022\030\n\024MODIFY_T" +
- "ABLE_PREPARE\020\001\022\036\n\032MODIFY_TABLE_PRE_OPERA" +
- "TION\020\002\022(\n$MODIFY_TABLE_UPDATE_TABLE_DESC" +
- "RIPTOR\020\003\022&\n\"MODIFY_TABLE_REMOVE_REPLICA_" +
- "COLUMN\020\004\022!\n\035MODIFY_TABLE_DELETE_FS_LAYOU" +
- "T\020\005\022\037\n\033MODIFY_TABLE_POST_OPERATION\020\006\022#\n\037" +
- "MODIFY_TABLE_REOPEN_ALL_REGIONS\020\007*\337\001\n\020De" +
- "leteTableState\022\036\n\032DELETE_TABLE_PRE_OPERA",
- "TION\020\001\022!\n\035DELETE_TABLE_REMOVE_FROM_META\020" +
- "\002\022 \n\034DELETE_TABLE_CLEAR_FS_LAYOUT\020\003\022\"\n\036D" +
- "ELETE_TABLE_UPDATE_DESC_CACHE\020\004\022!\n\035DELET" +
- "E_TABLE_UNASSIGN_REGIONS\020\005\022\037\n\033DELETE_TAB" +
- "LE_POST_OPERATION\020\006*\331\001\n\024AddColumnFamilyS" +
- "tate\022\035\n\031ADD_COLUMN_FAMILY_PREPARE\020\001\022#\n\037A" +
- "DD_COLUMN_FAMILY_PRE_OPERATION\020\002\022-\n)ADD_" +
- "COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR\020\003\022" +
- "$\n ADD_COLUMN_FAMILY_POST_OPERATION\020\004\022(\n" +
- "$ADD_COLUMN_FAMILY_REOPEN_ALL_REGIONS\020\005*",
- "\353\001\n\027ModifyColumnFamilyState\022 \n\034MODIFY_CO" +
- "LUMN_FAMILY_PREPARE\020\001\022&\n\"MODIFY_COLUMN_F" +
- "AMILY_PRE_OPERATION\020\002\0220\n,MODIFY_COLUMN_F" +
- "AMILY_UPDATE_TABLE_DESCRIPTOR\020\003\022\'\n#MODIF" +
- "Y_COLUMN_FAMILY_POST_OPERATION\020\004\022+\n\'MODI" +
- "FY_COLUMN_FAMILY_REOPEN_ALL_REGIONS\020\005*\226\002" +
- "\n\027DeleteColumnFamilyState\022 \n\034DELETE_COLU" +
- "MN_FAMILY_PREPARE\020\001\022&\n\"DELETE_COLUMN_FAM" +
- "ILY_PRE_OPERATION\020\002\0220\n,DELETE_COLUMN_FAM" +
- "ILY_UPDATE_TABLE_DESCRIPTOR\020\003\022)\n%DELETE_",
- "COLUMN_FAMILY_DELETE_FS_LAYOUT\020\004\022\'\n#DELE" +
- "TE_COLUMN_FAMILY_POST_OPERATION\020\005\022+\n\'DEL" +
- "ETE_COLUMN_FAMILY_REOPEN_ALL_REGIONS\020\006BK" +
- "\n*org.apache.hadoop.hbase.protobuf.gener" +
- "atedB\025MasterProcedureProtosH\001\210\001\001\240\001\001"
- };
- com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
- new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
- public com.google.protobuf.ExtensionRegistry assignDescriptors(
- com.google.protobuf.Descriptors.FileDescriptor root) {
- descriptor = root;
- internal_static_CreateTableStateData_descriptor =
- getDescriptor().getMessageTypes().get(0);
- internal_static_CreateTableStateData_fieldAccessorTable = new
- com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ /**
+ * Protobuf type {@code EnableTableStateData}
+ */
+ public static final class EnableTableStateData extends
+ com.google.protobuf.GeneratedMessage
+ implements EnableTableStateDataOrBuilder {
+ // Use EnableTableStateData.newBuilder() to construct.
+    private EnableTableStateData(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private EnableTableStateData(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final EnableTableStateData defaultInstance;
+ public static EnableTableStateData getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public EnableTableStateData getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private EnableTableStateData(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ subBuilder = userInfo_.toBuilder();
+ }
+ userInfo_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(userInfo_);
+ userInfo_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000001;
+ break;
+ }
+ case 18: {
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ subBuilder = tableName_.toBuilder();
+ }
+ tableName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(tableName_);
+ tableName_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000002;
+ break;
+ }
+ case 24: {
+ bitField0_ |= 0x00000004;
+ skipTableStateCheck_ = input.readBool();
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_EnableTableStateData_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_EnableTableStateData_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData.class, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData.Builder.class);
+ }
+
+    public static com.google.protobuf.Parser<EnableTableStateData> PARSER =
+        new com.google.protobuf.AbstractParser<EnableTableStateData>() {
+ public EnableTableStateData parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new EnableTableStateData(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+    public com.google.protobuf.Parser<EnableTableStateData> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // required .UserInformation user_info = 1;
+ public static final int USER_INFO_FIELD_NUMBER = 1;
+ private org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation userInfo_;
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ public boolean hasUserInfo() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo() {
+ return userInfo_;
+ }
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder() {
+ return userInfo_;
+ }
+
+ // required .TableName table_name = 2;
+ public static final int TABLE_NAME_FIELD_NUMBER = 2;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_;
+ /**
+ * required .TableName table_name = 2;
+ */
+ public boolean hasTableName() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * required .TableName table_name = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() {
+ return tableName_;
+ }
+ /**
+ * required .TableName table_name = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() {
+ return tableName_;
+ }
+
+ // required bool skip_table_state_check = 3;
+ public static final int SKIP_TABLE_STATE_CHECK_FIELD_NUMBER = 3;
+ private boolean skipTableStateCheck_;
+ /**
+ * required bool skip_table_state_check = 3;
+ */
+ public boolean hasSkipTableStateCheck() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * required bool skip_table_state_check = 3;
+ */
+ public boolean getSkipTableStateCheck() {
+ return skipTableStateCheck_;
+ }
+
+ private void initFields() {
+ userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance();
+ tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+ skipTableStateCheck_ = false;
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasUserInfo()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasTableName()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasSkipTableStateCheck()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!getUserInfo().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!getTableName().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeMessage(1, userInfo_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeMessage(2, tableName_);
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ output.writeBool(3, skipTableStateCheck_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(1, userInfo_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(2, tableName_);
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBoolSize(3, skipTableStateCheck_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData other = (org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData) obj;
+
+ boolean result = true;
+ result = result && (hasUserInfo() == other.hasUserInfo());
+ if (hasUserInfo()) {
+ result = result && getUserInfo()
+ .equals(other.getUserInfo());
+ }
+ result = result && (hasTableName() == other.hasTableName());
+ if (hasTableName()) {
+ result = result && getTableName()
+ .equals(other.getTableName());
+ }
+ result = result && (hasSkipTableStateCheck() == other.hasSkipTableStateCheck());
+ if (hasSkipTableStateCheck()) {
+ result = result && (getSkipTableStateCheck()
+ == other.getSkipTableStateCheck());
+ }
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasUserInfo()) {
+ hash = (37 * hash) + USER_INFO_FIELD_NUMBER;
+ hash = (53 * hash) + getUserInfo().hashCode();
+ }
+ if (hasTableName()) {
+ hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER;
+ hash = (53 * hash) + getTableName().hashCode();
+ }
+ if (hasSkipTableStateCheck()) {
+ hash = (37 * hash) + SKIP_TABLE_STATE_CHECK_FIELD_NUMBER;
+ hash = (53 * hash) + hashBoolean(getSkipTableStateCheck());
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code EnableTableStateData}
+ */
+ public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateDataOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_EnableTableStateData_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_EnableTableStateData_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData.class, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getUserInfoFieldBuilder();
+ getTableNameFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ if (userInfoBuilder_ == null) {
+ userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance();
+ } else {
+ userInfoBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
+ if (tableNameBuilder_ == null) {
+ tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+ } else {
+ tableNameBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000002);
+ skipTableStateCheck_ = false;
+ bitField0_ = (bitField0_ & ~0x00000004);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_EnableTableStateData_descriptor;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData build() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData result = new org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ if (userInfoBuilder_ == null) {
+ result.userInfo_ = userInfo_;
+ } else {
+ result.userInfo_ = userInfoBuilder_.build();
+ }
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ if (tableNameBuilder_ == null) {
+ result.tableName_ = tableName_;
+ } else {
+ result.tableName_ = tableNameBuilder_.build();
+ }
+ if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+ to_bitField0_ |= 0x00000004;
+ }
+ result.skipTableStateCheck_ = skipTableStateCheck_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData.getDefaultInstance()) return this;
+ if (other.hasUserInfo()) {
+ mergeUserInfo(other.getUserInfo());
+ }
+ if (other.hasTableName()) {
+ mergeTableName(other.getTableName());
+ }
+ if (other.hasSkipTableStateCheck()) {
+ setSkipTableStateCheck(other.getSkipTableStateCheck());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasUserInfo()) {
+
+ return false;
+ }
+ if (!hasTableName()) {
+
+ return false;
+ }
+ if (!hasSkipTableStateCheck()) {
+
+ return false;
+ }
+ if (!getUserInfo().isInitialized()) {
+
+ return false;
+ }
+ if (!getTableName().isInitialized()) {
+
+ return false;
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // required .UserInformation user_info = 1;
+ private org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder> userInfoBuilder_;
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ public boolean hasUserInfo() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo() {
+ if (userInfoBuilder_ == null) {
+ return userInfo_;
+ } else {
+ return userInfoBuilder_.getMessage();
+ }
+ }
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ public Builder setUserInfo(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation value) {
+ if (userInfoBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ userInfo_ = value;
+ onChanged();
+ } else {
+ userInfoBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ public Builder setUserInfo(
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder builderForValue) {
+ if (userInfoBuilder_ == null) {
+ userInfo_ = builderForValue.build();
+ onChanged();
+ } else {
+ userInfoBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ public Builder mergeUserInfo(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation value) {
+ if (userInfoBuilder_ == null) {
+ if (((bitField0_ & 0x00000001) == 0x00000001) &&
+ userInfo_ != org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance()) {
+ userInfo_ =
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.newBuilder(userInfo_).mergeFrom(value).buildPartial();
+ } else {
+ userInfo_ = value;
+ }
+ onChanged();
+ } else {
+ userInfoBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ public Builder clearUserInfo() {
+ if (userInfoBuilder_ == null) {
+ userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance();
+ onChanged();
+ } else {
+ userInfoBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
+ }
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder getUserInfoBuilder() {
+ bitField0_ |= 0x00000001;
+ onChanged();
+ return getUserInfoFieldBuilder().getBuilder();
+ }
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder() {
+ if (userInfoBuilder_ != null) {
+ return userInfoBuilder_.getMessageOrBuilder();
+ } else {
+ return userInfo_;
+ }
+ }
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder>
+ getUserInfoFieldBuilder() {
+ if (userInfoBuilder_ == null) {
+ userInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder>(
+ userInfo_,
+ getParentForChildren(),
+ isClean());
+ userInfo_ = null;
+ }
+ return userInfoBuilder_;
+ }
+
+ // required .TableName table_name = 2;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableNameBuilder_;
+ /**
+ * required .TableName table_name = 2;
+ */
+ public boolean hasTableName() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * required .TableName table_name = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() {
+ if (tableNameBuilder_ == null) {
+ return tableName_;
+ } else {
+ return tableNameBuilder_.getMessage();
+ }
+ }
+ /**
+ * required .TableName table_name = 2;
+ */
+ public Builder setTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) {
+ if (tableNameBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ tableName_ = value;
+ onChanged();
+ } else {
+ tableNameBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * required .TableName table_name = 2;
+ */
+ public Builder setTableName(
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) {
+ if (tableNameBuilder_ == null) {
+ tableName_ = builderForValue.build();
+ onChanged();
+ } else {
+ tableNameBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * required .TableName table_name = 2;
+ */
+ public Builder mergeTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) {
+ if (tableNameBuilder_ == null) {
+ if (((bitField0_ & 0x00000002) == 0x00000002) &&
+ tableName_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) {
+ tableName_ =
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial();
+ } else {
+ tableName_ = value;
+ }
+ onChanged();
+ } else {
+ tableNameBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * required .TableName table_name = 2;
+ */
+ public Builder clearTableName() {
+ if (tableNameBuilder_ == null) {
+ tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+ onChanged();
+ } else {
+ tableNameBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000002);
+ return this;
+ }
+ /**
+ * required .TableName table_name = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableNameBuilder() {
+ bitField0_ |= 0x00000002;
+ onChanged();
+ return getTableNameFieldBuilder().getBuilder();
+ }
+ /**
+ * required .TableName table_name = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() {
+ if (tableNameBuilder_ != null) {
+ return tableNameBuilder_.getMessageOrBuilder();
+ } else {
+ return tableName_;
+ }
+ }
+ /**
+ * required .TableName table_name = 2;
+ */
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>
+ getTableNameFieldBuilder() {
+ if (tableNameBuilder_ == null) {
+ tableNameBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>(
+ tableName_,
+ getParentForChildren(),
+ isClean());
+ tableName_ = null;
+ }
+ return tableNameBuilder_;
+ }
+
+ // required bool skip_table_state_check = 3;
+ private boolean skipTableStateCheck_ ;
+ /**
+ * required bool skip_table_state_check = 3;
+ */
+ public boolean hasSkipTableStateCheck() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * required bool skip_table_state_check = 3;
+ */
+ public boolean getSkipTableStateCheck() {
+ return skipTableStateCheck_;
+ }
+ /**
+ * required bool skip_table_state_check = 3;
+ */
+ public Builder setSkipTableStateCheck(boolean value) {
+ bitField0_ |= 0x00000004;
+ skipTableStateCheck_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * required bool skip_table_state_check = 3;
+ */
+ public Builder clearSkipTableStateCheck() {
+ bitField0_ = (bitField0_ & ~0x00000004);
+ skipTableStateCheck_ = false;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:EnableTableStateData)
+ }
+
+ static {
+ defaultInstance = new EnableTableStateData(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:EnableTableStateData)
+ }
+
+ public interface DisableTableStateDataOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required .UserInformation user_info = 1;
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ boolean hasUserInfo();
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo();
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder();
+
+ // required .TableName table_name = 2;
+ /**
+ * required .TableName table_name = 2;
+ */
+ boolean hasTableName();
+ /**
+ * required .TableName table_name = 2;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName();
+ /**
+ * required .TableName table_name = 2;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder();
+
+ // required bool skip_table_state_check = 3;
+ /**
+ * required bool skip_table_state_check = 3;
+ */
+ boolean hasSkipTableStateCheck();
+ /**
+ * required bool skip_table_state_check = 3;
+ */
+ boolean getSkipTableStateCheck();
+ }
+ /**
+ * Protobuf type {@code DisableTableStateData}
+ */
+ public static final class DisableTableStateData extends
+ com.google.protobuf.GeneratedMessage
+ implements DisableTableStateDataOrBuilder {
+ // Use DisableTableStateData.newBuilder() to construct.
+    private DisableTableStateData(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private DisableTableStateData(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final DisableTableStateData defaultInstance;
+ public static DisableTableStateData getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public DisableTableStateData getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private DisableTableStateData(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ subBuilder = userInfo_.toBuilder();
+ }
+ userInfo_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(userInfo_);
+ userInfo_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000001;
+ break;
+ }
+ case 18: {
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ subBuilder = tableName_.toBuilder();
+ }
+ tableName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(tableName_);
+ tableName_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000002;
+ break;
+ }
+ case 24: {
+ bitField0_ |= 0x00000004;
+ skipTableStateCheck_ = input.readBool();
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_DisableTableStateData_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_DisableTableStateData_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData.class, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData.Builder.class);
+ }
+
+    public static com.google.protobuf.Parser<DisableTableStateData> PARSER =
+        new com.google.protobuf.AbstractParser<DisableTableStateData>() {
+ public DisableTableStateData parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new DisableTableStateData(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+    public com.google.protobuf.Parser<DisableTableStateData> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // required .UserInformation user_info = 1;
+ public static final int USER_INFO_FIELD_NUMBER = 1;
+ private org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation userInfo_;
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ public boolean hasUserInfo() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo() {
+ return userInfo_;
+ }
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder() {
+ return userInfo_;
+ }
+
+ // required .TableName table_name = 2;
+ public static final int TABLE_NAME_FIELD_NUMBER = 2;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_;
+ /**
+ * required .TableName table_name = 2;
+ */
+ public boolean hasTableName() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * required .TableName table_name = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() {
+ return tableName_;
+ }
+ /**
+ * required .TableName table_name = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() {
+ return tableName_;
+ }
+
+ // required bool skip_table_state_check = 3;
+ public static final int SKIP_TABLE_STATE_CHECK_FIELD_NUMBER = 3;
+ private boolean skipTableStateCheck_;
+ /**
+ * required bool skip_table_state_check = 3;
+ */
+ public boolean hasSkipTableStateCheck() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * required bool skip_table_state_check = 3;
+ */
+ public boolean getSkipTableStateCheck() {
+ return skipTableStateCheck_;
+ }
+
+ private void initFields() {
+ userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance();
+ tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+ skipTableStateCheck_ = false;
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasUserInfo()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasTableName()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasSkipTableStateCheck()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!getUserInfo().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!getTableName().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeMessage(1, userInfo_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeMessage(2, tableName_);
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ output.writeBool(3, skipTableStateCheck_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(1, userInfo_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(2, tableName_);
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBoolSize(3, skipTableStateCheck_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData other = (org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData) obj;
+
+ boolean result = true;
+ result = result && (hasUserInfo() == other.hasUserInfo());
+ if (hasUserInfo()) {
+ result = result && getUserInfo()
+ .equals(other.getUserInfo());
+ }
+ result = result && (hasTableName() == other.hasTableName());
+ if (hasTableName()) {
+ result = result && getTableName()
+ .equals(other.getTableName());
+ }
+ result = result && (hasSkipTableStateCheck() == other.hasSkipTableStateCheck());
+ if (hasSkipTableStateCheck()) {
+ result = result && (getSkipTableStateCheck()
+ == other.getSkipTableStateCheck());
+ }
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasUserInfo()) {
+ hash = (37 * hash) + USER_INFO_FIELD_NUMBER;
+ hash = (53 * hash) + getUserInfo().hashCode();
+ }
+ if (hasTableName()) {
+ hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER;
+ hash = (53 * hash) + getTableName().hashCode();
+ }
+ if (hasSkipTableStateCheck()) {
+ hash = (37 * hash) + SKIP_TABLE_STATE_CHECK_FIELD_NUMBER;
+ hash = (53 * hash) + hashBoolean(getSkipTableStateCheck());
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code DisableTableStateData}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateDataOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_DisableTableStateData_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_DisableTableStateData_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData.class, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getUserInfoFieldBuilder();
+ getTableNameFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ if (userInfoBuilder_ == null) {
+ userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance();
+ } else {
+ userInfoBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
+ if (tableNameBuilder_ == null) {
+ tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+ } else {
+ tableNameBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000002);
+ skipTableStateCheck_ = false;
+ bitField0_ = (bitField0_ & ~0x00000004);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_DisableTableStateData_descriptor;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData build() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData result = new org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ if (userInfoBuilder_ == null) {
+ result.userInfo_ = userInfo_;
+ } else {
+ result.userInfo_ = userInfoBuilder_.build();
+ }
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ if (tableNameBuilder_ == null) {
+ result.tableName_ = tableName_;
+ } else {
+ result.tableName_ = tableNameBuilder_.build();
+ }
+ if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+ to_bitField0_ |= 0x00000004;
+ }
+ result.skipTableStateCheck_ = skipTableStateCheck_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData.getDefaultInstance()) return this;
+ if (other.hasUserInfo()) {
+ mergeUserInfo(other.getUserInfo());
+ }
+ if (other.hasTableName()) {
+ mergeTableName(other.getTableName());
+ }
+ if (other.hasSkipTableStateCheck()) {
+ setSkipTableStateCheck(other.getSkipTableStateCheck());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasUserInfo()) {
+
+ return false;
+ }
+ if (!hasTableName()) {
+
+ return false;
+ }
+ if (!hasSkipTableStateCheck()) {
+
+ return false;
+ }
+ if (!getUserInfo().isInitialized()) {
+
+ return false;
+ }
+ if (!getTableName().isInitialized()) {
+
+ return false;
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // required .UserInformation user_info = 1;
+ private org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder> userInfoBuilder_;
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ public boolean hasUserInfo() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo() {
+ if (userInfoBuilder_ == null) {
+ return userInfo_;
+ } else {
+ return userInfoBuilder_.getMessage();
+ }
+ }
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ public Builder setUserInfo(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation value) {
+ if (userInfoBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ userInfo_ = value;
+ onChanged();
+ } else {
+ userInfoBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ public Builder setUserInfo(
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder builderForValue) {
+ if (userInfoBuilder_ == null) {
+ userInfo_ = builderForValue.build();
+ onChanged();
+ } else {
+ userInfoBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ public Builder mergeUserInfo(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation value) {
+ if (userInfoBuilder_ == null) {
+ if (((bitField0_ & 0x00000001) == 0x00000001) &&
+ userInfo_ != org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance()) {
+ userInfo_ =
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.newBuilder(userInfo_).mergeFrom(value).buildPartial();
+ } else {
+ userInfo_ = value;
+ }
+ onChanged();
+ } else {
+ userInfoBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ public Builder clearUserInfo() {
+ if (userInfoBuilder_ == null) {
+ userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance();
+ onChanged();
+ } else {
+ userInfoBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
+ }
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder getUserInfoBuilder() {
+ bitField0_ |= 0x00000001;
+ onChanged();
+ return getUserInfoFieldBuilder().getBuilder();
+ }
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder() {
+ if (userInfoBuilder_ != null) {
+ return userInfoBuilder_.getMessageOrBuilder();
+ } else {
+ return userInfo_;
+ }
+ }
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder>
+ getUserInfoFieldBuilder() {
+ if (userInfoBuilder_ == null) {
+ userInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder>(
+ userInfo_,
+ getParentForChildren(),
+ isClean());
+ userInfo_ = null;
+ }
+ return userInfoBuilder_;
+ }
+
+ // required .TableName table_name = 2;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableNameBuilder_;
+ /**
+ * required .TableName table_name = 2;
+ */
+ public boolean hasTableName() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * required .TableName table_name = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() {
+ if (tableNameBuilder_ == null) {
+ return tableName_;
+ } else {
+ return tableNameBuilder_.getMessage();
+ }
+ }
+ /**
+ * required .TableName table_name = 2;
+ */
+ public Builder setTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) {
+ if (tableNameBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ tableName_ = value;
+ onChanged();
+ } else {
+ tableNameBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * required .TableName table_name = 2;
+ */
+ public Builder setTableName(
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) {
+ if (tableNameBuilder_ == null) {
+ tableName_ = builderForValue.build();
+ onChanged();
+ } else {
+ tableNameBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * required .TableName table_name = 2;
+ */
+ public Builder mergeTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) {
+ if (tableNameBuilder_ == null) {
+ if (((bitField0_ & 0x00000002) == 0x00000002) &&
+ tableName_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) {
+ tableName_ =
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial();
+ } else {
+ tableName_ = value;
+ }
+ onChanged();
+ } else {
+ tableNameBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * required .TableName table_name = 2;
+ */
+ public Builder clearTableName() {
+ if (tableNameBuilder_ == null) {
+ tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+ onChanged();
+ } else {
+ tableNameBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000002);
+ return this;
+ }
+ /**
+ * required .TableName table_name = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableNameBuilder() {
+ bitField0_ |= 0x00000002;
+ onChanged();
+ return getTableNameFieldBuilder().getBuilder();
+ }
+ /**
+ * required .TableName table_name = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() {
+ if (tableNameBuilder_ != null) {
+ return tableNameBuilder_.getMessageOrBuilder();
+ } else {
+ return tableName_;
+ }
+ }
+ /**
+ * required .TableName table_name = 2;
+ */
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>
+ getTableNameFieldBuilder() {
+ if (tableNameBuilder_ == null) {
+ tableNameBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>(
+ tableName_,
+ getParentForChildren(),
+ isClean());
+ tableName_ = null;
+ }
+ return tableNameBuilder_;
+ }
+
+ // required bool skip_table_state_check = 3;
+ private boolean skipTableStateCheck_ ;
+ /**
+ * required bool skip_table_state_check = 3;
+ */
+ public boolean hasSkipTableStateCheck() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * required bool skip_table_state_check = 3;
+ */
+ public boolean getSkipTableStateCheck() {
+ return skipTableStateCheck_;
+ }
+ /**
+ * required bool skip_table_state_check = 3;
+ */
+ public Builder setSkipTableStateCheck(boolean value) {
+ bitField0_ |= 0x00000004;
+ skipTableStateCheck_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * required bool skip_table_state_check = 3;
+ */
+ public Builder clearSkipTableStateCheck() {
+ bitField0_ = (bitField0_ & ~0x00000004);
+ skipTableStateCheck_ = false;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:DisableTableStateData)
+ }
+
+ static {
+ defaultInstance = new DisableTableStateData(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:DisableTableStateData)
+ }
+
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_CreateTableStateData_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_CreateTableStateData_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_ModifyTableStateData_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_ModifyTableStateData_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_DeleteTableStateData_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_DeleteTableStateData_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_AddColumnFamilyStateData_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_AddColumnFamilyStateData_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_ModifyColumnFamilyStateData_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_ModifyColumnFamilyStateData_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_DeleteColumnFamilyStateData_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_DeleteColumnFamilyStateData_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_EnableTableStateData_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_EnableTableStateData_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_DisableTableStateData_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_DisableTableStateData_fieldAccessorTable;
+
+ public static com.google.protobuf.Descriptors.FileDescriptor
+ getDescriptor() {
+ return descriptor;
+ }
+ private static com.google.protobuf.Descriptors.FileDescriptor
+ descriptor;
+ static {
+ java.lang.String[] descriptorData = {
+ "\n\025MasterProcedure.proto\032\013HBase.proto\032\tRP" +
+ "C.proto\"\201\001\n\024CreateTableStateData\022#\n\tuser" +
+ "_info\030\001 \002(\0132\020.UserInformation\022\"\n\014table_s" +
+ "chema\030\002 \002(\0132\014.TableSchema\022 \n\013region_info" +
+ "\030\003 \003(\0132\013.RegionInfo\"\277\001\n\024ModifyTableState" +
+ "Data\022#\n\tuser_info\030\001 \002(\0132\020.UserInformatio" +
+ "n\022-\n\027unmodified_table_schema\030\002 \001(\0132\014.Tab" +
+ "leSchema\022+\n\025modified_table_schema\030\003 \002(\0132" +
+ "\014.TableSchema\022&\n\036delete_column_family_in" +
+ "_modify\030\004 \002(\010\"}\n\024DeleteTableStateData\022#\n",
+ "\tuser_info\030\001 \002(\0132\020.UserInformation\022\036\n\nta" +
+ "ble_name\030\002 \002(\0132\n.TableName\022 \n\013region_inf" +
+ "o\030\003 \003(\0132\013.RegionInfo\"\300\001\n\030AddColumnFamily" +
+ "StateData\022#\n\tuser_info\030\001 \002(\0132\020.UserInfor" +
+ "mation\022\036\n\ntable_name\030\002 \002(\0132\n.TableName\0220" +
+ "\n\023columnfamily_schema\030\003 \002(\0132\023.ColumnFami" +
+ "lySchema\022-\n\027unmodified_table_schema\030\004 \001(" +
+ "\0132\014.TableSchema\"\303\001\n\033ModifyColumnFamilySt" +
+ "ateData\022#\n\tuser_info\030\001 \002(\0132\020.UserInforma" +
+ "tion\022\036\n\ntable_name\030\002 \002(\0132\n.TableName\0220\n\023",
+ "columnfamily_schema\030\003 \002(\0132\023.ColumnFamily" +
+ "Schema\022-\n\027unmodified_table_schema\030\004 \001(\0132" +
+ "\014.TableSchema\"\254\001\n\033DeleteColumnFamilyStat" +
+ "eData\022#\n\tuser_info\030\001 \002(\0132\020.UserInformati" +
+ "on\022\036\n\ntable_name\030\002 \002(\0132\n.TableName\022\031\n\021co" +
+ "lumnfamily_name\030\003 \002(\014\022-\n\027unmodified_tabl" +
+ "e_schema\030\004 \001(\0132\014.TableSchema\"{\n\024EnableTa" +
+ "bleStateData\022#\n\tuser_info\030\001 \002(\0132\020.UserIn" +
+ "formation\022\036\n\ntable_name\030\002 \002(\0132\n.TableNam" +
+ "e\022\036\n\026skip_table_state_check\030\003 \002(\010\"|\n\025Dis",
+ "ableTableStateData\022#\n\tuser_info\030\001 \002(\0132\020." +
+ "UserInformation\022\036\n\ntable_name\030\002 \002(\0132\n.Ta" +
+ "bleName\022\036\n\026skip_table_state_check\030\003 \002(\010*" +
+ "\330\001\n\020CreateTableState\022\036\n\032CREATE_TABLE_PRE" +
+ "_OPERATION\020\001\022 \n\034CREATE_TABLE_WRITE_FS_LA" +
+ "YOUT\020\002\022\034\n\030CREATE_TABLE_ADD_TO_META\020\003\022\037\n\033" +
+ "CREATE_TABLE_ASSIGN_REGIONS\020\004\022\"\n\036CREATE_" +
+ "TABLE_UPDATE_DESC_CACHE\020\005\022\037\n\033CREATE_TABL" +
+ "E_POST_OPERATION\020\006*\207\002\n\020ModifyTableState\022" +
+ "\030\n\024MODIFY_TABLE_PREPARE\020\001\022\036\n\032MODIFY_TABL",
+ "E_PRE_OPERATION\020\002\022(\n$MODIFY_TABLE_UPDATE" +
+ "_TABLE_DESCRIPTOR\020\003\022&\n\"MODIFY_TABLE_REMO" +
+ "VE_REPLICA_COLUMN\020\004\022!\n\035MODIFY_TABLE_DELE" +
+ "TE_FS_LAYOUT\020\005\022\037\n\033MODIFY_TABLE_POST_OPER" +
+ "ATION\020\006\022#\n\037MODIFY_TABLE_REOPEN_ALL_REGIO" +
+ "NS\020\007*\337\001\n\020DeleteTableState\022\036\n\032DELETE_TABL" +
+ "E_PRE_OPERATION\020\001\022!\n\035DELETE_TABLE_REMOVE" +
+ "_FROM_META\020\002\022 \n\034DELETE_TABLE_CLEAR_FS_LA" +
+ "YOUT\020\003\022\"\n\036DELETE_TABLE_UPDATE_DESC_CACHE" +
+ "\020\004\022!\n\035DELETE_TABLE_UNASSIGN_REGIONS\020\005\022\037\n",
+ "\033DELETE_TABLE_POST_OPERATION\020\006*\331\001\n\024AddCo" +
+ "lumnFamilyState\022\035\n\031ADD_COLUMN_FAMILY_PRE" +
+ "PARE\020\001\022#\n\037ADD_COLUMN_FAMILY_PRE_OPERATIO" +
+ "N\020\002\022-\n)ADD_COLUMN_FAMILY_UPDATE_TABLE_DE" +
+ "SCRIPTOR\020\003\022$\n ADD_COLUMN_FAMILY_POST_OPE" +
+ "RATION\020\004\022(\n$ADD_COLUMN_FAMILY_REOPEN_ALL" +
+ "_REGIONS\020\005*\353\001\n\027ModifyColumnFamilyState\022 " +
+ "\n\034MODIFY_COLUMN_FAMILY_PREPARE\020\001\022&\n\"MODI" +
+ "FY_COLUMN_FAMILY_PRE_OPERATION\020\002\0220\n,MODI" +
+ "FY_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR",
+ "\020\003\022\'\n#MODIFY_COLUMN_FAMILY_POST_OPERATIO" +
+ "N\020\004\022+\n\'MODIFY_COLUMN_FAMILY_REOPEN_ALL_R" +
+ "EGIONS\020\005*\226\002\n\027DeleteColumnFamilyState\022 \n\034" +
+ "DELETE_COLUMN_FAMILY_PREPARE\020\001\022&\n\"DELETE" +
+ "_COLUMN_FAMILY_PRE_OPERATION\020\002\0220\n,DELETE" +
+ "_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR\020\003" +
+ "\022)\n%DELETE_COLUMN_FAMILY_DELETE_FS_LAYOU" +
+ "T\020\004\022\'\n#DELETE_COLUMN_FAMILY_POST_OPERATI" +
+ "ON\020\005\022+\n\'DELETE_COLUMN_FAMILY_REOPEN_ALL_" +
+ "REGIONS\020\006*\350\001\n\020EnableTableState\022\030\n\024ENABLE",
+ "_TABLE_PREPARE\020\001\022\036\n\032ENABLE_TABLE_PRE_OPE" +
+ "RATION\020\002\022)\n%ENABLE_TABLE_SET_ENABLING_TA" +
+ "BLE_STATE\020\003\022$\n ENABLE_TABLE_MARK_REGIONS" +
+ "_ONLINE\020\004\022(\n$ENABLE_TABLE_SET_ENABLED_TA" +
+ "BLE_STATE\020\005\022\037\n\033ENABLE_TABLE_POST_OPERATI" +
+ "ON\020\006*\362\001\n\021DisableTableState\022\031\n\025DISABLE_TA" +
+ "BLE_PREPARE\020\001\022\037\n\033DISABLE_TABLE_PRE_OPERA" +
+ "TION\020\002\022+\n\'DISABLE_TABLE_SET_DISABLING_TA" +
+ "BLE_STATE\020\003\022&\n\"DISABLE_TABLE_MARK_REGION" +
+ "S_OFFLINE\020\004\022*\n&DISABLE_TABLE_SET_DISABLE",
+ "D_TABLE_STATE\020\005\022 \n\034DISABLE_TABLE_POST_OP" +
+ "ERATION\020\006BK\n*org.apache.hadoop.hbase.pro" +
+ "tobuf.generatedB\025MasterProcedureProtosH\001" +
+ "\210\001\001\240\001\001"
+ };
+ com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
+ new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
+ public com.google.protobuf.ExtensionRegistry assignDescriptors(
+ com.google.protobuf.Descriptors.FileDescriptor root) {
+ descriptor = root;
+ internal_static_CreateTableStateData_descriptor =
+ getDescriptor().getMessageTypes().get(0);
+ internal_static_CreateTableStateData_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_CreateTableStateData_descriptor,
new java.lang.String[] { "UserInfo", "TableSchema", "RegionInfo", });
internal_static_ModifyTableStateData_descriptor =
@@ -7766,6 +9783,18 @@ public final class MasterProcedureProtos {
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_DeleteColumnFamilyStateData_descriptor,
new java.lang.String[] { "UserInfo", "TableName", "ColumnfamilyName", "UnmodifiedTableSchema", });
+ internal_static_EnableTableStateData_descriptor =
+ getDescriptor().getMessageTypes().get(6);
+ internal_static_EnableTableStateData_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_EnableTableStateData_descriptor,
+ new java.lang.String[] { "UserInfo", "TableName", "SkipTableStateCheck", });
+ internal_static_DisableTableStateData_descriptor =
+ getDescriptor().getMessageTypes().get(7);
+ internal_static_DisableTableStateData_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_DisableTableStateData_descriptor,
+ new java.lang.String[] { "UserInfo", "TableName", "SkipTableStateCheck", });
return null;
}
};
diff --git a/hbase-protocol/src/main/protobuf/MasterProcedure.proto b/hbase-protocol/src/main/protobuf/MasterProcedure.proto
index a07516d..a9ad0e0 100644
--- a/hbase-protocol/src/main/protobuf/MasterProcedure.proto
+++ b/hbase-protocol/src/main/protobuf/MasterProcedure.proto
@@ -135,3 +135,33 @@ message DeleteColumnFamilyStateData {
required bytes columnfamily_name = 3;
optional TableSchema unmodified_table_schema = 4;
}
+
+enum EnableTableState {
+ ENABLE_TABLE_PREPARE = 1;
+ ENABLE_TABLE_PRE_OPERATION = 2;
+ ENABLE_TABLE_SET_ENABLING_TABLE_STATE = 3;
+ ENABLE_TABLE_MARK_REGIONS_ONLINE = 4;
+ ENABLE_TABLE_SET_ENABLED_TABLE_STATE = 5;
+ ENABLE_TABLE_POST_OPERATION = 6;
+}
+
+message EnableTableStateData {
+ required UserInformation user_info = 1;
+ required TableName table_name = 2;
+ required bool skip_table_state_check = 3;
+}
+
+enum DisableTableState {
+ DISABLE_TABLE_PREPARE = 1;
+ DISABLE_TABLE_PRE_OPERATION = 2;
+ DISABLE_TABLE_SET_DISABLING_TABLE_STATE = 3;
+ DISABLE_TABLE_MARK_REGIONS_OFFLINE = 4;
+ DISABLE_TABLE_SET_DISABLED_TABLE_STATE = 5;
+ DISABLE_TABLE_POST_OPERATION = 6;
+}
+
+message DisableTableStateData {
+ required UserInformation user_info = 1;
+ required TableName table_name = 2;
+ required bool skip_table_state_check = 3;
+}
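
Both new messages mark all three fields as required, so the generated builders refuse to produce a partially-populated message. A minimal round-trip sketch, assuming the UserInformation shape from RPC.proto (effective_user) and the TableName shape from HBase.proto (namespace/qualifier bytes); the table and user names are illustrative only:

  import com.google.protobuf.ByteString;
  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
  import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData;
  import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation;

  public class DisableTableStateDataRoundTrip {
    public static void main(String[] args) throws Exception {
      DisableTableStateData data = DisableTableStateData.newBuilder()
          .setUserInfo(UserInformation.newBuilder()
              .setEffectiveUser("hbase")                  // assumed RPC.proto field
              .build())
          .setTableName(HBaseProtos.TableName.newBuilder()
              .setNamespace(ByteString.copyFromUtf8("default"))
              .setQualifier(ByteString.copyFromUtf8("t1"))
              .build())
          .setSkipTableStateCheck(false)
          .build();                                       // fails if a required field is unset

      // parseFrom(byte[]) is one of the generated entry points shown above.
      DisableTableStateData copy = DisableTableStateData.parseFrom(data.toByteArray());
      System.out.println(copy.getTableName().getQualifier().toStringUtf8());  // prints "t1"
    }
  }
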
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 157f80b..d8ceeef 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -90,14 +90,14 @@ import org.apache.hadoop.hbase.master.balancer.ClusterStatusChore;
import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory;
import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
import org.apache.hadoop.hbase.master.cleaner.LogCleaner;
-import org.apache.hadoop.hbase.master.handler.DisableTableHandler;
import org.apache.hadoop.hbase.master.handler.DispatchMergingRegionHandler;
-import org.apache.hadoop.hbase.master.handler.EnableTableHandler;
import org.apache.hadoop.hbase.master.handler.TruncateTableHandler;
import org.apache.hadoop.hbase.master.procedure.AddColumnFamilyProcedure;
import org.apache.hadoop.hbase.master.procedure.CreateTableProcedure;
import org.apache.hadoop.hbase.master.procedure.DeleteColumnFamilyProcedure;
import org.apache.hadoop.hbase.master.procedure.DeleteTableProcedure;
+import org.apache.hadoop.hbase.master.procedure.DisableTableProcedure;
+import org.apache.hadoop.hbase.master.procedure.EnableTableProcedure;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureConstants;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.master.procedure.ModifyColumnFamilyProcedure;
@@ -1708,11 +1708,24 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
cpHost.preEnableTable(tableName);
}
LOG.info(getClientIdAuditPrefix() + " enable " + tableName);
- this.service.submit(new EnableTableHandler(this, tableName,
- assignmentManager, tableLockManager, false).prepare());
+
+ // Execute the operation asynchronously - client will check the progress of the operation
+ final ProcedurePrepareLatch prepareLatch = ProcedurePrepareLatch.createLatch();
+ long procId =
+ this.procedureExecutor.submitProcedure(new EnableTableProcedure(procedureExecutor
+ .getEnvironment(), tableName, false, prepareLatch));
+ // Before returning to client, we want to make sure that the table is prepared to be
+ // enabled (the table is locked and the table state is set).
+ //
+ // Note: if the procedure throws an exception, we will catch it and rethrow it.
+ prepareLatch.await();
+
if (cpHost != null) {
cpHost.postEnableTable(tableName);
- }
+ }
+
+ // TODO: return procId as part of client-side change
+ // return procId;
}
@Override
@@ -1722,11 +1735,25 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
cpHost.preDisableTable(tableName);
}
LOG.info(getClientIdAuditPrefix() + " disable " + tableName);
- this.service.submit(new DisableTableHandler(this, tableName,
- assignmentManager, tableLockManager, false).prepare());
+
+ // Execute the operation asynchronously - client will check the progress of the operation
+ final ProcedurePrepareLatch prepareLatch = ProcedurePrepareLatch.createLatch();
+ long procId =
+ this.procedureExecutor.submitProcedure(new DisableTableProcedure(procedureExecutor
+ .getEnvironment(), tableName, false, prepareLatch));
+ // Before returning to client, we want to make sure that the table is prepared to be
+ // disabled (the table is locked and the table state is set).
+ //
+ // Note: if the procedure throws an exception, we will catch it and rethrow it.
+ prepareLatch.await();
+
if (cpHost != null) {
cpHost.postDisableTable(tableName);
}
+
+ // TODO: return procId as part of client-side change
+ // return procId;
}
/**
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java
new file mode 100644
index 0000000..bd8f29e
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java
@@ -0,0 +1,562 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.procedure;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.security.PrivilegedExceptionAction;
+import java.util.List;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableNotEnabledException;
+import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.TableStateManager;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.constraint.ConstraintException;
+import org.apache.hadoop.hbase.executor.EventType;
+import org.apache.hadoop.hbase.exceptions.HBaseException;
+import org.apache.hadoop.hbase.master.AssignmentManager;
+import org.apache.hadoop.hbase.master.BulkAssigner;
+import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
+import org.apache.hadoop.hbase.master.RegionState;
+import org.apache.hadoop.hbase.master.RegionStates;
+import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableState;
+import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.htrace.Trace;
+
+@InterfaceAudience.Private
+public class DisableTableProcedure
+ extends StateMachineProcedure<MasterProcedureEnv, DisableTableState>
+ implements TableProcedureInterface {
+ private static final Log LOG = LogFactory.getLog(DisableTableProcedure.class);
+
+ private final AtomicBoolean aborted = new AtomicBoolean(false);
+
+ // This is for backward compatibility with 1.0 asynchronous operations.
+ private final ProcedurePrepareLatch syncLatch;
+
+ private TableName tableName;
+ private boolean skipTableStateCheck;
+ private UserGroupInformation user;
+
+ private Boolean traceEnabled = null;
+
+ enum MarkRegionOfflineOpResult {
+ MARK_ALL_REGIONS_OFFLINE_SUCCESSFUL,
+ BULK_ASSIGN_REGIONS_FAILED,
+ MARK_ALL_REGIONS_OFFLINE_INTERRUPTED,
+ }
+
+ public DisableTableProcedure() {
+ syncLatch = null;
+ }
+
+ /**
+ * Constructor
+ * @param env MasterProcedureEnv
+ * @param tableName the table to operate on
+ * @param skipTableStateCheck whether to skip the table state check
+ * @throws IOException
+ */
+ public DisableTableProcedure(
+ final MasterProcedureEnv env,
+ final TableName tableName,
+ final boolean skipTableStateCheck) throws IOException {
+ this(env, tableName, skipTableStateCheck, null);
+ }
+
+ /**
+ * Constructor
+ * @param env MasterProcedureEnv
+ * @param tableName the table to operate on
+ * @param skipTableStateCheck whether to skip the table state check
+ * @param syncLatch the latch to release once the prepare step is done (may be null)
+ * @throws IOException
+ */
+ public DisableTableProcedure(
+ final MasterProcedureEnv env,
+ final TableName tableName,
+ final boolean skipTableStateCheck,
+ final ProcedurePrepareLatch syncLatch) throws IOException {
+ this.tableName = tableName;
+ this.skipTableStateCheck = skipTableStateCheck;
+ this.user = env.getRequestUser().getUGI();
+
+ // Compatible with 1.0: we use the latch to make sure that this procedure implementation
+ // is compatible with 1.0 asynchronous operations. We need to lock the table and check
+ // whether the Disable operation could be performed (the table exists and is online; the
+ // table state is ENABLED). Once that is done, we are good to release the latch and the
+ // client can start to wait for the operation asynchronously.
+ //
+ // Note: the member syncLatch could be null if we are in a failover or recovery scenario.
+ // This is fine for backward compatibility, as a 1.0 client would not be able to peek at
+ // the procedure.
+ this.syncLatch = syncLatch;
+ }
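
Together with the HMaster change above, the syncLatch gives a two-phase handshake: the master's RPC thread blocks on the latch until the procedure's prepare step has validated the request. A condensed sketch of the caller side (executor and tableName stand in for the master's fields; this mirrors, not replaces, the HMaster code):

  // Sketch: submit, then wait only for the prepare step.
  ProcedurePrepareLatch latch = ProcedurePrepareLatch.createLatch();
  long procId = executor.submitProcedure(
      new DisableTableProcedure(executor.getEnvironment(), tableName, false, latch));
  latch.await();  // unblocks once prepareDisable() calls ProcedurePrepareLatch.releaseLatch()
  // The rest of the disable runs asynchronously under procId.
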
+
+ @Override
+ protected Flow executeFromState(final MasterProcedureEnv env, final DisableTableState state) {
+ if (isTraceEnabled()) {
+ LOG.trace(this + " execute state=" + state);
+ }
+
+ try {
+ switch (state) {
+ case DISABLE_TABLE_PREPARE:
+ if (prepareDisable(env)) {
+ setNextState(DisableTableState.DISABLE_TABLE_PRE_OPERATION);
+ } else {
+ assert isFailed() : "disable should have an exception here";
+ return Flow.NO_MORE_STATE;
+ }
+ break;
+ case DISABLE_TABLE_PRE_OPERATION:
+ preDisable(env, state);
+ setNextState(DisableTableState.DISABLE_TABLE_SET_DISABLING_TABLE_STATE);
+ break;
+ case DISABLE_TABLE_SET_DISABLING_TABLE_STATE:
+ setTableStateToDisabling(env, tableName);
+ setNextState(DisableTableState.DISABLE_TABLE_MARK_REGIONS_OFFLINE);
+ break;
+ case DISABLE_TABLE_MARK_REGIONS_OFFLINE:
+ if (markRegionsOffline(env, tableName, true) ==
+ MarkRegionOfflineOpResult.MARK_ALL_REGIONS_OFFLINE_SUCCESSFUL) {
+ setNextState(DisableTableState.DISABLE_TABLE_SET_DISABLED_TABLE_STATE);
+ } else {
+ LOG.trace("Retrying later to disable the missing regions");
+ }
+ break;
+ case DISABLE_TABLE_SET_DISABLED_TABLE_STATE:
+ setTableStateToDisabled(env, tableName);
+ setNextState(DisableTableState.DISABLE_TABLE_POST_OPERATION);
+ break;
+ case DISABLE_TABLE_POST_OPERATION:
+ postDisable(env, state);
+ return Flow.NO_MORE_STATE;
+ default:
+ throw new UnsupportedOperationException("unhandled state=" + state);
+ }
+ } catch (InterruptedException|HBaseException|IOException e) {
+ LOG.warn("Retriable error trying to disable table=" + tableName + " state=" + state, e);
+ }
+ return Flow.HAS_MORE_STATE;
+ }
+
+ @Override
+ protected void rollbackState(final MasterProcedureEnv env, final DisableTableState state)
+ throws IOException {
+ if (state == DisableTableState.DISABLE_TABLE_PREPARE) {
+ undoTableStateChange(env);
+ ProcedurePrepareLatch.releaseLatch(syncLatch, this);
+ return;
+ }
+
+ // The disable doesn't have a rollback. The execution will succeed, at some point.
+ throw new UnsupportedOperationException("unhandled state=" + state);
+ }
+
+ @Override
+ protected DisableTableState getState(final int stateId) {
+ return DisableTableState.valueOf(stateId);
+ }
+
+ @Override
+ protected int getStateId(final DisableTableState state) {
+ return state.getNumber();
+ }
+
+ @Override
+ protected DisableTableState getInitialState() {
+ return DisableTableState.DISABLE_TABLE_PREPARE;
+ }
+
+ @Override
+ protected void setNextState(final DisableTableState state) {
+ if (aborted.get()) {
+ setAbortFailure("disable-table", "abort requested");
+ } else {
+ super.setNextState(state);
+ }
+ }
+
+ @Override
+ public boolean abort(final MasterProcedureEnv env) {
+ aborted.set(true);
+ return true;
+ }
+
+ @Override
+ protected boolean acquireLock(final MasterProcedureEnv env) {
+ if (!env.isInitialized()) return false;
+ return env.getProcedureQueue().tryAcquireTableWrite(
+ tableName,
+ EventType.C_M_DISABLE_TABLE.toString());
+ }
+
+ @Override
+ protected void releaseLock(final MasterProcedureEnv env) {
+ env.getProcedureQueue().releaseTableWrite(tableName);
+ }
+
+ @Override
+ public void serializeStateData(final OutputStream stream) throws IOException {
+ super.serializeStateData(stream);
+
+ MasterProcedureProtos.DisableTableStateData.Builder disableTableMsg =
+ MasterProcedureProtos.DisableTableStateData.newBuilder()
+ .setUserInfo(MasterProcedureUtil.toProtoUserInfo(user))
+ .setTableName(ProtobufUtil.toProtoTableName(tableName))
+ .setSkipTableStateCheck(skipTableStateCheck);
+
+ disableTableMsg.build().writeDelimitedTo(stream);
+ }
+
+ @Override
+ public void deserializeStateData(final InputStream stream) throws IOException {
+ super.deserializeStateData(stream);
+
+ MasterProcedureProtos.DisableTableStateData disableTableMsg =
+ MasterProcedureProtos.DisableTableStateData.parseDelimitedFrom(stream);
+ user = MasterProcedureUtil.toUserInfo(disableTableMsg.getUserInfo());
+ tableName = ProtobufUtil.toTableName(disableTableMsg.getTableName());
+ skipTableStateCheck = disableTableMsg.getSkipTableStateCheck();
+ }
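
Everything needed to resume is carried in the serialized message, so the procedure can be rebuilt on a fresh master after a failover. A minimal sketch of that round trip over plain java.io streams (the real procedure store adds its own framing around this):

  import java.io.ByteArrayInputStream;
  import java.io.ByteArrayOutputStream;

  // 'proc' is an initialized DisableTableProcedure (setup omitted).
  ByteArrayOutputStream out = new ByteArrayOutputStream();
  proc.serializeStateData(out);

  DisableTableProcedure recovered = new DisableTableProcedure();  // no-arg ctor: syncLatch stays null
  recovered.deserializeStateData(new ByteArrayInputStream(out.toByteArray()));
  // 'recovered' now carries the same tableName, user info and skipTableStateCheck flag.
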
+
+ @Override
+ public void toStringClassDetails(StringBuilder sb) {
+ sb.append(getClass().getSimpleName());
+ sb.append(" (table=");
+ sb.append(tableName);
+ sb.append(") user=");
+ sb.append(user);
+ }
+
+ @Override
+ public TableName getTableName() {
+ return tableName;
+ }
+
+ @Override
+ public TableOperationType getTableOperationType() {
+ return TableOperationType.DISABLE;
+ }
+
+ /**
+ * Action before any real action of disabling a table. Sets the exception in the procedure
+ * instead of throwing it, to stay backward compatible with 1.0.
+ * @param env MasterProcedureEnv
+ * @return true if the table can be disabled; false if the procedure was marked failed
+ * @throws HBaseException
+ * @throws IOException
+ */
+ private boolean prepareDisable(final MasterProcedureEnv env) throws HBaseException, IOException {
+ boolean canTableBeDisabled = true;
+ if (tableName.equals(TableName.META_TABLE_NAME)) {
+ setFailure("master-disable-table", new ConstraintException("Cannot disable catalog table"));
+ canTableBeDisabled = false;
+ } else if (!MetaTableAccessor.tableExists(env.getMasterServices().getConnection(), tableName)) {
+ setFailure("master-disable-table", new TableNotFoundException(tableName));
+ canTableBeDisabled = false;
+ } else if (!skipTableStateCheck) {
+ // There could be multiple client requests trying to disable or enable
+ // the table at the same time. Ensure only the first request is honored
+ // After that, no other requests can be accepted until the table reaches
+ // DISABLED or ENABLED.
+ //
+ // Note: a quick state check should be enough for us to move forward. However, instead of
+ // calling TableStateManager.isTableState() to just check the state, we call
+ // TableStateManager.setTableStateIfInStates() to set the state to DISABLING from ENABLED.
+ // This is because an empty state is treated as enabled for clusters upgraded from 0.92. See
+ // ZKTableStateManager.setTableStateIfInStates(), which has a hack to work around
+ // this issue.
+ TableStateManager tsm =
+ env.getMasterServices().getAssignmentManager().getTableStateManager();
+ if (!tsm.setTableStateIfInStates(tableName, ZooKeeperProtos.Table.State.DISABLING,
+ ZooKeeperProtos.Table.State.DISABLING, ZooKeeperProtos.Table.State.ENABLED)) {
+ LOG.info("Table " + tableName + " isn't enabled; skipping disable");
+ setFailure("master-disable-table", new TableNotEnabledException(tableName));
+ canTableBeDisabled = false;
+ }
+ }
+
+ // We are done with the check. Future actions in this procedure could be done asynchronously.
+ ProcedurePrepareLatch.releaseLatch(syncLatch, this);
+
+ return canTableBeDisabled;
+ }
+
+ /**
+ * Rollback of table state change in prepareDisable()
+ * @param env MasterProcedureEnv
+ */
+ private void undoTableStateChange(final MasterProcedureEnv env) {
+ if (!skipTableStateCheck) {
+ try {
+ // If the state was changed, undo it.
+ if (env.getMasterServices().getAssignmentManager().getTableStateManager().isTableState(
+ tableName, ZooKeeperProtos.Table.State.DISABLING)) {
+ EnableTableProcedure.setTableStateToEnabled(env, tableName);
+ }
+ } catch (Exception e) {
+ // Ignore exception.
+ }
+ }
+ }
+
+ /**
+ * Action before disabling table.
+ * @param env MasterProcedureEnv
+ * @param state the procedure state
+ * @throws IOException
+ * @throws InterruptedException
+ */
+ protected void preDisable(final MasterProcedureEnv env, final DisableTableState state)
+ throws IOException, InterruptedException {
+ runCoprocessorAction(env, state);
+ }
+
+ /**
+ * Mark table state to Disabling
+ * @param env MasterProcedureEnv
+ * @throws IOException
+ */
+ protected static void setTableStateToDisabling(
+ final MasterProcedureEnv env,
+ final TableName tableName) throws HBaseException, IOException {
+ // Set table disabling flag up in zk.
+ env.getMasterServices().getAssignmentManager().getTableStateManager().setTableState(
+ tableName,
+ ZooKeeperProtos.Table.State.DISABLING);
+ }
+
+ /**
+ * Mark regions of the table offline with retries
+ * @param env MasterProcedureEnv
+ * @param tableName the target table
+ * @param retryRequired whether to retry if the first run failed
+ * @return whether the operation fully completed or was interrupted
+ * @throws IOException
+ */
+ protected static MarkRegionOfflineOpResult markRegionsOffline(
+ final MasterProcedureEnv env,
+ final TableName tableName,
+ final Boolean retryRequired) throws IOException {
+ // Dev consideration: add a config to control the max number of retries. For now, it is hard-coded.
+ int maxTry = (retryRequired ? 10 : 1);
+ MarkRegionOfflineOpResult operationResult =
+ MarkRegionOfflineOpResult.BULK_ASSIGN_REGIONS_FAILED;
+ do {
+ try {
+ operationResult = markRegionsOffline(env, tableName);
+ if (operationResult == MarkRegionOfflineOpResult.MARK_ALL_REGIONS_OFFLINE_SUCCESSFUL) {
+ break;
+ }
+ maxTry--;
+ } catch (Exception e) {
+ LOG.warn("Received exception while marking regions online. tries left: " + maxTry, e);
+ maxTry--;
+ if (maxTry > 0) {
+ continue; // we still have some retry left, try again.
+ }
+ throw e;
+ }
+ } while (maxTry > 0);
+
+ if (operationResult != MarkRegionOfflineOpResult.MARK_ALL_REGIONS_OFFLINE_SUCCESSFUL) {
+ LOG.warn("Some or all regions of the Table '" + tableName + "' were still online");
+ }
+
+ return operationResult;
+ }
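
Restating the retry contract for the state machine's benefit: up to 10 attempts when retryRequired is set (one otherwise); a non-successful result consumes an attempt and loops, and an exception is rethrown only once the budget is exhausted. From DISABLE_TABLE_MARK_REGIONS_OFFLINE this plays out as:

  // Sketch of the caller's side in executeFromState() above.
  MarkRegionOfflineOpResult r = markRegionsOffline(env, tableName, true /* retryRequired */);
  if (r == MarkRegionOfflineOpResult.MARK_ALL_REGIONS_OFFLINE_SUCCESSFUL) {
    // advance to DISABLE_TABLE_SET_DISABLED_TABLE_STATE
  } else {
    // stay in this state; the framework re-executes it later
  }
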
+
+ /**
+ * Mark regions of the table offline
+ * @param env MasterProcedureEnv
+ * @param tableName the target table
+ * @return whether the operation fully completed or was interrupted
+ * @throws IOException
+ */
+ private static MarkRegionOfflineOpResult markRegionsOffline(
+ final MasterProcedureEnv env,
+ final TableName tableName) throws IOException {
+ // Get list of online regions that are of this table. Regions that are
+ // already closed will not be included in this list; i.e. the returned
+ // list is not ALL regions in a table, it's all online regions according
+ // to the in-memory state on this master.
+ MarkRegionOfflineOpResult operationResult =
+ MarkRegionOfflineOpResult.MARK_ALL_REGIONS_OFFLINE_SUCCESSFUL;
+ final List<HRegionInfo> regions =
+ env.getMasterServices().getAssignmentManager().getRegionStates()
+ .getRegionsOfTable(tableName);
+ if (regions.size() > 0) {
+ LOG.info("Offlining " + regions.size() + " regions.");
+
+ BulkDisabler bd = new BulkDisabler(env, tableName, regions);
+ try {
+ if (!bd.bulkAssign()) {
+ operationResult = MarkRegionOfflineOpResult.BULK_ASSIGN_REGIONS_FAILED;
+ }
+ } catch (InterruptedException e) {
+ LOG.warn("Disable was interrupted");
+ // Preserve the interrupt.
+ Thread.currentThread().interrupt();
+ operationResult = MarkRegionOfflineOpResult.MARK_ALL_REGIONS_OFFLINE_INTERRUPTED;
+ }
+ }
+ return operationResult;
+ }
+
+ /**
+ * Mark table state to Disabled
+ * @param env MasterProcedureEnv
+ * @throws IOException
+ */
+ protected static void setTableStateToDisabled(
+ final MasterProcedureEnv env,
+ final TableName tableName) throws HBaseException, IOException {
+ // Flip the table to disabled
+ env.getMasterServices().getAssignmentManager().getTableStateManager().setTableState(
+ tableName,
+ ZooKeeperProtos.Table.State.DISABLED);
+ LOG.info("Disabled table, " + tableName + ", is completed.");
+ }
+
+ /**
+ * Action after disabling table.
+ * @param env MasterProcedureEnv
+ * @param state the procedure state
+ * @throws IOException
+ * @throws InterruptedException
+ */
+ protected void postDisable(final MasterProcedureEnv env, final DisableTableState state)
+ throws IOException, InterruptedException {
+ runCoprocessorAction(env, state);
+ }
+
+ /**
+ * The procedure could be restarted from a different machine. If the variable is null, we need to
+ * retrieve it.
+ * @return traceEnabled
+ */
+ private Boolean isTraceEnabled() {
+ if (traceEnabled == null) {
+ traceEnabled = LOG.isTraceEnabled();
+ }
+ return traceEnabled;
+ }
+
+ /**
+ * Coprocessor Action.
+ * @param env MasterProcedureEnv
+ * @param state the procedure state
+ * @throws IOException
+ * @throws InterruptedException
+ */
+ private void runCoprocessorAction(final MasterProcedureEnv env, final DisableTableState state)
+ throws IOException, InterruptedException {
+ final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
+ if (cpHost != null) {
+ user.doAs(new PrivilegedExceptionAction<Void>() {
+ @Override
+ public Void run() throws Exception {
+ switch (state) {
+ case DISABLE_TABLE_PRE_OPERATION:
+ cpHost.preDisableTableHandler(tableName);
+ break;
+ case DISABLE_TABLE_POST_OPERATION:
+ cpHost.postDisableTableHandler(tableName);
+ break;
+ default:
+ throw new UnsupportedOperationException(this + " unhandled state=" + state);
+ }
+ return null;
+ }
+ });
+ }
+ }
+
+ /**
+ * Run bulk disable.
+ */
+ private static class BulkDisabler extends BulkAssigner {
+ private final AssignmentManager assignmentManager;
+ private final List<HRegionInfo> regions;
+ private final TableName tableName;
+ private final int waitingTimeForEvents;
+
+ public BulkDisabler(final MasterProcedureEnv env, final TableName tableName,
+ final List<HRegionInfo> regions) {
+ super(env.getMasterServices());
+ this.assignmentManager = env.getMasterServices().getAssignmentManager();
+ this.tableName = tableName;
+ this.regions = regions;
+ this.waitingTimeForEvents =
+ env.getMasterServices().getConfiguration()
+ .getInt("hbase.master.event.waiting.time", 1000);
+ }
+
+ @Override
+ protected void populatePool(ExecutorService pool) {
+ RegionStates regionStates = assignmentManager.getRegionStates();
+ for (final HRegionInfo region : regions) {
+ if (regionStates.isRegionInTransition(region)
+ && !regionStates.isRegionInState(region, RegionState.State.FAILED_CLOSE)) {
+ continue;
+ }
+ pool.execute(Trace.wrap("DisableTableProcedure.BulkDisabler", new Runnable() {
+ @Override
+ public void run() {
+ assignmentManager.unassign(region);
+ }
+ }));
+ }
+ }
+
+ @Override
+ protected boolean waitUntilDone(long timeout) throws InterruptedException {
+ long startTime = EnvironmentEdgeManager.currentTime();
+ long remaining = timeout;
+ List<HRegionInfo> regions = null;
+ long lastLogTime = startTime;
+ while (!server.isStopped() && remaining > 0) {
+ Thread.sleep(waitingTimeForEvents);
+ regions = assignmentManager.getRegionStates().getRegionsOfTable(tableName);
+ long now = EnvironmentEdgeManager.currentTime();
+ // Don't log more than once every ten seconds. It's obnoxious. And only log table regions
+ // if we are waiting a while for them to go down...
+ if (LOG.isDebugEnabled() && ((now - lastLogTime) > 10000)) {
+ lastLogTime = now;
+ LOG.debug("Disable waiting until done; " + remaining + " ms remaining; " + regions);
+ }
+ if (regions.isEmpty()) break;
+ remaining = timeout - (now - startTime);
+ }
+ return regions != null && regions.isEmpty();
+ }
+ }
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java
new file mode 100644
index 0000000..989e81a
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java
@@ -0,0 +1,587 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.procedure;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.security.PrivilegedExceptionAction;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableNotDisabledException;
+import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.TableStateManager;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.executor.EventType;
+import org.apache.hadoop.hbase.exceptions.HBaseException;
+import org.apache.hadoop.hbase.master.AssignmentManager;
+import org.apache.hadoop.hbase.master.BulkAssigner;
+import org.apache.hadoop.hbase.master.GeneralBulkAssigner;
+import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
+import org.apache.hadoop.hbase.master.MasterServices;
+import org.apache.hadoop.hbase.master.RegionStates;
+import org.apache.hadoop.hbase.master.ServerManager;
+import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableState;
+import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
+import org.apache.hadoop.security.UserGroupInformation;
+
+@InterfaceAudience.Private
+public class EnableTableProcedure
+ extends StateMachineProcedure<MasterProcedureEnv, EnableTableState>
+ implements TableProcedureInterface {
+ private static final Log LOG = LogFactory.getLog(EnableTableProcedure.class);
+
+ private final AtomicBoolean aborted = new AtomicBoolean(false);
+
+ // This is for backward compatibility with 1.0 asynchronous operations.
+ private final ProcedurePrepareLatch syncLatch;
+
+ private TableName tableName;
+ private boolean skipTableStateCheck;
+ private UserGroupInformation user;
+
+ private Boolean traceEnabled = null;
+
+ public EnableTableProcedure() {
+ syncLatch = null;
+ }
+
+ /**
+ * Constructor
+ * @param env MasterProcedureEnv
+ * @param tableName the table to operate on
+ * @param skipTableStateCheck whether to skip the table state check
+ * @throws IOException
+ */
+ public EnableTableProcedure(
+ final MasterProcedureEnv env,
+ final TableName tableName,
+ final boolean skipTableStateCheck) throws IOException {
+ this(env, tableName, skipTableStateCheck, null);
+ }
+
+ /**
+ * Constructor
+ * @param env MasterProcedureEnv
+ * @param tableName the table to operate on
+ * @param skipTableStateCheck whether to skip the table state check
+ * @param syncLatch the latch to release once the prepare step is done (may be null)
+ * @throws IOException
+ */
+ public EnableTableProcedure(
+ final MasterProcedureEnv env,
+ final TableName tableName,
+ final boolean skipTableStateCheck,
+ final ProcedurePrepareLatch syncLatch) throws IOException {
+ this.tableName = tableName;
+ this.skipTableStateCheck = skipTableStateCheck;
+ this.user = env.getRequestUser().getUGI();
+
+ // Compatible with 1.0: we use the latch to make sure that this procedure implementation
+ // is compatible with 1.0 asynchronous operations. We need to lock the table and check
+ // whether the Enable operation could be performed (the table exists and is offline; the
+ // table state is DISABLED). Once that is done, we are good to release the latch and the
+ // client can start to wait for the operation asynchronously.
+ //
+ // Note: the member syncLatch could be null if we are in a failover or recovery scenario.
+ // This is fine for backward compatibility, as a 1.0 client would not be able to peek at
+ // the procedure.
+ this.syncLatch = syncLatch;
+ }
+
+ @Override
+ protected Flow executeFromState(final MasterProcedureEnv env, final EnableTableState state) {
+ if (isTraceEnabled()) {
+ LOG.trace(this + " execute state=" + state);
+ }
+
+ try {
+ switch (state) {
+ case ENABLE_TABLE_PREPARE:
+ if (prepareEnable(env)) {
+ setNextState(EnableTableState.ENABLE_TABLE_PRE_OPERATION);
+ } else {
+ assert isFailed() : "enable should have an exception here";
+ return Flow.NO_MORE_STATE;
+ }
+ break;
+ case ENABLE_TABLE_PRE_OPERATION:
+ preEnable(env, state);
+ setNextState(EnableTableState.ENABLE_TABLE_SET_ENABLING_TABLE_STATE);
+ break;
+ case ENABLE_TABLE_SET_ENABLING_TABLE_STATE:
+ setTableStateToEnabling(env, tableName);
+ setNextState(EnableTableState.ENABLE_TABLE_MARK_REGIONS_ONLINE);
+ break;
+ case ENABLE_TABLE_MARK_REGIONS_ONLINE:
+ markRegionsOnline(env, tableName, true);
+ setNextState(EnableTableState.ENABLE_TABLE_SET_ENABLED_TABLE_STATE);
+ break;
+ case ENABLE_TABLE_SET_ENABLED_TABLE_STATE:
+ setTableStateToEnabled(env, tableName);
+ setNextState(EnableTableState.ENABLE_TABLE_POST_OPERATION);
+ break;
+ case ENABLE_TABLE_POST_OPERATION:
+ postEnable(env, state);
+ return Flow.NO_MORE_STATE;
+ default:
+ throw new UnsupportedOperationException("unhandled state=" + state);
+ }
+ } catch (InterruptedException|HBaseException|IOException e) {
+ LOG.error("Error trying to enable table=" + tableName + " state=" + state, e);
+ setFailure("master-enable-table", e);
+ }
+ return Flow.HAS_MORE_STATE;
+ }
+
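+ // The framework calls rollbackState() once per executed state, in reverse order;
+ // each case below undoes the side effects of the matching forward step.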
+ @Override
+ protected void rollbackState(final MasterProcedureEnv env, final EnableTableState state)
+ throws IOException {
+ if (isTraceEnabled()) {
+ LOG.trace(this + " rollback state=" + state);
+ }
+ try {
+ switch (state) {
+ case ENABLE_TABLE_POST_OPERATION:
+ // TODO-MAYBE: call the coprocessor event to undo (eg. DisableTableProcedure.preDisable())?
+ break;
+ case ENABLE_TABLE_SET_ENABLED_TABLE_STATE:
+ DisableTableProcedure.setTableStateToDisabling(env, tableName);
+ break;
+ case ENABLE_TABLE_MARK_REGIONS_ONLINE:
+ markRegionsOfflineDuringRecovery(env);
+ break;
+ case ENABLE_TABLE_SET_ENABLING_TABLE_STATE:
+ DisableTableProcedure.setTableStateToDisabled(env, tableName);
+ break;
+ case ENABLE_TABLE_PRE_OPERATION:
+ // TODO-MAYBE: call the coprocessor event to undo (eg. DisableTableProcedure.postDisable())?
+ break;
+ case ENABLE_TABLE_PREPARE:
+ // Nothing to undo for this state.
+ // We do need to count down the latch so that we don't get stuck.
+ ProcedurePrepareLatch.releaseLatch(syncLatch, this);
+ break;
+ default:
+ throw new UnsupportedOperationException("unhandled state=" + state);
+ }
+ } catch (HBaseException e) {
+ LOG.warn("Failed enable table rollback attempt step=" + state + " table=" + tableName, e);
+ throw new IOException(e);
+ } catch (IOException e) {
+ // This will be retried. Unless there is a bug in the code,
+ // this should be just a "temporary error" (e.g. network down)
+ LOG.warn("Failed enable table rollback attempt step=" + state + " table=" + tableName, e);
+ throw e;
+ }
+ }
+
+ @Override
+ protected EnableTableState getState(final int stateId) {
+ return EnableTableState.valueOf(stateId);
+ }
+
+ @Override
+ protected int getStateId(final EnableTableState state) {
+ return state.getNumber();
+ }
+
+ @Override
+ protected EnableTableState getInitialState() {
+ return EnableTableState.ENABLE_TABLE_PREPARE;
+ }
+
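+ // Intercept state transitions: if an abort was requested, fail the procedure at the
+ // next step boundary instead of advancing to the requested state.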
+ @Override
+ protected void setNextState(final EnableTableState state) {
+ if (aborted.get()) {
+ setAbortFailure("Enable-table", "abort requested");
+ } else {
+ super.setNextState(state);
+ }
+ }
+
+ @Override
+ public boolean abort(final MasterProcedureEnv env) {
+ aborted.set(true);
+ return true;
+ }
+
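+ // Serialize with other procedures on the same table through the exclusive table
+ // write lock; returning false while the master is still initializing makes the
+ // framework retry the acquisition later.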
+ @Override
+ protected boolean acquireLock(final MasterProcedureEnv env) {
+ if (!env.isInitialized()) return false;
+ return env.getProcedureQueue().tryAcquireTableWrite(
+ tableName,
+ EventType.C_M_ENABLE_TABLE.toString());
+ }
+
+ @Override
+ protected void releaseLock(final MasterProcedureEnv env) {
+ env.getProcedureQueue().releaseTableWrite(tableName);
+ }
+
+ @Override
+ public void serializeStateData(final OutputStream stream) throws IOException {
+ super.serializeStateData(stream);
+
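+ // Persist everything needed to resume after a master restart: the requesting user,
+ // the table name and the skip-state-check flag (written length-delimited).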
+ MasterProcedureProtos.EnableTableStateData.Builder enableTableMsg =
+ MasterProcedureProtos.EnableTableStateData.newBuilder()
+ .setUserInfo(MasterProcedureUtil.toProtoUserInfo(user))
+ .setTableName(ProtobufUtil.toProtoTableName(tableName))
+ .setSkipTableStateCheck(skipTableStateCheck);
+
+ enableTableMsg.build().writeDelimitedTo(stream);
+ }
+
+ @Override
+ public void deserializeStateData(final InputStream stream) throws IOException {
+ super.deserializeStateData(stream);
+
+ MasterProcedureProtos.EnableTableStateData enableTableMsg =
+ MasterProcedureProtos.EnableTableStateData.parseDelimitedFrom(stream);
+ user = MasterProcedureUtil.toUserInfo(enableTableMsg.getUserInfo());
+ tableName = ProtobufUtil.toTableName(enableTableMsg.getTableName());
+ skipTableStateCheck = enableTableMsg.getSkipTableStateCheck();
+ }
+
+ @Override
+ public void toStringClassDetails(StringBuilder sb) {
+ sb.append(getClass().getSimpleName());
+ sb.append(" (table=");
+ sb.append(tableName);
+ sb.append(") user=");
+ sb.append(user);
+ }
+
+ @Override
+ public TableName getTableName() {
+ return tableName;
+ }
+
+ @Override
+ public TableOperationType getTableOperationType() {
+ return TableOperationType.ENABLE;
+ }
+
+
+ /**
+ * Action before any real action of enabling table. Set the exception in the procedure instead
+ * of throwing it; this keeps the procedure backward compatible with 1.0.
+ * @param env MasterProcedureEnv
+ * @return whether the table passes the necessary checks
+ * @throws IOException
+ */
+ private boolean prepareEnable(final MasterProcedureEnv env) throws IOException {
+ boolean canTableBeEnabled = true;
+
+ // Check whether table exists
+ if (!MetaTableAccessor.tableExists(env.getMasterServices().getConnection(), tableName)) {
+ setFailure("master-enable-table", new TableNotFoundException(tableName));
+ canTableBeEnabled = false;
+ } else if (!skipTableStateCheck) {
+ // There could be multiple client requests trying to disable or enable
+ // the table at the same time. Ensure only the first request is honored;
+ // after that, no other requests can be accepted until the table reaches
+ // the DISABLED or ENABLED state.
+ //
+ // Note: in the 1.0 release we called TableStateManager.setTableStateIfInStates() to set
+ // the state to ENABLING from DISABLED. That implementation predates the table lock.
+ // With the table lock there is no need to set the state here (it will be set later on);
+ // a quick state check is enough for us to move forward.
+ TableStateManager tsm = env.getMasterServices().getAssignmentManager().getTableStateManager();
+ if (!tsm.isTableState(tableName, ZooKeeperProtos.Table.State.DISABLED)) {
+ LOG.info("Table " + tableName + " isn't disabled; skipping enable");
+ setFailure("master-enable-table", new TableNotDisabledException(this.tableName));
+ canTableBeEnabled = false;
+ }
+ }
+
+ // We are done with the check; future actions in this procedure can be done asynchronously.
+ ProcedurePrepareLatch.releaseLatch(syncLatch, this);
+
+ return canTableBeEnabled;
+ }
+
+ /**
+ * Action before enabling table.
+ * @param env MasterProcedureEnv
+ * @param state the procedure state
+ * @throws IOException
+ * @throws InterruptedException
+ */
+ private void preEnable(final MasterProcedureEnv env, final EnableTableState state)
+ throws IOException, InterruptedException {
+ runCoprocessorAction(env, state);
+ }
+
+ /**
+ * Mark the table state as ENABLING
+ * @param env MasterProcedureEnv
+ * @param tableName the target table
+ * @throws IOException
+ */
+ protected static void setTableStateToEnabling(
+ final MasterProcedureEnv env,
+ final TableName tableName) throws HBaseException, IOException {
+ // Set the table's ENABLING state flag up in zk.
+ LOG.info("Attempting to enable the table " + tableName);
+ env.getMasterServices().getAssignmentManager().getTableStateManager().setTableState(
+ tableName,
+ ZooKeeperProtos.Table.State.ENABLING);
+ }
+
+ /**
+ * Mark offline regions of the table online with retry
+ * @param env MasterProcedureEnv
+ * @param tableName the target table
+ * @param retryRequired whether to retry if the first run failed
+ * @throws IOException
+ */
+ protected static void markRegionsOnline(
+ final MasterProcedureEnv env,
+ final TableName tableName,
+ final Boolean retryRequired) throws IOException {
+ // This is best effort approach to make all regions of a table online. If we fail to do
+ // that, it is ok that the table has some offline regions; user can fix it manually.
+
+ // Dev consideration: add a config to control the max number of retries. For now, it is hard-coded.
+ int maxTry = (retryRequired ? 10 : 1);
+ boolean done = false;
+
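+ // Retry loop: each pass either completes the assignment, consumes one try, or
+ // rethrows the last exception once no tries are left.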
+ do {
+ try {
+ done = markRegionsOnline(env, tableName);
+ if (done) {
+ break;
+ }
+ maxTry--;
+ } catch (Exception e) {
+ LOG.warn("Received exception while marking regions online. tries left: " + maxTry, e);
+ maxTry--;
+ if (maxTry > 0) {
+ continue; // we still have some retry left, try again.
+ }
+ throw e;
+ }
+ } while (maxTry > 0);
+
+ if (!done) {
+ LOG.warn("Some or all regions of the Table '" + tableName + "' were offline");
+ }
+ }
+
+ /**
+ * Mark offline regions of the table online
+ * @param env MasterProcedureEnv
+ * @param tableName the target table
+ * @return true if all regions were brought online; false if the operation was interrupted or could not complete
+ * @throws IOException
+ */
+ private static boolean markRegionsOnline(final MasterProcedureEnv env, final TableName tableName)
+ throws IOException {
+ final AssignmentManager assignmentManager = env.getMasterServices().getAssignmentManager();
+ final MasterServices masterServices = env.getMasterServices();
+ final ServerManager serverManager = masterServices.getServerManager();
+ boolean done = false;
+ // Get the regions of this table. We're done when all of the listed
+ // regions are online.
+ List<Pair<HRegionInfo, ServerName>> tableRegionsAndLocations;
+
+ if (TableName.META_TABLE_NAME.equals(tableName)) {
+ tableRegionsAndLocations =
+ new MetaTableLocator().getMetaRegionsAndLocations(masterServices.getZooKeeper());
+ } else {
+ tableRegionsAndLocations =
+ MetaTableAccessor.getTableRegionsAndLocations(
+ masterServices.getZooKeeper(), masterServices.getConnection(), tableName, true);
+ }
+
+ int countOfRegionsInTable = tableRegionsAndLocations.size();
+ Map<HRegionInfo, ServerName> regionsToAssign =
+ regionsToAssignWithServerName(env, tableRegionsAndLocations);
+
+ // We may need to create region entries for replicas that were never recorded in meta.
+ List<HRegionInfo> unrecordedReplicas =
+ AssignmentManager.replicaRegionsNotRecordedInMeta(new HashSet<HRegionInfo>(
+ regionsToAssign.keySet()), masterServices);
+ Map<ServerName, List<HRegionInfo>> srvToUnassignedRegs =
+ assignmentManager.getBalancer().roundRobinAssignment(unrecordedReplicas,
+ serverManager.getOnlineServersList());
+ if (srvToUnassignedRegs != null) {
+ for (Map.Entry<ServerName, List<HRegionInfo>> entry : srvToUnassignedRegs.entrySet()) {
+ for (HRegionInfo h : entry.getValue()) {
+ regionsToAssign.put(h, entry.getKey());
+ }
+ }
+ }
+
+ int offlineRegionsCount = regionsToAssign.size();
+
+ LOG.info("Table '" + tableName + "' has " + countOfRegionsInTable + " regions, of which "
+ + offlineRegionsCount + " are offline.");
+ if (offlineRegionsCount == 0) {
+ return true;
+ }
+
+ List<ServerName> onlineServers = serverManager.createDestinationServersList();
+ Map<ServerName, List<HRegionInfo>> bulkPlan =
+ env.getMasterServices().getAssignmentManager().getBalancer()
+ .retainAssignment(regionsToAssign, onlineServers);
+ if (bulkPlan != null) {
+ LOG.info("Bulk assigning " + offlineRegionsCount + " region(s) across " + bulkPlan.size()
+ + " server(s), retainAssignment=true");
+
+ BulkAssigner ba = new GeneralBulkAssigner(masterServices, bulkPlan, assignmentManager, true);
+ try {
+ if (ba.bulkAssign()) {
+ done = true;
+ }
+ } catch (InterruptedException e) {
+ LOG.warn("Enable operation was interrupted when enabling table '" + tableName + "'");
+ // Preserve the interrupt.
+ Thread.currentThread().interrupt();
+ }
+ } else {
+ LOG.info("Balancer was unable to find suitable servers for table " + tableName
+ + ", leaving unassigned");
+ }
+ return done;
+ }
+
+ /**
+ * Mark regions of the table offline during recovery
+ * @param env MasterProcedureEnv
+ */
+ private void markRegionsOfflineDuringRecovery(final MasterProcedureEnv env) {
+ try {
+ // This is a best-effort attempt. We will move on even if it does not succeed,
+ // retrying several times before giving up.
+ DisableTableProcedure.markRegionsOffline(env, tableName, true);
+ } catch (Exception e) {
+ LOG.debug("Failed to offline all regions of table " + tableName + ". Ignoring", e);
+ }
+ }
+
+ /**
+ * Mark the table state as ENABLED
+ * @param env MasterProcedureEnv
+ * @param tableName the target table
+ * @throws IOException
+ */
+ protected static void setTableStateToEnabled(
+ final MasterProcedureEnv env,
+ final TableName tableName) throws HBaseException, IOException {
+ // Flip the table to Enabled
+ env.getMasterServices().getAssignmentManager().getTableStateManager().setTableState(
+ tableName,
+ ZooKeeperProtos.Table.State.ENABLED);
+ LOG.info("Table '" + tableName + "' was successfully enabled.");
+ }
+
+ /**
+ * Action after enabling table.
+ * @param env MasterProcedureEnv
+ * @param state the procedure state
+ * @throws IOException
+ * @throws InterruptedException
+ */
+ private void postEnable(final MasterProcedureEnv env, final EnableTableState state)
+ throws IOException, InterruptedException {
+ runCoprocessorAction(env, state);
+ }
+
+ /**
+ * The procedure could be restarted from a different machine, in which case the cached
+ * flag is null and we need to retrieve it from the logger again.
+ * @return traceEnabled
+ */
+ private Boolean isTraceEnabled() {
+ if (traceEnabled == null) {
+ traceEnabled = LOG.isTraceEnabled();
+ }
+ return traceEnabled;
+ }
+
+ /**
+ * @param env MasterProcedureEnv
+ * @param regionsInMeta the table regions and their locations as recorded in meta
+ * @return List of regions neither in transition nor assigned.
+ * @throws IOException
+ */
+ private static Map<HRegionInfo, ServerName> regionsToAssignWithServerName(
+ final MasterProcedureEnv env,
+ final List<Pair<HRegionInfo, ServerName>> regionsInMeta) throws IOException {
+ Map<HRegionInfo, ServerName> regionsToAssign =
+ new HashMap<HRegionInfo, ServerName>(regionsInMeta.size());
+ RegionStates regionStates = env.getMasterServices().getAssignmentManager().getRegionStates();
+ for (Pair<HRegionInfo, ServerName> regionLocation : regionsInMeta) {
+ HRegionInfo hri = regionLocation.getFirst();
+ ServerName sn = regionLocation.getSecond();
+ if (regionStates.isRegionOffline(hri)) {
+ regionsToAssign.put(hri, sn);
+ } else {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Skipping assign for the region " + hri + " during enable table "
+ + hri.getTable() + " because its already in tranition or assigned.");
+ }
+ }
+ }
+ return regionsToAssign;
+ }
+
+ /**
+ * Coprocessor Action.
+ * @param env MasterProcedureEnv
+ * @param state the procedure state
+ * @throws IOException
+ * @throws InterruptedException
+ */
+ private void runCoprocessorAction(final MasterProcedureEnv env, final EnableTableState state)
+ throws IOException, InterruptedException {
+ final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
+ if (cpHost != null) {
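+ // Run the coprocessor hooks as the user who originally submitted the request.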
+ user.doAs(new PrivilegedExceptionAction<Void>() {
+ @Override
+ public Void run() throws Exception {
+ switch (state) {
+ case ENABLE_TABLE_PRE_OPERATION:
+ cpHost.preEnableTableHandler(getTableName());
+ break;
+ case ENABLE_TABLE_POST_OPERATION:
+ cpHost.postEnableTableHandler(getTableName());
+ break;
+ default:
+ throw new UnsupportedOperationException(this + " unhandled state=" + state);
+ }
+ return null;
+ }
+ });
+ }
+ }
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.java
index 76ca094..6928d02 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.java
@@ -18,9 +18,9 @@
package org.apache.hadoop.hbase.master.procedure;
+import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
-import org.apache.hadoop.hbase.TableName;
/**
* Procedures that operates on a specific Table (e.g. create, delete, snapshot, ...)
@@ -29,7 +29,9 @@ import org.apache.hadoop.hbase.TableName;
@InterfaceAudience.Private
@InterfaceStability.Evolving
public interface TableProcedureInterface {
- public enum TableOperationType { CREATE, DELETE, EDIT, READ };
+ public enum TableOperationType {
+ CREATE, DELETE, DISABLE, EDIT, ENABLE, READ,
+ };
/**
* @return the name of the table the procedure is operating on
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
index 4b1ab94..31fbc52 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableStateManager;
import org.apache.hadoop.hbase.client.MetaScanner;
import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor;
import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitorBase;
@@ -41,6 +42,7 @@ import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
+import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
import org.apache.hadoop.hbase.util.ModifyRegionUtils;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
@@ -172,6 +174,18 @@ public class MasterProcedureTestingUtility {
return actualRegCount.get();
}
+ public static void validateTableIsEnabled(final HMaster master, final TableName tableName)
+ throws IOException {
+ TableStateManager tsm = master.getAssignmentManager().getTableStateManager();
+ assertTrue(tsm.isTableState(tableName, ZooKeeperProtos.Table.State.ENABLED));
+ }
+
+ public static void validateTableIsDisabled(final HMaster master, final TableName tableName)
+ throws IOException {
+ TableStateManager tsm = master.getAssignmentManager().getTableStateManager();
+ assertTrue(tsm.isTableState(tableName, ZooKeeperProtos.Table.State.DISABLED));
+ }
+
public static <TState> void testRecoveryAndDoubleExecution(
final ProcedureExecutor<MasterProcedureEnv> procExec, final long procId,
final int numSteps, final TState[] states) throws Exception {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDisableTableProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDisableTableProcedure.java
new file mode 100644
index 0000000..d588a3b
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDisableTableProcedure.java
@@ -0,0 +1,181 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.procedure;
+
+import static org.junit.Assert.assertTrue;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableNotEnabledException;
+import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
+import org.apache.hadoop.hbase.procedure2.ProcedureResult;
+import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableState;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
+
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(MediumTests.class)
+public class TestDisableTableProcedure {
+ private static final Log LOG = LogFactory.getLog(TestDisableTableProcedure.class);
+
+ protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+
+ private static void setupConf(Configuration conf) {
+ conf.setInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, 1);
+ }
+
+ @BeforeClass
+ public static void setupCluster() throws Exception {
+ setupConf(UTIL.getConfiguration());
+ UTIL.startMiniCluster(1);
+ }
+
+ @AfterClass
+ public static void cleanupTest() throws Exception {
+ try {
+ UTIL.shutdownMiniCluster();
+ } catch (Exception e) {
+ LOG.warn("failure shutting down cluster", e);
+ }
+ }
+
+ @Before
+ public void setup() throws Exception {
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(getMasterProcedureExecutor(), false);
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(getMasterProcedureExecutor(), false);
+ for (HTableDescriptor htd: UTIL.getHBaseAdmin().listTables()) {
+ LOG.info("Tear down, remove table=" + htd.getTableName());
+ UTIL.deleteTable(htd.getTableName());
+ }
+ }
+
+ @Test(timeout = 60000)
+ public void testDisableTable() throws Exception {
+ final TableName tableName = TableName.valueOf("testDisableTable");
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+
+ MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f1", "f2");
+
+ // Disable the table
+ long procId = procExec.submitProcedure(
+ new DisableTableProcedure(procExec.getEnvironment(), tableName, false));
+ // Wait for completion
+ ProcedureTestingUtility.waitProcedure(procExec, procId);
+ ProcedureTestingUtility.assertProcNotFailed(procExec, procId);
+ MasterProcedureTestingUtility.validateTableIsDisabled(UTIL.getHBaseCluster().getMaster(),
+ tableName);
+ }
+
+ @Test(timeout = 60000)
+ public void testDisableTableMultipleTimes() throws Exception {
+ final TableName tableName = TableName.valueOf("testDisableTableMultipleTimes");
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+
+ MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f1", "f2");
+
+ // Disable the table
+ long procId1 = procExec.submitProcedure(new DisableTableProcedure(
+ procExec.getEnvironment(), tableName, false));
+ // Wait for completion
+ ProcedureTestingUtility.waitProcedure(procExec, procId1);
+ ProcedureTestingUtility.assertProcNotFailed(procExec, procId1);
+ MasterProcedureTestingUtility.validateTableIsDisabled(UTIL.getHBaseCluster().getMaster(),
+ tableName);
+
+ // Disable the table again - expect failure
+ long procId2 = procExec.submitProcedure(new DisableTableProcedure(
+ procExec.getEnvironment(), tableName, false));
+ // Wait for completion
+ ProcedureTestingUtility.waitProcedure(procExec, procId2);
+ ProcedureResult result = procExec.getResult(procId2);
+ assertTrue(result.isFailed());
+ LOG.debug("Disable failed with exception: " + result.getException());
+ assertTrue(result.getException().getCause() instanceof TableNotEnabledException);
+
+ // Disable the table - expect failure from ProcedurePrepareLatch
+ try {
+ final ProcedurePrepareLatch prepareLatch = new ProcedurePrepareLatch.CompatibilityLatch();
+
+ long procId3 = procExec.submitProcedure(new DisableTableProcedure(
+ procExec.getEnvironment(), tableName, false, prepareLatch));
+ prepareLatch.await();
+ Assert.fail("Disable should throw exception through latch.");
+ } catch (TableNotEnabledException tnee) {
+ // Expected
+ LOG.debug("Disable failed with expected exception.");
+ }
+
+ // Disable the table again, skipping the table state check (simulates a recovery scenario)
+ long procId4 = procExec.submitProcedure(new DisableTableProcedure(
+ procExec.getEnvironment(), tableName, true));
+ // Wait for completion
+ ProcedureTestingUtility.waitProcedure(procExec, procId4);
+ ProcedureTestingUtility.assertProcNotFailed(procExec, procId4);
+ MasterProcedureTestingUtility.validateTableIsDisabled(UTIL.getHBaseCluster().getMaster(),
+ tableName);
+ }
+
+ @Test(timeout=60000)
+ public void testRecoveryAndDoubleExecution() throws Exception {
+ final TableName tableName = TableName.valueOf("testRecoveryAndDoubleExecution");
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+
+ final byte[][] splitKeys = new byte[][] {
+ Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c")
+ };
+ MasterProcedureTestingUtility.createTable(procExec, tableName, splitKeys, "f1", "f2");
+
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
+
+ // Start the Disable procedure && kill the executor
+ long procId =
+ procExec.submitProcedure(new DisableTableProcedure(procExec.getEnvironment(), tableName,
+ false));
+
+ // Restart the executor and execute the step twice
+ int numberOfSteps = DisableTableState.values().length;
+ MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(
+ procExec,
+ procId,
+ numberOfSteps,
+ DisableTableState.values());
+ MasterProcedureTestingUtility.validateTableIsDisabled(UTIL.getHBaseCluster().getMaster(),
+ tableName);
+ }
+
+ private ProcedureExecutor<MasterProcedureEnv> getMasterProcedureExecutor() {
+ return UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor();
+ }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestEnableTableProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestEnableTableProcedure.java
new file mode 100644
index 0000000..8964adc
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestEnableTableProcedure.java
@@ -0,0 +1,192 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.procedure;
+
+import static org.junit.Assert.assertTrue;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableNotDisabledException;
+import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
+import org.apache.hadoop.hbase.procedure2.ProcedureResult;
+import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableState;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
+
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(MediumTests.class)
+public class TestEnableTableProcedure {
+ private static final Log LOG = LogFactory.getLog(TestEnableTableProcedure.class);
+
+ protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+
+ private static void setupConf(Configuration conf) {
+ conf.setInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, 1);
+ }
+
+ @BeforeClass
+ public static void setupCluster() throws Exception {
+ setupConf(UTIL.getConfiguration());
+ UTIL.startMiniCluster(1);
+ }
+
+ @AfterClass
+ public static void cleanupTest() throws Exception {
+ try {
+ UTIL.shutdownMiniCluster();
+ } catch (Exception e) {
+ LOG.warn("failure shutting down cluster", e);
+ }
+ }
+
+ @Before
+ public void setup() throws Exception {
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(getMasterProcedureExecutor(), false);
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(getMasterProcedureExecutor(), false);
+ for (HTableDescriptor htd: UTIL.getHBaseAdmin().listTables()) {
+ LOG.info("Tear down, remove table=" + htd.getTableName());
+ UTIL.deleteTable(htd.getTableName());
+ }
+ }
+
+ @Test(timeout = 60000)
+ public void testEnableTable() throws Exception {
+ final TableName tableName = TableName.valueOf("testEnableTable");
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+
+ MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f1", "f2");
+ UTIL.getHBaseAdmin().disableTable(tableName);
+
+ // Enable the table
+ long procId = procExec.submitProcedure(
+ new EnableTableProcedure(procExec.getEnvironment(), tableName, false));
+ // Wait for completion
+ ProcedureTestingUtility.waitProcedure(procExec, procId);
+ ProcedureTestingUtility.assertProcNotFailed(procExec, procId);
+ MasterProcedureTestingUtility.validateTableIsEnabled(UTIL.getHBaseCluster().getMaster(),
+ tableName);
+ }
+
+ @Test(timeout=60000, expected=TableNotDisabledException.class)
+ public void testEnableNonDisabledTable() throws Exception {
+ final TableName tableName = TableName.valueOf("testEnableNonExistingTable");
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+
+ MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f1", "f2");
+
+ // Enable the table - expect failure
+ long procId1 = procExec.submitProcedure(
+ new EnableTableProcedure(procExec.getEnvironment(), tableName, false));
+ ProcedureTestingUtility.waitProcedure(procExec, procId1);
+
+ ProcedureResult result = procExec.getResult(procId1);
+ assertTrue(result.isFailed());
+ LOG.debug("Enable failed with exception: " + result.getException());
+ assertTrue(result.getException().getCause() instanceof TableNotDisabledException);
+
+ // Enable the table, skipping the table state check (simulates a recovery scenario)
+ long procId2 = procExec.submitProcedure(
+ new EnableTableProcedure(procExec.getEnvironment(), tableName, true));
+ // Wait for completion
+ ProcedureTestingUtility.waitProcedure(procExec, procId2);
+ ProcedureTestingUtility.assertProcNotFailed(procExec, procId2);
+
+ // Enable the table - expect failure from ProcedurePrepareLatch
+ final ProcedurePrepareLatch prepareLatch = new ProcedurePrepareLatch.CompatibilityLatch();
+ long procId3 = procExec.submitProcedure(
+ new EnableTableProcedure(procExec.getEnvironment(), tableName, false, prepareLatch));
+ prepareLatch.await();
+ Assert.fail("Enable should throw exception through latch.");
+ }
+
+ @Test(timeout = 60000)
+ public void testRecoveryAndDoubleExecution() throws Exception {
+ final TableName tableName = TableName.valueOf("testRecoveryAndDoubleExecution");
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+
+ final byte[][] splitKeys = new byte[][] {
+ Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c")
+ };
+ MasterProcedureTestingUtility.createTable(procExec, tableName, splitKeys, "f1", "f2");
+ UTIL.getHBaseAdmin().disableTable(tableName);
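+ // Make sure the disable operation has fully completed before injecting kills, so
+ // the kill toggles only affect the Enable procedure under test.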
+ ProcedureTestingUtility.waitNoProcedureRunning(procExec);
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
+
+ // Start the Enable procedure && kill the executor
+ long procId = procExec.submitProcedure(
+ new EnableTableProcedure(procExec.getEnvironment(), tableName, false));
+
+ // Restart the executor and execute the step twice
+ int numberOfSteps = EnableTableState.values().length;
+ MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(
+ procExec,
+ procId,
+ numberOfSteps,
+ EnableTableState.values());
+ MasterProcedureTestingUtility.validateTableIsEnabled(UTIL.getHBaseCluster().getMaster(),
+ tableName);
+ }
+
+ @Test(timeout = 60000)
+ public void testRollbackAndDoubleExecution() throws Exception {
+ final TableName tableName = TableName.valueOf("testRollbackAndDoubleExecution");
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+
+ final byte[][] splitKeys = new byte[][] {
+ Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c")
+ };
+ MasterProcedureTestingUtility.createTable(procExec, tableName, splitKeys, "f1", "f2");
+ UTIL.getHBaseAdmin().disableTable(tableName);
+ ProcedureTestingUtility.waitNoProcedureRunning(procExec);
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
+
+ // Start the Enable procedure && kill the executor
+ long procId = procExec.submitProcedure(
+ new EnableTableProcedure(procExec.getEnvironment(), tableName, false));
+
+ int numberOfSteps = EnableTableState.values().length - 2; // fail in the middle of the procedure
+ MasterProcedureTestingUtility.testRollbackAndDoubleExecution(
+ procExec,
+ procId,
+ numberOfSteps,
+ EnableTableState.values());
+ MasterProcedureTestingUtility.validateTableIsDisabled(UTIL.getHBaseCluster().getMaster(),
+ tableName);
+ }
+
+ private ProcedureExecutor<MasterProcedureEnv> getMasterProcedureExecutor() {
+ return UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor();
+ }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java
index e157d61..8a3a891 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java
@@ -37,7 +37,10 @@ import org.apache.hadoop.hbase.procedure2.store.ProcedureStore;
import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore;
import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableState;
import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableState;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableState;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableState;
import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.ModifyRegionUtils;
@@ -211,6 +214,79 @@ public class TestMasterFailoverWithProcedures {
}
// ==========================================================================
+ // Test Disable Table
+ // ==========================================================================
+ @Test(timeout=60000)
+ public void testDisableTableWithFailover() throws Exception {
+ // TODO: Should we try every step? (master failover takes a long time)
+ // This is already covered by TestDisableTableProcedure,
+ // but there only the executor/store is restarted, not the master.
+ // Without a Master restart we may not find bugs in the procedure code,
+ // such as a missing "wait" for resources (e.g. RS) to be available.
+ testDisableTableWithFailoverAtStep(
+ DisableTableState.DISABLE_TABLE_MARK_REGIONS_OFFLINE.ordinal());
+ }
+
+ private void testDisableTableWithFailoverAtStep(final int step) throws Exception {
+ final TableName tableName = TableName.valueOf("testDisableTableWithFailoverAtStep" + step);
+
+ // create the table
+ final byte[][] splitKeys = new byte[][] {
+ Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c")
+ };
+ MasterProcedureTestingUtility.createTable(
+ getMasterProcedureExecutor(), tableName, splitKeys, "f1", "f2");
+
+ ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
+
+ // Start the Disable procedure && kill the executor
+ long procId = procExec.submitProcedure(
+ new DisableTableProcedure(procExec.getEnvironment(), tableName, false));
+ testRecoveryAndDoubleExecution(UTIL, procId, step, DisableTableState.values());
+
+ MasterProcedureTestingUtility.validateTableIsDisabled(
+ UTIL.getHBaseCluster().getMaster(), tableName);
+ }
+
+ // ==========================================================================
+ // Test Enable Table
+ // ==========================================================================
+ @Test(timeout=60000)
+ public void testEnableTableWithFailover() throws Exception {
+ // TODO: Should we try every step? (master failover takes a long time)
+ // This is already covered by TestEnableTableProcedure,
+ // but there only the executor/store is restarted, not the master.
+ // Without a Master restart we may not find bugs in the procedure code,
+ // such as a missing "wait" for resources (e.g. RS) to be available.
+ testEnableTableWithFailoverAtStep(
+ EnableTableState.ENABLE_TABLE_MARK_REGIONS_ONLINE.ordinal());
+ }
+
+ private void testEnableTableWithFailoverAtStep(final int step) throws Exception {
+ final TableName tableName = TableName.valueOf("testEnableTableWithFailoverAtStep" + step);
+
+ // create the table
+ final byte[][] splitKeys = new byte[][] {
+ Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c")
+ };
+ MasterProcedureTestingUtility.createTable(
+ getMasterProcedureExecutor(), tableName, splitKeys, "f1", "f2");
+ UTIL.getHBaseAdmin().disableTable(tableName);
+
+ ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
+
+ // Start the Enable procedure && kill the executor
+ long procId = procExec.submitProcedure(
+ new EnableTableProcedure(procExec.getEnvironment(), tableName, false));
+ testRecoveryAndDoubleExecution(UTIL, procId, step, EnableTableState.values());
+
+ MasterProcedureTestingUtility.validateTableIsEnabled(
+ UTIL.getHBaseCluster().getMaster(), tableName);
+ }
+
+ // ==========================================================================
// Test Helpers
// ==========================================================================
public static void testRecoveryAndDoubleExecution(final HBaseTestingUtility testUtil,