diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java
index 6d1694a..5c96f21 100644
--- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java
@@ -127,6 +127,133 @@ public final class MasterProcedureProtos {
}
/**
+ * Protobuf enum {@code ModifyTableState}
+ */
+ public enum ModifyTableState
+ implements com.google.protobuf.ProtocolMessageEnum {
+ /**
+ * MODIFY_TABLE_PREPARE = 1;
+ */
+ MODIFY_TABLE_PREPARE(0, 1),
+ /**
+ * MODIFY_TABLE_PRE_OPERATION = 2;
+ */
+ MODIFY_TABLE_PRE_OPERATION(1, 2),
+ /**
+ * MODIFY_TABLE_UPDATE_TABLE_DESCRIPTOR = 3;
+ */
+ MODIFY_TABLE_UPDATE_TABLE_DESCRIPTOR(2, 3),
+ /**
+ * MODIFY_TABLE_REMOVE_REPLICA_COLUMN = 4;
+ */
+ MODIFY_TABLE_REMOVE_REPLICA_COLUMN(3, 4),
+ /**
+ * MODIFY_TABLE_DELETE_FS_LAYOUT = 5;
+ */
+ MODIFY_TABLE_DELETE_FS_LAYOUT(4, 5),
+ /**
+ * MODIFY_TABLE_POST_OPERATION = 6;
+ */
+ MODIFY_TABLE_POST_OPERATION(5, 6),
+ /**
+ * MODIFY_TABLE_REOPEN_ALL_REGIONS = 7;
+ */
+ MODIFY_TABLE_REOPEN_ALL_REGIONS(6, 7),
+ ;
+
+ /**
+ * MODIFY_TABLE_PREPARE = 1;
+ */
+ public static final int MODIFY_TABLE_PREPARE_VALUE = 1;
+ /**
+ * MODIFY_TABLE_PRE_OPERATION = 2;
+ */
+ public static final int MODIFY_TABLE_PRE_OPERATION_VALUE = 2;
+ /**
+ * MODIFY_TABLE_UPDATE_TABLE_DESCRIPTOR = 3;
+ */
+ public static final int MODIFY_TABLE_UPDATE_TABLE_DESCRIPTOR_VALUE = 3;
+ /**
+ * MODIFY_TABLE_REMOVE_REPLICA_COLUMN = 4;
+ */
+ public static final int MODIFY_TABLE_REMOVE_REPLICA_COLUMN_VALUE = 4;
+ /**
+ * MODIFY_TABLE_DELETE_FS_LAYOUT = 5;
+ */
+ public static final int MODIFY_TABLE_DELETE_FS_LAYOUT_VALUE = 5;
+ /**
+ * MODIFY_TABLE_POST_OPERATION = 6;
+ */
+ public static final int MODIFY_TABLE_POST_OPERATION_VALUE = 6;
+ /**
+ * MODIFY_TABLE_REOPEN_ALL_REGIONS = 7;
+ */
+ public static final int MODIFY_TABLE_REOPEN_ALL_REGIONS_VALUE = 7;
+
+
+ public final int getNumber() { return value; }
+
+ public static ModifyTableState valueOf(int value) {
+ switch (value) {
+ case 1: return MODIFY_TABLE_PREPARE;
+ case 2: return MODIFY_TABLE_PRE_OPERATION;
+ case 3: return MODIFY_TABLE_UPDATE_TABLE_DESCRIPTOR;
+ case 4: return MODIFY_TABLE_REMOVE_REPLICA_COLUMN;
+ case 5: return MODIFY_TABLE_DELETE_FS_LAYOUT;
+ case 6: return MODIFY_TABLE_POST_OPERATION;
+ case 7: return MODIFY_TABLE_REOPEN_ALL_REGIONS;
+ default: return null;
+ }
+ }
+
+ public static com.google.protobuf.Internal.EnumLiteMap<ModifyTableState>
+ internalGetValueMap() {
+ return internalValueMap;
+ }
+ private static com.google.protobuf.Internal.EnumLiteMap<ModifyTableState>
+ internalValueMap =
+ new com.google.protobuf.Internal.EnumLiteMap<ModifyTableState>() {
+ public ModifyTableState findValueByNumber(int number) {
+ return ModifyTableState.valueOf(number);
+ }
+ };
+
+ public final com.google.protobuf.Descriptors.EnumValueDescriptor
+ getValueDescriptor() {
+ return getDescriptor().getValues().get(index);
+ }
+ public final com.google.protobuf.Descriptors.EnumDescriptor
+ getDescriptorForType() {
+ return getDescriptor();
+ }
+ public static final com.google.protobuf.Descriptors.EnumDescriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(1);
+ }
+
+ private static final ModifyTableState[] VALUES = values();
+
+ public static ModifyTableState valueOf(
+ com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
+ if (desc.getType() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "EnumValueDescriptor is not for this type.");
+ }
+ return VALUES[desc.getIndex()];
+ }
+
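+ // 'index' is the position in the descriptor's value list; 'value' is the
+ // field number declared in the .proto file (here, index == value - 1).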
+ private final int index;
+ private final int value;
+
+ private ModifyTableState(int index, int value) {
+ this.index = index;
+ this.value = value;
+ }
+
+ // @@protoc_insertion_point(enum_scope:ModifyTableState)
+ }
+
+ /**
* Protobuf enum {@code DeleteTableState}
*/
public enum DeleteTableState
@@ -219,7 +346,7 @@ public final class MasterProcedureProtos {
}
public static final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
- return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(1);
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(2);
}
private static final DeleteTableState[] VALUES = values();
@@ -489,19 +616,1257 @@ public final class MasterProcedureProtos {
return regionInfo_;
}
/**
- * repeated .RegionInfo region_info = 3;
+ * repeated .RegionInfo region_info = 3;
+ */
+ public int getRegionInfoCount() {
+ return regionInfo_.size();
+ }
+ /**
+ * repeated .RegionInfo region_info = 3;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo(int index) {
+ return regionInfo_.get(index);
+ }
+ /**
+ * repeated .RegionInfo region_info = 3;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder(
+ int index) {
+ return regionInfo_.get(index);
+ }
+
+ private void initFields() {
+ userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance();
+ tableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance();
+ regionInfo_ = java.util.Collections.emptyList();
+ }
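+ // Required-field check is memoized: -1 = not yet computed, 0 = missing
+ // required data, 1 = all required fields (recursively) present.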
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasUserInfo()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasTableSchema()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!getUserInfo().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!getTableSchema().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ for (int i = 0; i < getRegionInfoCount(); i++) {
+ if (!getRegionInfo(i).isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeMessage(1, userInfo_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeMessage(2, tableSchema_);
+ }
+ for (int i = 0; i < regionInfo_.size(); i++) {
+ output.writeMessage(3, regionInfo_.get(i));
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(1, userInfo_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(2, tableSchema_);
+ }
+ for (int i = 0; i < regionInfo_.size(); i++) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(3, regionInfo_.get(i));
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData other = (org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData) obj;
+
+ boolean result = true;
+ result = result && (hasUserInfo() == other.hasUserInfo());
+ if (hasUserInfo()) {
+ result = result && getUserInfo()
+ .equals(other.getUserInfo());
+ }
+ result = result && (hasTableSchema() == other.hasTableSchema());
+ if (hasTableSchema()) {
+ result = result && getTableSchema()
+ .equals(other.getTableSchema());
+ }
+ result = result && getRegionInfoList()
+ .equals(other.getRegionInfoList());
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasUserInfo()) {
+ hash = (37 * hash) + USER_INFO_FIELD_NUMBER;
+ hash = (53 * hash) + getUserInfo().hashCode();
+ }
+ if (hasTableSchema()) {
+ hash = (37 * hash) + TABLE_SCHEMA_FIELD_NUMBER;
+ hash = (53 * hash) + getTableSchema().hashCode();
+ }
+ if (getRegionInfoCount() > 0) {
+ hash = (37 * hash) + REGION_INFO_FIELD_NUMBER;
+ hash = (53 * hash) + getRegionInfoList().hashCode();
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code CreateTableStateData}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateDataOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_CreateTableStateData_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_CreateTableStateData_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData.class, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getUserInfoFieldBuilder();
+ getTableSchemaFieldBuilder();
+ getRegionInfoFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ if (userInfoBuilder_ == null) {
+ userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance();
+ } else {
+ userInfoBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
+ if (tableSchemaBuilder_ == null) {
+ tableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance();
+ } else {
+ tableSchemaBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000002);
+ if (regionInfoBuilder_ == null) {
+ regionInfo_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000004);
+ } else {
+ regionInfoBuilder_.clear();
+ }
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_CreateTableStateData_descriptor;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData build() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData result = new org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ if (userInfoBuilder_ == null) {
+ result.userInfo_ = userInfo_;
+ } else {
+ result.userInfo_ = userInfoBuilder_.build();
+ }
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ if (tableSchemaBuilder_ == null) {
+ result.tableSchema_ = tableSchema_;
+ } else {
+ result.tableSchema_ = tableSchemaBuilder_.build();
+ }
+ if (regionInfoBuilder_ == null) {
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ regionInfo_ = java.util.Collections.unmodifiableList(regionInfo_);
+ bitField0_ = (bitField0_ & ~0x00000004);
+ }
+ result.regionInfo_ = regionInfo_;
+ } else {
+ result.regionInfo_ = regionInfoBuilder_.build();
+ }
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData.getDefaultInstance()) return this;
+ if (other.hasUserInfo()) {
+ mergeUserInfo(other.getUserInfo());
+ }
+ if (other.hasTableSchema()) {
+ mergeTableSchema(other.getTableSchema());
+ }
+ if (regionInfoBuilder_ == null) {
+ if (!other.regionInfo_.isEmpty()) {
+ if (regionInfo_.isEmpty()) {
+ regionInfo_ = other.regionInfo_;
+ bitField0_ = (bitField0_ & ~0x00000004);
+ } else {
+ ensureRegionInfoIsMutable();
+ regionInfo_.addAll(other.regionInfo_);
+ }
+ onChanged();
+ }
+ } else {
+ if (!other.regionInfo_.isEmpty()) {
+ if (regionInfoBuilder_.isEmpty()) {
+ regionInfoBuilder_.dispose();
+ regionInfoBuilder_ = null;
+ regionInfo_ = other.regionInfo_;
+ bitField0_ = (bitField0_ & ~0x00000004);
+ regionInfoBuilder_ =
+ com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
+ getRegionInfoFieldBuilder() : null;
+ } else {
+ regionInfoBuilder_.addAllMessages(other.regionInfo_);
+ }
+ }
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasUserInfo()) {
+
+ return false;
+ }
+ if (!hasTableSchema()) {
+
+ return false;
+ }
+ if (!getUserInfo().isInitialized()) {
+
+ return false;
+ }
+ if (!getTableSchema().isInitialized()) {
+
+ return false;
+ }
+ for (int i = 0; i < getRegionInfoCount(); i++) {
+ if (!getRegionInfo(i).isInitialized()) {
+
+ return false;
+ }
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // required .UserInformation user_info = 1;
+ private org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder> userInfoBuilder_;
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ public boolean hasUserInfo() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo() {
+ if (userInfoBuilder_ == null) {
+ return userInfo_;
+ } else {
+ return userInfoBuilder_.getMessage();
+ }
+ }
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ public Builder setUserInfo(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation value) {
+ if (userInfoBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ userInfo_ = value;
+ onChanged();
+ } else {
+ userInfoBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ public Builder setUserInfo(
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder builderForValue) {
+ if (userInfoBuilder_ == null) {
+ userInfo_ = builderForValue.build();
+ onChanged();
+ } else {
+ userInfoBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ public Builder mergeUserInfo(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation value) {
+ if (userInfoBuilder_ == null) {
+ if (((bitField0_ & 0x00000001) == 0x00000001) &&
+ userInfo_ != org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance()) {
+ userInfo_ =
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.newBuilder(userInfo_).mergeFrom(value).buildPartial();
+ } else {
+ userInfo_ = value;
+ }
+ onChanged();
+ } else {
+ userInfoBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ public Builder clearUserInfo() {
+ if (userInfoBuilder_ == null) {
+ userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance();
+ onChanged();
+ } else {
+ userInfoBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
+ }
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder getUserInfoBuilder() {
+ bitField0_ |= 0x00000001;
+ onChanged();
+ return getUserInfoFieldBuilder().getBuilder();
+ }
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder() {
+ if (userInfoBuilder_ != null) {
+ return userInfoBuilder_.getMessageOrBuilder();
+ } else {
+ return userInfo_;
+ }
+ }
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder>
+ getUserInfoFieldBuilder() {
+ if (userInfoBuilder_ == null) {
+ userInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder>(
+ userInfo_,
+ getParentForChildren(),
+ isClean());
+ userInfo_ = null;
+ }
+ return userInfoBuilder_;
+ }
+
+ // required .TableSchema table_schema = 2;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema tableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder> tableSchemaBuilder_;
+ /**
+ * required .TableSchema table_schema = 2;
+ */
+ public boolean hasTableSchema() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * required .TableSchema table_schema = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getTableSchema() {
+ if (tableSchemaBuilder_ == null) {
+ return tableSchema_;
+ } else {
+ return tableSchemaBuilder_.getMessage();
+ }
+ }
+ /**
+ * required .TableSchema table_schema = 2;
+ */
+ public Builder setTableSchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) {
+ if (tableSchemaBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ tableSchema_ = value;
+ onChanged();
+ } else {
+ tableSchemaBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * required .TableSchema table_schema = 2;
+ */
+ public Builder setTableSchema(
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder builderForValue) {
+ if (tableSchemaBuilder_ == null) {
+ tableSchema_ = builderForValue.build();
+ onChanged();
+ } else {
+ tableSchemaBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * required .TableSchema table_schema = 2;
+ */
+ public Builder mergeTableSchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) {
+ if (tableSchemaBuilder_ == null) {
+ if (((bitField0_ & 0x00000002) == 0x00000002) &&
+ tableSchema_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance()) {
+ tableSchema_ =
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.newBuilder(tableSchema_).mergeFrom(value).buildPartial();
+ } else {
+ tableSchema_ = value;
+ }
+ onChanged();
+ } else {
+ tableSchemaBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * required .TableSchema table_schema = 2;
+ */
+ public Builder clearTableSchema() {
+ if (tableSchemaBuilder_ == null) {
+ tableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance();
+ onChanged();
+ } else {
+ tableSchemaBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000002);
+ return this;
+ }
+ /**
+ * required .TableSchema table_schema = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder getTableSchemaBuilder() {
+ bitField0_ |= 0x00000002;
+ onChanged();
+ return getTableSchemaFieldBuilder().getBuilder();
+ }
+ /**
+ * required .TableSchema table_schema = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getTableSchemaOrBuilder() {
+ if (tableSchemaBuilder_ != null) {
+ return tableSchemaBuilder_.getMessageOrBuilder();
+ } else {
+ return tableSchema_;
+ }
+ }
+ /**
+ * required .TableSchema table_schema = 2;
+ */
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder>
+ getTableSchemaFieldBuilder() {
+ if (tableSchemaBuilder_ == null) {
+ tableSchemaBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder>(
+ tableSchema_,
+ getParentForChildren(),
+ isClean());
+ tableSchema_ = null;
+ }
+ return tableSchemaBuilder_;
+ }
+
+ // repeated .RegionInfo region_info = 3;
+ private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo> regionInfo_ =
+ java.util.Collections.emptyList();
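+ // Copy-on-write guard: bit 0x00000004 marks regionInfo_ as a private
+ // mutable copy; an unset bit means the list may still be shared with a
+ // built message, so it is copied before the first mutation.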
+ private void ensureRegionInfoIsMutable() {
+ if (!((bitField0_ & 0x00000004) == 0x00000004)) {
+ regionInfo_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo>(regionInfo_);
+ bitField0_ |= 0x00000004;
+ }
+ }
+
+ private com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> regionInfoBuilder_;
+
+ /**
+ * repeated .RegionInfo region_info = 3;
+ */
+ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo> getRegionInfoList() {
+ if (regionInfoBuilder_ == null) {
+ return java.util.Collections.unmodifiableList(regionInfo_);
+ } else {
+ return regionInfoBuilder_.getMessageList();
+ }
+ }
+ /**
+ * repeated .RegionInfo region_info = 3;
+ */
+ public int getRegionInfoCount() {
+ if (regionInfoBuilder_ == null) {
+ return regionInfo_.size();
+ } else {
+ return regionInfoBuilder_.getCount();
+ }
+ }
+ /**
+ * repeated .RegionInfo region_info = 3;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo(int index) {
+ if (regionInfoBuilder_ == null) {
+ return regionInfo_.get(index);
+ } else {
+ return regionInfoBuilder_.getMessage(index);
+ }
+ }
+ /**
+ * repeated .RegionInfo region_info = 3;
+ */
+ public Builder setRegionInfo(
+ int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) {
+ if (regionInfoBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureRegionInfoIsMutable();
+ regionInfo_.set(index, value);
+ onChanged();
+ } else {
+ regionInfoBuilder_.setMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * repeated .RegionInfo region_info = 3;
+ */
+ public Builder setRegionInfo(
+ int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) {
+ if (regionInfoBuilder_ == null) {
+ ensureRegionInfoIsMutable();
+ regionInfo_.set(index, builderForValue.build());
+ onChanged();
+ } else {
+ regionInfoBuilder_.setMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .RegionInfo region_info = 3;
+ */
+ public Builder addRegionInfo(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) {
+ if (regionInfoBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureRegionInfoIsMutable();
+ regionInfo_.add(value);
+ onChanged();
+ } else {
+ regionInfoBuilder_.addMessage(value);
+ }
+ return this;
+ }
+ /**
+ * repeated .RegionInfo region_info = 3;
+ */
+ public Builder addRegionInfo(
+ int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) {
+ if (regionInfoBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureRegionInfoIsMutable();
+ regionInfo_.add(index, value);
+ onChanged();
+ } else {
+ regionInfoBuilder_.addMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * repeated .RegionInfo region_info = 3;
+ */
+ public Builder addRegionInfo(
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) {
+ if (regionInfoBuilder_ == null) {
+ ensureRegionInfoIsMutable();
+ regionInfo_.add(builderForValue.build());
+ onChanged();
+ } else {
+ regionInfoBuilder_.addMessage(builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .RegionInfo region_info = 3;
+ */
+ public Builder addRegionInfo(
+ int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) {
+ if (regionInfoBuilder_ == null) {
+ ensureRegionInfoIsMutable();
+ regionInfo_.add(index, builderForValue.build());
+ onChanged();
+ } else {
+ regionInfoBuilder_.addMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .RegionInfo region_info = 3;
+ */
+ public Builder addAllRegionInfo(
+ java.lang.Iterable<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo> values) {
+ if (regionInfoBuilder_ == null) {
+ ensureRegionInfoIsMutable();
+ super.addAll(values, regionInfo_);
+ onChanged();
+ } else {
+ regionInfoBuilder_.addAllMessages(values);
+ }
+ return this;
+ }
+ /**
+ * repeated .RegionInfo region_info = 3;
+ */
+ public Builder clearRegionInfo() {
+ if (regionInfoBuilder_ == null) {
+ regionInfo_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000004);
+ onChanged();
+ } else {
+ regionInfoBuilder_.clear();
+ }
+ return this;
+ }
+ /**
+ * repeated .RegionInfo region_info = 3;
+ */
+ public Builder removeRegionInfo(int index) {
+ if (regionInfoBuilder_ == null) {
+ ensureRegionInfoIsMutable();
+ regionInfo_.remove(index);
+ onChanged();
+ } else {
+ regionInfoBuilder_.remove(index);
+ }
+ return this;
+ }
+ /**
+ * repeated .RegionInfo region_info = 3;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder getRegionInfoBuilder(
+ int index) {
+ return getRegionInfoFieldBuilder().getBuilder(index);
+ }
+ /**
+ * repeated .RegionInfo region_info = 3;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder(
+ int index) {
+ if (regionInfoBuilder_ == null) {
+ return regionInfo_.get(index); } else {
+ return regionInfoBuilder_.getMessageOrBuilder(index);
+ }
+ }
+ /**
+ * repeated .RegionInfo region_info = 3;
+ */
+ public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>
+ getRegionInfoOrBuilderList() {
+ if (regionInfoBuilder_ != null) {
+ return regionInfoBuilder_.getMessageOrBuilderList();
+ } else {
+ return java.util.Collections.unmodifiableList(regionInfo_);
+ }
+ }
+ /**
+ * repeated .RegionInfo region_info = 3;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder addRegionInfoBuilder() {
+ return getRegionInfoFieldBuilder().addBuilder(
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance());
+ }
+ /**
+ * repeated .RegionInfo region_info = 3;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder addRegionInfoBuilder(
+ int index) {
+ return getRegionInfoFieldBuilder().addBuilder(
+ index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance());
+ }
+ /**
+ * repeated .RegionInfo region_info = 3;
+ */
+ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder>
+ getRegionInfoBuilderList() {
+ return getRegionInfoFieldBuilder().getBuilderList();
+ }
+ private com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>
+ getRegionInfoFieldBuilder() {
+ if (regionInfoBuilder_ == null) {
+ regionInfoBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>(
+ regionInfo_,
+ ((bitField0_ & 0x00000004) == 0x00000004),
+ getParentForChildren(),
+ isClean());
+ regionInfo_ = null;
+ }
+ return regionInfoBuilder_;
+ }
+
+ // @@protoc_insertion_point(builder_scope:CreateTableStateData)
+ }
+
+ static {
+ defaultInstance = new CreateTableStateData(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:CreateTableStateData)
+ }
+
+ public interface ModifyTableMessageOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required .UserInformation user_info = 1;
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ boolean hasUserInfo();
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo();
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder();
+
+ // optional .TableSchema unmodified_table_schema = 2;
+ /**
+ * optional .TableSchema unmodified_table_schema = 2;
+ */
+ boolean hasUnmodifiedTableSchema();
+ /**
+ * optional .TableSchema unmodified_table_schema = 2;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getUnmodifiedTableSchema();
+ /**
+ * optional .TableSchema unmodified_table_schema = 2;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getUnmodifiedTableSchemaOrBuilder();
+
+ // required .TableSchema modified_table_schema = 3;
+ /**
+ * required .TableSchema modified_table_schema = 3;
+ */
+ boolean hasModifiedTableSchema();
+ /**
+ * required .TableSchema modified_table_schema = 3;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getModifiedTableSchema();
+ /**
+ * required .TableSchema modified_table_schema = 3;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getModifiedTableSchemaOrBuilder();
+
+ // required bool delete_column_family_in_modify = 4;
+ /**
+ * required bool delete_column_family_in_modify = 4;
+ */
+ boolean hasDeleteColumnFamilyInModify();
+ /**
+ * required bool delete_column_family_in_modify = 4;
+ */
+ boolean getDeleteColumnFamilyInModify();
+
+ // repeated .RegionInfo region_info = 5;
+ /**
+ * repeated .RegionInfo region_info = 5;
+ */
+ java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo>
+ getRegionInfoList();
+ /**
+ * repeated .RegionInfo region_info = 5;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo(int index);
+ /**
+ * repeated .RegionInfo region_info = 5;
+ */
+ int getRegionInfoCount();
+ /**
+ * repeated .RegionInfo region_info = 5;
+ */
+ java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>
+ getRegionInfoOrBuilderList();
+ /**
+ * repeated .RegionInfo region_info = 5;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder(
+ int index);
+ }
+ /**
+ * Protobuf type {@code ModifyTableMessage}
+ */
+ public static final class ModifyTableMessage extends
+ com.google.protobuf.GeneratedMessage
+ implements ModifyTableMessageOrBuilder {
+ // Use ModifyTableMessage.newBuilder() to construct.
+ private ModifyTableMessage(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private ModifyTableMessage(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final ModifyTableMessage defaultInstance;
+ public static ModifyTableMessage getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public ModifyTableMessage getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
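+ // Wire parsing: each tag is (field_number << 3) | wire_type, so case 10
+ // is field 1 (user_info), 18 field 2, 26 field 3, 32 field 4 (bool
+ // varint), and 42 the repeated field 5 (region_info).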
+ private ModifyTableMessage(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ subBuilder = userInfo_.toBuilder();
+ }
+ userInfo_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(userInfo_);
+ userInfo_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000001;
+ break;
+ }
+ case 18: {
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ subBuilder = unmodifiedTableSchema_.toBuilder();
+ }
+ unmodifiedTableSchema_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(unmodifiedTableSchema_);
+ unmodifiedTableSchema_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000002;
+ break;
+ }
+ case 26: {
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ subBuilder = modifiedTableSchema_.toBuilder();
+ }
+ modifiedTableSchema_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(modifiedTableSchema_);
+ modifiedTableSchema_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000004;
+ break;
+ }
+ case 32: {
+ bitField0_ |= 0x00000008;
+ deleteColumnFamilyInModify_ = input.readBool();
+ break;
+ }
+ case 42: {
+ if (!((mutable_bitField0_ & 0x00000010) == 0x00000010)) {
+ regionInfo_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo>();
+ mutable_bitField0_ |= 0x00000010;
+ }
+ regionInfo_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.PARSER, extensionRegistry));
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ if (((mutable_bitField0_ & 0x00000010) == 0x00000010)) {
+ regionInfo_ = java.util.Collections.unmodifiableList(regionInfo_);
+ }
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_ModifyTableMessage_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_ModifyTableMessage_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableMessage.class, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableMessage.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<ModifyTableMessage> PARSER =
+ new com.google.protobuf.AbstractParser<ModifyTableMessage>() {
+ public ModifyTableMessage parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new ModifyTableMessage(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<ModifyTableMessage> getParserForType() {
+ return PARSER;
+ }
+
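+ // bitField0_ records which fields were explicitly set: 0x1 user_info,
+ // 0x2 unmodified_table_schema, 0x4 modified_table_schema,
+ // 0x8 delete_column_family_in_modify.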
+ private int bitField0_;
+ // required .UserInformation user_info = 1;
+ public static final int USER_INFO_FIELD_NUMBER = 1;
+ private org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation userInfo_;
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ public boolean hasUserInfo() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo() {
+ return userInfo_;
+ }
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder() {
+ return userInfo_;
+ }
+
+ // optional .TableSchema unmodified_table_schema = 2;
+ public static final int UNMODIFIED_TABLE_SCHEMA_FIELD_NUMBER = 2;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema unmodifiedTableSchema_;
+ /**
+ * optional .TableSchema unmodified_table_schema = 2;
+ */
+ public boolean hasUnmodifiedTableSchema() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * optional .TableSchema unmodified_table_schema = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getUnmodifiedTableSchema() {
+ return unmodifiedTableSchema_;
+ }
+ /**
+ * optional .TableSchema unmodified_table_schema = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getUnmodifiedTableSchemaOrBuilder() {
+ return unmodifiedTableSchema_;
+ }
+
+ // required .TableSchema modified_table_schema = 3;
+ public static final int MODIFIED_TABLE_SCHEMA_FIELD_NUMBER = 3;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema modifiedTableSchema_;
+ /**
+ * required .TableSchema modified_table_schema = 3;
+ */
+ public boolean hasModifiedTableSchema() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * required .TableSchema modified_table_schema = 3;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getModifiedTableSchema() {
+ return modifiedTableSchema_;
+ }
+ /**
+ * required .TableSchema modified_table_schema = 3;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getModifiedTableSchemaOrBuilder() {
+ return modifiedTableSchema_;
+ }
+
+ // required bool delete_column_family_in_modify = 4;
+ public static final int DELETE_COLUMN_FAMILY_IN_MODIFY_FIELD_NUMBER = 4;
+ private boolean deleteColumnFamilyInModify_;
+ /**
+ * required bool delete_column_family_in_modify = 4;
+ */
+ public boolean hasDeleteColumnFamilyInModify() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ /**
+ * required bool delete_column_family_in_modify = 4;
+ */
+ public boolean getDeleteColumnFamilyInModify() {
+ return deleteColumnFamilyInModify_;
+ }
+
+ // repeated .RegionInfo region_info = 5;
+ public static final int REGION_INFO_FIELD_NUMBER = 5;
+ private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo> regionInfo_;
+ /**
+ * repeated .RegionInfo region_info = 5;
+ */
+ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo> getRegionInfoList() {
+ return regionInfo_;
+ }
+ /**
+ * repeated .RegionInfo region_info = 5;
+ */
+ public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>
+ getRegionInfoOrBuilderList() {
+ return regionInfo_;
+ }
+ /**
+ * repeated .RegionInfo region_info = 5;
*/
public int getRegionInfoCount() {
return regionInfo_.size();
}
/**
- * repeated .RegionInfo region_info = 3;
+ * repeated .RegionInfo region_info = 5;
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo(int index) {
return regionInfo_.get(index);
}
/**
- * repeated .RegionInfo region_info = 3;
+ * repeated .RegionInfo region_info = 5;
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder(
int index) {
@@ -510,7 +1875,9 @@ public final class MasterProcedureProtos {
private void initFields() {
userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance();
- tableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance();
+ unmodifiedTableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance();
+ modifiedTableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance();
+ deleteColumnFamilyInModify_ = false;
regionInfo_ = java.util.Collections.emptyList();
}
private byte memoizedIsInitialized = -1;
@@ -522,7 +1889,11 @@ public final class MasterProcedureProtos {
memoizedIsInitialized = 0;
return false;
}
- if (!hasTableSchema()) {
+ if (!hasModifiedTableSchema()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasDeleteColumnFamilyInModify()) {
memoizedIsInitialized = 0;
return false;
}
@@ -530,7 +1901,13 @@ public final class MasterProcedureProtos {
memoizedIsInitialized = 0;
return false;
}
- if (!getTableSchema().isInitialized()) {
+ if (hasUnmodifiedTableSchema()) {
+ if (!getUnmodifiedTableSchema().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ }
+ if (!getModifiedTableSchema().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
@@ -551,10 +1928,16 @@ public final class MasterProcedureProtos {
output.writeMessage(1, userInfo_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
- output.writeMessage(2, tableSchema_);
+ output.writeMessage(2, unmodifiedTableSchema_);
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ output.writeMessage(3, modifiedTableSchema_);
+ }
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ output.writeBool(4, deleteColumnFamilyInModify_);
}
for (int i = 0; i < regionInfo_.size(); i++) {
- output.writeMessage(3, regionInfo_.get(i));
+ output.writeMessage(5, regionInfo_.get(i));
}
getUnknownFields().writeTo(output);
}
@@ -571,11 +1954,19 @@ public final class MasterProcedureProtos {
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
- .computeMessageSize(2, tableSchema_);
+ .computeMessageSize(2, unmodifiedTableSchema_);
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(3, modifiedTableSchema_);
+ }
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBoolSize(4, deleteColumnFamilyInModify_);
}
for (int i = 0; i < regionInfo_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
- .computeMessageSize(3, regionInfo_.get(i));
+ .computeMessageSize(5, regionInfo_.get(i));
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
@@ -594,10 +1985,10 @@ public final class MasterProcedureProtos {
if (obj == this) {
return true;
}
- if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData)) {
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableMessage)) {
return super.equals(obj);
}
- org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData other = (org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData) obj;
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableMessage other = (org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableMessage) obj;
boolean result = true;
result = result && (hasUserInfo() == other.hasUserInfo());
@@ -605,10 +1996,20 @@ public final class MasterProcedureProtos {
result = result && getUserInfo()
.equals(other.getUserInfo());
}
- result = result && (hasTableSchema() == other.hasTableSchema());
- if (hasTableSchema()) {
- result = result && getTableSchema()
- .equals(other.getTableSchema());
+ result = result && (hasUnmodifiedTableSchema() == other.hasUnmodifiedTableSchema());
+ if (hasUnmodifiedTableSchema()) {
+ result = result && getUnmodifiedTableSchema()
+ .equals(other.getUnmodifiedTableSchema());
+ }
+ result = result && (hasModifiedTableSchema() == other.hasModifiedTableSchema());
+ if (hasModifiedTableSchema()) {
+ result = result && getModifiedTableSchema()
+ .equals(other.getModifiedTableSchema());
+ }
+ result = result && (hasDeleteColumnFamilyInModify() == other.hasDeleteColumnFamilyInModify());
+ if (hasDeleteColumnFamilyInModify()) {
+ result = result && (getDeleteColumnFamilyInModify()
+ == other.getDeleteColumnFamilyInModify());
}
result = result && getRegionInfoList()
.equals(other.getRegionInfoList());
@@ -629,9 +2030,17 @@ public final class MasterProcedureProtos {
hash = (37 * hash) + USER_INFO_FIELD_NUMBER;
hash = (53 * hash) + getUserInfo().hashCode();
}
- if (hasTableSchema()) {
- hash = (37 * hash) + TABLE_SCHEMA_FIELD_NUMBER;
- hash = (53 * hash) + getTableSchema().hashCode();
+ if (hasUnmodifiedTableSchema()) {
+ hash = (37 * hash) + UNMODIFIED_TABLE_SCHEMA_FIELD_NUMBER;
+ hash = (53 * hash) + getUnmodifiedTableSchema().hashCode();
+ }
+ if (hasModifiedTableSchema()) {
+ hash = (37 * hash) + MODIFIED_TABLE_SCHEMA_FIELD_NUMBER;
+ hash = (53 * hash) + getModifiedTableSchema().hashCode();
+ }
+ if (hasDeleteColumnFamilyInModify()) {
+ hash = (37 * hash) + DELETE_COLUMN_FAMILY_IN_MODIFY_FIELD_NUMBER;
+ hash = (53 * hash) + hashBoolean(getDeleteColumnFamilyInModify());
}
if (getRegionInfoCount() > 0) {
hash = (37 * hash) + REGION_INFO_FIELD_NUMBER;
@@ -642,53 +2051,53 @@ public final class MasterProcedureProtos {
return hash;
}
- public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData parseFrom(
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableMessage parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
- public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData parseFrom(
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableMessage parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
- public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData parseFrom(byte[] data)
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableMessage parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
- public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData parseFrom(
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableMessage parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
- public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData parseFrom(java.io.InputStream input)
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableMessage parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
- public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData parseFrom(
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableMessage parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
- public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData parseDelimitedFrom(java.io.InputStream input)
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableMessage parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
- public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData parseDelimitedFrom(
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableMessage parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
- public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData parseFrom(
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableMessage parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
- public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData parseFrom(
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableMessage parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
@@ -697,7 +2106,7 @@ public final class MasterProcedureProtos {
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
- public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData prototype) {
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableMessage prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@@ -709,24 +2118,24 @@ public final class MasterProcedureProtos {
return builder;
}
/**
- * Protobuf type {@code CreateTableStateData}
+ * Protobuf type {@code ModifyTableMessage}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
- implements org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateDataOrBuilder {
+ implements org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableMessageOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
- return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_CreateTableStateData_descriptor;
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_ModifyTableMessage_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
- return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_CreateTableStateData_fieldAccessorTable
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_ModifyTableMessage_fieldAccessorTable
.ensureFieldAccessorsInitialized(
- org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData.class, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData.Builder.class);
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableMessage.class, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableMessage.Builder.class);
}
- // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData.newBuilder()
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableMessage.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
@@ -739,7 +2148,8 @@ public final class MasterProcedureProtos {
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getUserInfoFieldBuilder();
- getTableSchemaFieldBuilder();
+ getUnmodifiedTableSchemaFieldBuilder();
+ getModifiedTableSchemaFieldBuilder();
getRegionInfoFieldBuilder();
}
}
@@ -755,15 +2165,23 @@ public final class MasterProcedureProtos {
userInfoBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
- if (tableSchemaBuilder_ == null) {
- tableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance();
+ if (unmodifiedTableSchemaBuilder_ == null) {
+ unmodifiedTableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance();
} else {
- tableSchemaBuilder_.clear();
+ unmodifiedTableSchemaBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000002);
+ if (modifiedTableSchemaBuilder_ == null) {
+ modifiedTableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance();
+ } else {
+ modifiedTableSchemaBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000004);
+ deleteColumnFamilyInModify_ = false;
+ bitField0_ = (bitField0_ & ~0x00000008);
if (regionInfoBuilder_ == null) {
regionInfo_ = java.util.Collections.emptyList();
- bitField0_ = (bitField0_ & ~0x00000004);
+ bitField0_ = (bitField0_ & ~0x00000010);
} else {
regionInfoBuilder_.clear();
}
@@ -776,23 +2194,23 @@ public final class MasterProcedureProtos {
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
- return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_CreateTableStateData_descriptor;
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_ModifyTableMessage_descriptor;
}
- public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData getDefaultInstanceForType() {
- return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData.getDefaultInstance();
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableMessage getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableMessage.getDefaultInstance();
}
- public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData build() {
- org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData result = buildPartial();
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableMessage build() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableMessage result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
- public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData buildPartial() {
- org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData result = new org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData(this);
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableMessage buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableMessage result = new org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableMessage(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
@@ -806,15 +2224,27 @@ public final class MasterProcedureProtos {
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
- if (tableSchemaBuilder_ == null) {
- result.tableSchema_ = tableSchema_;
+ if (unmodifiedTableSchemaBuilder_ == null) {
+ result.unmodifiedTableSchema_ = unmodifiedTableSchema_;
} else {
- result.tableSchema_ = tableSchemaBuilder_.build();
+ result.unmodifiedTableSchema_ = unmodifiedTableSchemaBuilder_.build();
+ }
+ if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+ to_bitField0_ |= 0x00000004;
+ }
+ if (modifiedTableSchemaBuilder_ == null) {
+ result.modifiedTableSchema_ = modifiedTableSchema_;
+ } else {
+ result.modifiedTableSchema_ = modifiedTableSchemaBuilder_.build();
+ }
+ if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
+ to_bitField0_ |= 0x00000008;
}
+ result.deleteColumnFamilyInModify_ = deleteColumnFamilyInModify_;
if (regionInfoBuilder_ == null) {
- if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ if (((bitField0_ & 0x00000010) == 0x00000010)) {
regionInfo_ = java.util.Collections.unmodifiableList(regionInfo_);
- bitField0_ = (bitField0_ & ~0x00000004);
+ bitField0_ = (bitField0_ & ~0x00000010);
}
result.regionInfo_ = regionInfo_;
} else {
@@ -826,27 +2256,33 @@ public final class MasterProcedureProtos {
}
public Builder mergeFrom(com.google.protobuf.Message other) {
- if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData) {
- return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData)other);
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableMessage) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableMessage)other);
} else {
super.mergeFrom(other);
return this;
}
}
- public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData other) {
- if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData.getDefaultInstance()) return this;
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableMessage other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableMessage.getDefaultInstance()) return this;
if (other.hasUserInfo()) {
mergeUserInfo(other.getUserInfo());
}
- if (other.hasTableSchema()) {
- mergeTableSchema(other.getTableSchema());
+ if (other.hasUnmodifiedTableSchema()) {
+ mergeUnmodifiedTableSchema(other.getUnmodifiedTableSchema());
+ }
+ if (other.hasModifiedTableSchema()) {
+ mergeModifiedTableSchema(other.getModifiedTableSchema());
+ }
+ if (other.hasDeleteColumnFamilyInModify()) {
+ setDeleteColumnFamilyInModify(other.getDeleteColumnFamilyInModify());
}
if (regionInfoBuilder_ == null) {
if (!other.regionInfo_.isEmpty()) {
if (regionInfo_.isEmpty()) {
regionInfo_ = other.regionInfo_;
- bitField0_ = (bitField0_ & ~0x00000004);
+ bitField0_ = (bitField0_ & ~0x00000010);
} else {
ensureRegionInfoIsMutable();
regionInfo_.addAll(other.regionInfo_);
@@ -859,7 +2295,7 @@ public final class MasterProcedureProtos {
regionInfoBuilder_.dispose();
regionInfoBuilder_ = null;
regionInfo_ = other.regionInfo_;
- bitField0_ = (bitField0_ & ~0x00000004);
+ bitField0_ = (bitField0_ & ~0x00000010);
regionInfoBuilder_ =
com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
getRegionInfoFieldBuilder() : null;
@@ -877,7 +2313,11 @@ public final class MasterProcedureProtos {
return false;
}
- if (!hasTableSchema()) {
+ if (!hasModifiedTableSchema()) {
+
+ return false;
+ }
+ if (!hasDeleteColumnFamilyInModify()) {
return false;
}
@@ -885,7 +2325,13 @@ public final class MasterProcedureProtos {
return false;
}
- if (!getTableSchema().isInitialized()) {
+ if (hasUnmodifiedTableSchema()) {
+ if (!getUnmodifiedTableSchema().isInitialized()) {
+
+ return false;
+ }
+ }
+ if (!getModifiedTableSchema().isInitialized()) {
return false;
}
@@ -902,11 +2348,11 @@ public final class MasterProcedureProtos {
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
- org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData parsedMessage = null;
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableMessage parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
- parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData) e.getUnfinishedMessage();
+ parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableMessage) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
@@ -1034,130 +2480,280 @@ public final class MasterProcedureProtos {
return userInfoBuilder_;
}
- // required .TableSchema table_schema = 2;
- private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema tableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance();
+ // optional .TableSchema unmodified_table_schema = 2;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema unmodifiedTableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
- org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder> tableSchemaBuilder_;
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder> unmodifiedTableSchemaBuilder_;
/**
- * required .TableSchema table_schema = 2;
+ * optional .TableSchema unmodified_table_schema = 2;
*/
- public boolean hasTableSchema() {
+ public boolean hasUnmodifiedTableSchema() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
- * required .TableSchema table_schema = 2;
+ * optional .TableSchema unmodified_table_schema = 2;
*/
- public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getTableSchema() {
- if (tableSchemaBuilder_ == null) {
- return tableSchema_;
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getUnmodifiedTableSchema() {
+ if (unmodifiedTableSchemaBuilder_ == null) {
+ return unmodifiedTableSchema_;
} else {
- return tableSchemaBuilder_.getMessage();
+ return unmodifiedTableSchemaBuilder_.getMessage();
}
}
/**
- * required .TableSchema table_schema = 2;
+ * optional .TableSchema unmodified_table_schema = 2;
*/
- public Builder setTableSchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) {
- if (tableSchemaBuilder_ == null) {
+ public Builder setUnmodifiedTableSchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) {
+ if (unmodifiedTableSchemaBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
- tableSchema_ = value;
+ unmodifiedTableSchema_ = value;
onChanged();
} else {
- tableSchemaBuilder_.setMessage(value);
+ unmodifiedTableSchemaBuilder_.setMessage(value);
}
bitField0_ |= 0x00000002;
return this;
}
/**
- * required .TableSchema table_schema = 2;
+ * optional .TableSchema unmodified_table_schema = 2;
*/
- public Builder setTableSchema(
+ public Builder setUnmodifiedTableSchema(
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder builderForValue) {
- if (tableSchemaBuilder_ == null) {
- tableSchema_ = builderForValue.build();
+ if (unmodifiedTableSchemaBuilder_ == null) {
+ unmodifiedTableSchema_ = builderForValue.build();
onChanged();
} else {
- tableSchemaBuilder_.setMessage(builderForValue.build());
+ unmodifiedTableSchemaBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000002;
return this;
}
/**
- * required .TableSchema table_schema = 2;
+ * optional .TableSchema unmodified_table_schema = 2;
*/
- public Builder mergeTableSchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) {
- if (tableSchemaBuilder_ == null) {
+ public Builder mergeUnmodifiedTableSchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) {
+ if (unmodifiedTableSchemaBuilder_ == null) {
if (((bitField0_ & 0x00000002) == 0x00000002) &&
- tableSchema_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance()) {
- tableSchema_ =
- org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.newBuilder(tableSchema_).mergeFrom(value).buildPartial();
+ unmodifiedTableSchema_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance()) {
+ unmodifiedTableSchema_ =
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.newBuilder(unmodifiedTableSchema_).mergeFrom(value).buildPartial();
} else {
- tableSchema_ = value;
+ unmodifiedTableSchema_ = value;
}
onChanged();
} else {
- tableSchemaBuilder_.mergeFrom(value);
+ unmodifiedTableSchemaBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000002;
return this;
}
/**
- * required .TableSchema table_schema = 2;
+ * optional .TableSchema unmodified_table_schema = 2;
*/
- public Builder clearTableSchema() {
- if (tableSchemaBuilder_ == null) {
- tableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance();
+ public Builder clearUnmodifiedTableSchema() {
+ if (unmodifiedTableSchemaBuilder_ == null) {
+ unmodifiedTableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance();
onChanged();
} else {
- tableSchemaBuilder_.clear();
+ unmodifiedTableSchemaBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
/**
- * required .TableSchema table_schema = 2;
+ * optional .TableSchema unmodified_table_schema = 2;
*/
- public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder getTableSchemaBuilder() {
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder getUnmodifiedTableSchemaBuilder() {
bitField0_ |= 0x00000002;
onChanged();
- return getTableSchemaFieldBuilder().getBuilder();
+ return getUnmodifiedTableSchemaFieldBuilder().getBuilder();
}
/**
- * required .TableSchema table_schema = 2;
+ * optional .TableSchema unmodified_table_schema = 2;
*/
- public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getTableSchemaOrBuilder() {
- if (tableSchemaBuilder_ != null) {
- return tableSchemaBuilder_.getMessageOrBuilder();
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getUnmodifiedTableSchemaOrBuilder() {
+ if (unmodifiedTableSchemaBuilder_ != null) {
+ return unmodifiedTableSchemaBuilder_.getMessageOrBuilder();
} else {
- return tableSchema_;
+ return unmodifiedTableSchema_;
}
}
/**
- * required .TableSchema table_schema = 2;
+ * optional .TableSchema unmodified_table_schema = 2;
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder>
- getTableSchemaFieldBuilder() {
- if (tableSchemaBuilder_ == null) {
- tableSchemaBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ getUnmodifiedTableSchemaFieldBuilder() {
+ if (unmodifiedTableSchemaBuilder_ == null) {
+ unmodifiedTableSchemaBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder>(
- tableSchema_,
+ unmodifiedTableSchema_,
getParentForChildren(),
isClean());
- tableSchema_ = null;
+ unmodifiedTableSchema_ = null;
}
- return tableSchemaBuilder_;
+ return unmodifiedTableSchemaBuilder_;
}
- // repeated .RegionInfo region_info = 3;
+ // required .TableSchema modified_table_schema = 3;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema modifiedTableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder> modifiedTableSchemaBuilder_;
+ /**
+ * required .TableSchema modified_table_schema = 3;
+ */
+ public boolean hasModifiedTableSchema() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * required .TableSchema modified_table_schema = 3;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getModifiedTableSchema() {
+ if (modifiedTableSchemaBuilder_ == null) {
+ return modifiedTableSchema_;
+ } else {
+ return modifiedTableSchemaBuilder_.getMessage();
+ }
+ }
+ /**
+ * required .TableSchema modified_table_schema = 3;
+ */
+ public Builder setModifiedTableSchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) {
+ if (modifiedTableSchemaBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ modifiedTableSchema_ = value;
+ onChanged();
+ } else {
+ modifiedTableSchemaBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000004;
+ return this;
+ }
+ /**
+ * required .TableSchema modified_table_schema = 3;
+ */
+ public Builder setModifiedTableSchema(
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder builderForValue) {
+ if (modifiedTableSchemaBuilder_ == null) {
+ modifiedTableSchema_ = builderForValue.build();
+ onChanged();
+ } else {
+ modifiedTableSchemaBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000004;
+ return this;
+ }
+ /**
+ * required .TableSchema modified_table_schema = 3;
+ */
+ public Builder mergeModifiedTableSchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) {
+ if (modifiedTableSchemaBuilder_ == null) {
+ if (((bitField0_ & 0x00000004) == 0x00000004) &&
+ modifiedTableSchema_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance()) {
+ modifiedTableSchema_ =
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.newBuilder(modifiedTableSchema_).mergeFrom(value).buildPartial();
+ } else {
+ modifiedTableSchema_ = value;
+ }
+ onChanged();
+ } else {
+ modifiedTableSchemaBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000004;
+ return this;
+ }
+ /**
+ * required .TableSchema modified_table_schema = 3;
+ */
+ public Builder clearModifiedTableSchema() {
+ if (modifiedTableSchemaBuilder_ == null) {
+ modifiedTableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance();
+ onChanged();
+ } else {
+ modifiedTableSchemaBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000004);
+ return this;
+ }
+ /**
+ * required .TableSchema modified_table_schema = 3;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder getModifiedTableSchemaBuilder() {
+ bitField0_ |= 0x00000004;
+ onChanged();
+ return getModifiedTableSchemaFieldBuilder().getBuilder();
+ }
+ /**
+ * required .TableSchema modified_table_schema = 3;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getModifiedTableSchemaOrBuilder() {
+ if (modifiedTableSchemaBuilder_ != null) {
+ return modifiedTableSchemaBuilder_.getMessageOrBuilder();
+ } else {
+ return modifiedTableSchema_;
+ }
+ }
+ /**
+ * required .TableSchema modified_table_schema = 3;
+ */
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder>
+ getModifiedTableSchemaFieldBuilder() {
+ if (modifiedTableSchemaBuilder_ == null) {
+ modifiedTableSchemaBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder>(
+ modifiedTableSchema_,
+ getParentForChildren(),
+ isClean());
+ modifiedTableSchema_ = null;
+ }
+ return modifiedTableSchemaBuilder_;
+ }
+
+ // required bool delete_column_family_in_modify = 4;
+ private boolean deleteColumnFamilyInModify_ ;
+ /**
+ * required bool delete_column_family_in_modify = 4;
+ */
+ public boolean hasDeleteColumnFamilyInModify() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ /**
+ * required bool delete_column_family_in_modify = 4;
+ */
+ public boolean getDeleteColumnFamilyInModify() {
+ return deleteColumnFamilyInModify_;
+ }
+ /**
+ * required bool delete_column_family_in_modify = 4;
+ */
+ public Builder setDeleteColumnFamilyInModify(boolean value) {
+ bitField0_ |= 0x00000008;
+ deleteColumnFamilyInModify_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * required bool delete_column_family_in_modify = 4;
+ */
+ public Builder clearDeleteColumnFamilyInModify() {
+ bitField0_ = (bitField0_ & ~0x00000008);
+ deleteColumnFamilyInModify_ = false;
+ onChanged();
+ return this;
+ }
+
+ // repeated .RegionInfo region_info = 5;
private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo> regionInfo_ =
java.util.Collections.emptyList();
private void ensureRegionInfoIsMutable() {
- if (!((bitField0_ & 0x00000004) == 0x00000004)) {
+ if (!((bitField0_ & 0x00000010) == 0x00000010)) {
regionInfo_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo>(regionInfo_);
- bitField0_ |= 0x00000004;
+ bitField0_ |= 0x00000010;
}
}
@@ -1165,7 +2761,7 @@ public final class MasterProcedureProtos {
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> regionInfoBuilder_;
/**
- * repeated .RegionInfo region_info = 3;
+ * repeated .RegionInfo region_info = 5;
*/
public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo> getRegionInfoList() {
if (regionInfoBuilder_ == null) {
@@ -1175,7 +2771,7 @@ public final class MasterProcedureProtos {
}
}
/**
- * repeated .RegionInfo region_info = 3;
+ * repeated .RegionInfo region_info = 5;
*/
public int getRegionInfoCount() {
if (regionInfoBuilder_ == null) {
@@ -1185,7 +2781,7 @@ public final class MasterProcedureProtos {
}
}
/**
- * repeated .RegionInfo region_info = 3;
+ * repeated .RegionInfo region_info = 5;
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo(int index) {
if (regionInfoBuilder_ == null) {
@@ -1195,7 +2791,7 @@ public final class MasterProcedureProtos {
}
}
/**
- * repeated .RegionInfo region_info = 3;
+ * repeated .RegionInfo region_info = 5;
*/
public Builder setRegionInfo(
int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) {
@@ -1212,7 +2808,7 @@ public final class MasterProcedureProtos {
return this;
}
/**
- * repeated .RegionInfo region_info = 3;
+ * repeated .RegionInfo region_info = 5;
*/
public Builder setRegionInfo(
int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) {
@@ -1226,7 +2822,7 @@ public final class MasterProcedureProtos {
return this;
}
/**
- * repeated .RegionInfo region_info = 3;
+ * repeated .RegionInfo region_info = 5;
*/
public Builder addRegionInfo(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) {
if (regionInfoBuilder_ == null) {
@@ -1242,7 +2838,7 @@ public final class MasterProcedureProtos {
return this;
}
/**
- * repeated .RegionInfo region_info = 3;
+ * repeated .RegionInfo region_info = 5;
*/
public Builder addRegionInfo(
int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) {
@@ -1259,7 +2855,7 @@ public final class MasterProcedureProtos {
return this;
}
/**
- * repeated .RegionInfo region_info = 3;
+ * repeated .RegionInfo region_info = 5;
*/
public Builder addRegionInfo(
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) {
@@ -1273,7 +2869,7 @@ public final class MasterProcedureProtos {
return this;
}
/**
- * repeated .RegionInfo region_info = 3;
+ * repeated .RegionInfo region_info = 5;
*/
public Builder addRegionInfo(
int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) {
@@ -1287,7 +2883,7 @@ public final class MasterProcedureProtos {
return this;
}
/**
- * repeated .RegionInfo region_info = 3;
+ * repeated .RegionInfo region_info = 5;
*/
public Builder addAllRegionInfo(
java.lang.Iterable<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo> values) {
@@ -1301,12 +2897,12 @@ public final class MasterProcedureProtos {
return this;
}
/**
- * repeated .RegionInfo region_info = 3;
+ * repeated .RegionInfo region_info = 5;
*/
public Builder clearRegionInfo() {
if (regionInfoBuilder_ == null) {
regionInfo_ = java.util.Collections.emptyList();
- bitField0_ = (bitField0_ & ~0x00000004);
+ bitField0_ = (bitField0_ & ~0x00000010);
onChanged();
} else {
regionInfoBuilder_.clear();
@@ -1314,7 +2910,7 @@ public final class MasterProcedureProtos {
return this;
}
/**
- * repeated .RegionInfo region_info = 3;
+ * repeated .RegionInfo region_info = 5;
*/
public Builder removeRegionInfo(int index) {
if (regionInfoBuilder_ == null) {
@@ -1327,14 +2923,14 @@ public final class MasterProcedureProtos {
return this;
}
/**
- * repeated .RegionInfo region_info = 3;
+ * repeated .RegionInfo region_info = 5;
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder getRegionInfoBuilder(
int index) {
return getRegionInfoFieldBuilder().getBuilder(index);
}
/**
- * repeated .RegionInfo region_info = 3;
+ * repeated .RegionInfo region_info = 5;
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder(
int index) {
@@ -1344,7 +2940,7 @@ public final class MasterProcedureProtos {
}
}
/**
- * repeated .RegionInfo region_info = 3;
+ * repeated .RegionInfo region_info = 5;
*/
public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>
getRegionInfoOrBuilderList() {
@@ -1355,14 +2951,14 @@ public final class MasterProcedureProtos {
}
}
/**
- * repeated .RegionInfo region_info = 3;
+ * repeated .RegionInfo region_info = 5;
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder addRegionInfoBuilder() {
return getRegionInfoFieldBuilder().addBuilder(
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance());
}
/**
- * repeated .RegionInfo region_info = 3;
+ * repeated .RegionInfo region_info = 5;
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder addRegionInfoBuilder(
int index) {
@@ -1370,7 +2966,7 @@ public final class MasterProcedureProtos {
index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance());
}
/**
- * repeated .RegionInfo region_info = 3;
+ * repeated .RegionInfo region_info = 5;
*/
public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder>
getRegionInfoBuilderList() {
@@ -1383,7 +2979,7 @@ public final class MasterProcedureProtos {
regionInfoBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>(
regionInfo_,
- ((bitField0_ & 0x00000004) == 0x00000004),
+ ((bitField0_ & 0x00000010) == 0x00000010),
getParentForChildren(),
isClean());
regionInfo_ = null;
@@ -1391,15 +2987,15 @@ public final class MasterProcedureProtos {
return regionInfoBuilder_;
}
- // @@protoc_insertion_point(builder_scope:CreateTableStateData)
+ // @@protoc_insertion_point(builder_scope:ModifyTableMessage)
}
static {
- defaultInstance = new CreateTableStateData(true);
+ defaultInstance = new ModifyTableMessage(true);
defaultInstance.initFields();
}
- // @@protoc_insertion_point(class_scope:CreateTableStateData)
+ // @@protoc_insertion_point(class_scope:ModifyTableMessage)
}
public interface DeleteTableStateDataOrBuilder
@@ -2566,6 +4162,11 @@ public final class MasterProcedureProtos {
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_CreateTableStateData_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_ModifyTableMessage_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_ModifyTableMessage_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
internal_static_DeleteTableStateData_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
@@ -2583,23 +4184,35 @@ public final class MasterProcedureProtos {
"C.proto\"\201\001\n\024CreateTableStateData\022#\n\tuser" +
"_info\030\001 \002(\0132\020.UserInformation\022\"\n\014table_s" +
"chema\030\002 \002(\0132\014.TableSchema\022 \n\013region_info" +
- "\030\003 \003(\0132\013.RegionInfo\"}\n\024DeleteTableStateD" +
- "ata\022#\n\tuser_info\030\001 \002(\0132\020.UserInformation" +
- "\022\036\n\ntable_name\030\002 \002(\0132\n.TableName\022 \n\013regi" +
- "on_info\030\003 \003(\0132\013.RegionInfo*\330\001\n\020CreateTab" +
- "leState\022\036\n\032CREATE_TABLE_PRE_OPERATION\020\001\022" +
- " \n\034CREATE_TABLE_WRITE_FS_LAYOUT\020\002\022\034\n\030CRE",
- "ATE_TABLE_ADD_TO_META\020\003\022\037\n\033CREATE_TABLE_" +
- "ASSIGN_REGIONS\020\004\022\"\n\036CREATE_TABLE_UPDATE_" +
- "DESC_CACHE\020\005\022\037\n\033CREATE_TABLE_POST_OPERAT" +
- "ION\020\006*\337\001\n\020DeleteTableState\022\036\n\032DELETE_TAB" +
- "LE_PRE_OPERATION\020\001\022!\n\035DELETE_TABLE_REMOV" +
- "E_FROM_META\020\002\022 \n\034DELETE_TABLE_CLEAR_FS_L" +
- "AYOUT\020\003\022\"\n\036DELETE_TABLE_UPDATE_DESC_CACH" +
- "E\020\004\022!\n\035DELETE_TABLE_UNASSIGN_REGIONS\020\005\022\037" +
- "\n\033DELETE_TABLE_POST_OPERATION\020\006BK\n*org.a" +
- "pache.hadoop.hbase.protobuf.generatedB\025M",
- "asterProcedureProtosH\001\210\001\001\240\001\001"
+ "\030\003 \003(\0132\013.RegionInfo\"\337\001\n\022ModifyTableMessa" +
+ "ge\022#\n\tuser_info\030\001 \002(\0132\020.UserInformation\022" +
+ "-\n\027unmodified_table_schema\030\002 \001(\0132\014.Table" +
+ "Schema\022+\n\025modified_table_schema\030\003 \002(\0132\014." +
+ "TableSchema\022&\n\036delete_column_family_in_m" +
+ "odify\030\004 \002(\010\022 \n\013region_info\030\005 \003(\0132\013.Regio",
+ "nInfo\"}\n\024DeleteTableStateData\022#\n\tuser_in" +
+ "fo\030\001 \002(\0132\020.UserInformation\022\036\n\ntable_name" +
+ "\030\002 \002(\0132\n.TableName\022 \n\013region_info\030\003 \003(\0132" +
+ "\013.RegionInfo*\330\001\n\020CreateTableState\022\036\n\032CRE" +
+ "ATE_TABLE_PRE_OPERATION\020\001\022 \n\034CREATE_TABL" +
+ "E_WRITE_FS_LAYOUT\020\002\022\034\n\030CREATE_TABLE_ADD_" +
+ "TO_META\020\003\022\037\n\033CREATE_TABLE_ASSIGN_REGIONS" +
+ "\020\004\022\"\n\036CREATE_TABLE_UPDATE_DESC_CACHE\020\005\022\037" +
+ "\n\033CREATE_TABLE_POST_OPERATION\020\006*\207\002\n\020Modi" +
+ "fyTableState\022\030\n\024MODIFY_TABLE_PREPARE\020\001\022\036",
+ "\n\032MODIFY_TABLE_PRE_OPERATION\020\002\022(\n$MODIFY" +
+ "_TABLE_UPDATE_TABLE_DESCRIPTOR\020\003\022&\n\"MODI" +
+ "FY_TABLE_REMOVE_REPLICA_COLUMN\020\004\022!\n\035MODI" +
+ "FY_TABLE_DELETE_FS_LAYOUT\020\005\022\037\n\033MODIFY_TA" +
+ "BLE_POST_OPERATION\020\006\022#\n\037MODIFY_TABLE_REO" +
+ "PEN_ALL_REGIONS\020\007*\337\001\n\020DeleteTableState\022\036" +
+ "\n\032DELETE_TABLE_PRE_OPERATION\020\001\022!\n\035DELETE" +
+ "_TABLE_REMOVE_FROM_META\020\002\022 \n\034DELETE_TABL" +
+ "E_CLEAR_FS_LAYOUT\020\003\022\"\n\036DELETE_TABLE_UPDA" +
+ "TE_DESC_CACHE\020\004\022!\n\035DELETE_TABLE_UNASSIGN",
+ "_REGIONS\020\005\022\037\n\033DELETE_TABLE_POST_OPERATIO" +
+ "N\020\006BK\n*org.apache.hadoop.hbase.protobuf." +
+ "generatedB\025MasterProcedureProtosH\001\210\001\001\240\001\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -2612,8 +4225,14 @@ public final class MasterProcedureProtos {
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_CreateTableStateData_descriptor,
new java.lang.String[] { "UserInfo", "TableSchema", "RegionInfo", });
- internal_static_DeleteTableStateData_descriptor =
+ internal_static_ModifyTableMessage_descriptor =
getDescriptor().getMessageTypes().get(1);
+ internal_static_ModifyTableMessage_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_ModifyTableMessage_descriptor,
+ new java.lang.String[] { "UserInfo", "UnmodifiedTableSchema", "ModifiedTableSchema", "DeleteColumnFamilyInModify", "RegionInfo", });
+ internal_static_DeleteTableStateData_descriptor =
+ getDescriptor().getMessageTypes().get(2);
internal_static_DeleteTableStateData_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_DeleteTableStateData_descriptor,
diff --git a/hbase-protocol/src/main/protobuf/MasterProcedure.proto b/hbase-protocol/src/main/protobuf/MasterProcedure.proto
index 4e9b05e..93af886 100644
--- a/hbase-protocol/src/main/protobuf/MasterProcedure.proto
+++ b/hbase-protocol/src/main/protobuf/MasterProcedure.proto
@@ -58,6 +58,24 @@ message CreateTableStateData {
repeated RegionInfo region_info = 3;
}
+enum ModifyTableState {
+ MODIFY_TABLE_PREPARE = 1;
+ MODIFY_TABLE_PRE_OPERATION = 2;
+ MODIFY_TABLE_UPDATE_TABLE_DESCRIPTOR = 3;
+ MODIFY_TABLE_REMOVE_REPLICA_COLUMN = 4;
+ MODIFY_TABLE_DELETE_FS_LAYOUT = 5;
+ MODIFY_TABLE_POST_OPERATION = 6;
+ MODIFY_TABLE_REOPEN_ALL_REGIONS = 7;
+}
+
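+// Serialized state for ModifyTableProcedure. unmodified_table_schema is
+// optional because it is only captured once the prepare step has read the old
+// descriptor; delete_column_family_in_modify records whether the change drops
+// a column family, in which case the procedure cannot be rolled back past the
+// file-system delete step.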
+message ModifyTableMessage {
+ required UserInformation user_info = 1;
+ optional TableSchema unmodified_table_schema = 2;
+ required TableSchema modified_table_schema = 3;
+ required bool delete_column_family_in_modify = 4;
+ repeated RegionInfo region_info = 5;
+}
+
enum DeleteTableState {
DELETE_TABLE_PRE_OPERATION = 1;
DELETE_TABLE_REMOVE_FROM_META = 2;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index e2e600c..ba739b2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -18,10 +18,6 @@
*/
package org.apache.hadoop.hbase.master;
-import javax.servlet.ServletException;
-import javax.servlet.http.HttpServlet;
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.http.HttpServletResponse;
import java.io.IOException;
import java.io.InterruptedIOException;
import java.lang.reflect.Constructor;
@@ -43,10 +39,11 @@ import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import java.util.regex.Pattern;
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.collect.Maps;
-import com.google.protobuf.Descriptors;
-import com.google.protobuf.Service;
+import javax.servlet.ServletException;
+import javax.servlet.http.HttpServlet;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
@@ -93,7 +90,6 @@ import org.apache.hadoop.hbase.master.cleaner.LogCleaner;
import org.apache.hadoop.hbase.master.handler.DisableTableHandler;
import org.apache.hadoop.hbase.master.handler.DispatchMergingRegionHandler;
import org.apache.hadoop.hbase.master.handler.EnableTableHandler;
-import org.apache.hadoop.hbase.master.handler.ModifyTableHandler;
import org.apache.hadoop.hbase.master.handler.TableAddFamilyHandler;
import org.apache.hadoop.hbase.master.handler.TableDeleteFamilyHandler;
import org.apache.hadoop.hbase.master.handler.TableModifyFamilyHandler;
@@ -102,15 +98,17 @@ import org.apache.hadoop.hbase.master.procedure.CreateTableProcedure;
import org.apache.hadoop.hbase.master.procedure.DeleteTableProcedure;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureConstants;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+import org.apache.hadoop.hbase.master.procedure.ModifyTableProcedure;
import org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch;
+import org.apache.hadoop.hbase.master.procedure.ProcedureSyncWait;
import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
import org.apache.hadoop.hbase.monitoring.MemoryBoundedLogMessageBuffer;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.monitoring.TaskMonitor;
import org.apache.hadoop.hbase.procedure.MasterProcedureManagerHost;
+import org.apache.hadoop.hbase.procedure.flush.MasterFlushTableProcedureManager;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore;
-import org.apache.hadoop.hbase.procedure.flush.MasterFlushTableProcedureManager;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
@@ -146,6 +144,11 @@ import org.mortbay.jetty.Connector;
import org.mortbay.jetty.nio.SelectChannelConnector;
import org.mortbay.jetty.servlet.Context;
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.collect.Maps;
+import com.google.protobuf.Descriptors;
+import com.google.protobuf.Service;
+
/**
* HMaster is the "master server" for HBase. An HBase cluster has one active
* master. If many masters are started, all compete. Whichever wins goes on to
@@ -1728,8 +1731,15 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
if (cpHost != null) {
cpHost.preModifyTable(tableName, descriptor);
}
+
LOG.info(getClientIdAuditPrefix() + " modify " + tableName);
- new ModifyTableHandler(tableName, descriptor, this, this).prepare().process();
+
+ // Execute the operation synchronously - wait until the operation completes before continuing.
+ long procId = this.procedureExecutor.submitProcedure(
+ new ModifyTableProcedure(procedureExecutor.getEnvironment(), descriptor));
+
+ ProcedureSyncWait.waitForProcedureToComplete(procedureExecutor, procId);
+
if (cpHost != null) {
cpHost.postModifyTable(tableName, descriptor);
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
index 78e4c11..de28cdc 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
@@ -583,10 +583,12 @@ public class MasterFileSystem {
Path familyDir = new Path(tableDir,
new Path(region.getEncodedName(), Bytes.toString(familyName)));
if (fs.delete(familyDir, true) == false) {
- throw new IOException("Could not delete family "
- + Bytes.toString(familyName) + " from FileSystem for region "
- + region.getRegionNameAsString() + "(" + region.getEncodedName()
- + ")");
+ if (fs.exists(familyDir)) {
+ throw new IOException("Could not delete family "
+ + Bytes.toString(familyName) + " from FileSystem for region "
+ + region.getRegionNameAsString() + "(" + region.getEncodedName()
+ + ")");
+ }
}
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterDDLOperationHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterDDLOperationHelper.java
new file mode 100644
index 0000000..c6ff1b6
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterDDLOperationHelper.java
@@ -0,0 +1,167 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.procedure;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.NavigableMap;
+import java.util.TreeMap;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableNotDisabledException;
+import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.RegionLocator;
+import org.apache.hadoop.hbase.client.TableState;
+import org.apache.hadoop.hbase.master.AssignmentManager;
+import org.apache.hadoop.hbase.master.BulkReOpen;
+import org.apache.hadoop.hbase.master.MasterFileSystem;
+import org.apache.hadoop.hbase.util.Bytes;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+
+/**
+ * Helper class for schema change procedures
+ */
+@InterfaceAudience.Private
+public final class MasterDDLOperationHelper {
+ private static final Log LOG = LogFactory.getLog(MasterDDLOperationHelper.class);
+
+ private MasterDDLOperationHelper() {}
+
+ /**
+ * Check whether online schema change is allowed from config
+ **/
+ public static boolean isOnlineSchemaChangeAllowed(final MasterProcedureEnv env) {
+ return env.getMasterServices().getConfiguration()
+ .getBoolean("hbase.online.schema.update.enable", false);
+ }
+
+ /**
+ * Check whether a table is modifiable - i.e. it exists and is either offline, or online with the online schema change config enabled
+ * @param env MasterProcedureEnv
+ * @param tableName name of the table
+ * @throws IOException
+ */
+ public static void checkTableModifiable(final MasterProcedureEnv env, final TableName tableName)
+ throws IOException {
+ // Checks whether the table exists
+ if (!MetaTableAccessor.tableExists(env.getMasterServices().getConnection(), tableName)) {
+ throw new TableNotFoundException(tableName);
+ }
+
+ // We only execute this procedure with the table online if the online schema change config is set.
+ if (!env.getMasterServices().getAssignmentManager().getTableStateManager()
+ .isTableState(tableName, TableState.State.DISABLED)
+ && !MasterDDLOperationHelper.isOnlineSchemaChangeAllowed(env)) {
+ throw new TableNotDisabledException(tableName);
+ }
+ }
+
+ /**
+ * Remove the column family from the file system
+ **/
+ public static void deleteColumnFamilyFromFileSystem(
+ final MasterProcedureEnv env,
+ final TableName tableName,
+ List<HRegionInfo> regionInfoList,
+ final byte[] familyName) throws IOException {
+ final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Removing family=" + Bytes.toString(familyName) + " from table=" + tableName);
+ }
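+ // If the caller did not supply the region list (e.g. after a procedure
+ // restart), fetch it from hbase:meta.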
+ if (regionInfoList == null) {
+ regionInfoList = ProcedureSyncWait.getRegionsFromMeta(env, tableName);
+ }
+ for (HRegionInfo hri : regionInfoList) {
+ // Delete the family directory in FS for all the regions one by one
+ mfs.deleteFamilyFromFS(hri, familyName);
+ }
+ }
+
+ /**
+ * Reopen all regions from a table after a schema change operation.
+ **/
+ public static boolean reOpenAllRegions(
+ final MasterProcedureEnv env,
+ final TableName tableName,
+ final List<HRegionInfo> regionInfoList) throws IOException {
+ boolean done = false;
+ LOG.info("Bucketing regions by region server...");
+ List<HRegionLocation> regionLocations = null;
+ Connection connection = env.getMasterServices().getConnection();
+ try (RegionLocator locator = connection.getRegionLocator(tableName)) {
+ regionLocations = locator.getAllRegionLocations();
+ }
+ // Convert List<HRegionLocation> to Map<HRegionInfo, ServerName>.
+ NavigableMap<HRegionInfo, ServerName> hri2Sn = new TreeMap<HRegionInfo, ServerName>();
+ for (HRegionLocation location : regionLocations) {
+ hri2Sn.put(location.getRegionInfo(), location.getServerName());
+ }
+ TreeMap<ServerName, List<HRegionInfo>> serverToRegions = Maps.newTreeMap();
+ List<HRegionInfo> reRegions = new ArrayList<HRegionInfo>();
+ for (HRegionInfo hri : regionInfoList) {
+ ServerName sn = hri2Sn.get(hri);
+ // Skip the offlined split parent region
+ // See HBASE-4578 for more information.
+ if (null == sn) {
+ LOG.info("Skip " + hri);
+ continue;
+ }
+ if (!serverToRegions.containsKey(sn)) {
+ LinkedList<HRegionInfo> hriList = Lists.newLinkedList();
+ serverToRegions.put(sn, hriList);
+ }
+ reRegions.add(hri);
+ serverToRegions.get(sn).add(hri);
+ }
+
+ LOG.info("Reopening " + reRegions.size() + " regions on " + serverToRegions.size()
+ + " region servers.");
+ AssignmentManager am = env.getMasterServices().getAssignmentManager();
+ am.setRegionsToReopen(reRegions);
+ BulkReOpen bulkReopen = new BulkReOpen(env.getMasterServices(), serverToRegions, am);
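+ // bulkReOpen() returns false on timeout; keep retrying until every region
+ // has been reopened or the thread is interrupted.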
+ while (true) {
+ try {
+ if (bulkReopen.bulkReOpen()) {
+ done = true;
+ break;
+ } else {
+ LOG.warn("Timeout before reopening all regions");
+ }
+ } catch (InterruptedException e) {
+ LOG.warn("Reopen was interrupted");
+ // Preserve the interrupt.
+ Thread.currentThread().interrupt();
+ break;
+ }
+ }
+ return done;
+ }
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java
new file mode 100644
index 0000000..f09b686
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java
@@ -0,0 +1,522 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.procedure;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.security.PrivilegedExceptionAction;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableNotDisabledException;
+import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableState;
+import org.apache.hadoop.hbase.executor.EventType;
+import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
+import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableState;
+import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil;
+import org.apache.hadoop.security.UserGroupInformation;
+
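+/**
+ * The procedure to modify an existing table. It walks the ModifyTableState
+ * state machine: prepare, pre-operation coprocessor hook, update the table
+ * descriptor, update replica columns, delete the files of any removed column
+ * family, post-operation coprocessor hook, and finally reopen all regions if
+ * the table is online. Rollback is supported only up to the point where files
+ * are deleted from the file system.
+ */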
+@InterfaceAudience.Private
+public class ModifyTableProcedure
+ extends StateMachineProcedure<MasterProcedureEnv, ModifyTableState>
+ implements TableProcedureInterface {
+ private static final Log LOG = LogFactory.getLog(ModifyTableProcedure.class);
+
+ private AtomicBoolean aborted = new AtomicBoolean(false);
+ private HTableDescriptor unmodifiedHTableDescriptor = null;
+ private HTableDescriptor modifiedHTableDescriptor;
+ private UserGroupInformation user;
+ private List<HRegionInfo> regionInfoList;
+ private boolean deleteColumnFamilyInModify;
+
+ private Boolean traceEnabled = null;
+
+ public ModifyTableProcedure() {
+ initialize();
+ }
+
+ public ModifyTableProcedure(
+ final MasterProcedureEnv env,
+ final HTableDescriptor htd) throws IOException {
+ initialize();
+ this.modifiedHTableDescriptor = htd;
+ this.user = env.getRequestUser().getUGI();
+ }
+
+ private void initialize() {
+ this.unmodifiedHTableDescriptor = null;
+ this.regionInfoList = null;
+ this.traceEnabled = null;
+ this.deleteColumnFamilyInModify = false;
+ }
+
+ @Override
+ protected Flow executeFromState(final MasterProcedureEnv env, final ModifyTableState state) {
+ if (isTraceEnabled()) {
+ LOG.trace(this + " execute state=" + state);
+ }
+
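+ // Each state performs one step and names its successor; the
+ // DELETE_FS_LAYOUT step is skipped when no column family was removed.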
+ try {
+ switch (state) {
+ case MODIFY_TABLE_PREPARE:
+ prepareModify(env);
+ setNextState(ModifyTableState.MODIFY_TABLE_PRE_OPERATION);
+ break;
+ case MODIFY_TABLE_PRE_OPERATION:
+ preModify(env, state);
+ setNextState(ModifyTableState.MODIFY_TABLE_UPDATE_TABLE_DESCRIPTOR);
+ break;
+ case MODIFY_TABLE_UPDATE_TABLE_DESCRIPTOR:
+ updateTableDescriptor(env);
+ setNextState(ModifyTableState.MODIFY_TABLE_REMOVE_REPLICA_COLUMN);
+ break;
+ case MODIFY_TABLE_REMOVE_REPLICA_COLUMN:
+ updateReplicaColumnsIfNeeded(env, unmodifiedHTableDescriptor, modifiedHTableDescriptor);
+ if (deleteColumnFamilyInModify) {
+ setNextState(ModifyTableState.MODIFY_TABLE_DELETE_FS_LAYOUT);
+ } else {
+ setNextState(ModifyTableState.MODIFY_TABLE_POST_OPERATION);
+ }
+ break;
+ case MODIFY_TABLE_DELETE_FS_LAYOUT:
+ deleteFromFs(env, unmodifiedHTableDescriptor, modifiedHTableDescriptor);
+ setNextState(ModifyTableState.MODIFY_TABLE_POST_OPERATION);
+ break;
+ case MODIFY_TABLE_POST_OPERATION:
+ postModify(env, state);
+ setNextState(ModifyTableState.MODIFY_TABLE_REOPEN_ALL_REGIONS);
+ break;
+ case MODIFY_TABLE_REOPEN_ALL_REGIONS:
+ reOpenAllRegionsIfTableIsOnline(env);
+ return Flow.NO_MORE_STATE;
+ default:
+ throw new UnsupportedOperationException("unhandled state=" + state);
+ }
+ } catch (InterruptedException|IOException e) {
+ if (!isRollbackSupported(state)) {
+ // We have reached a state that cannot be rolled back; we just need to keep retrying.
+ LOG.warn("Error trying to modify table=" + getTableName() + " state=" + state, e);
+ } else {
+ LOG.error("Error trying to modify table=" + getTableName() + " state=" + state, e);
+ setFailure("master-modify-table", e);
+ }
+ }
+ return Flow.HAS_MORE_STATE;
+ }
+
+ @Override
+ protected void rollbackState(final MasterProcedureEnv env, final ModifyTableState state)
+ throws IOException {
+ if (isTraceEnabled()) {
+ LOG.trace(this + " rollback state=" + state);
+ }
+ try {
+ switch (state) {
+ case MODIFY_TABLE_REOPEN_ALL_REGIONS:
+ break; // Nothing to undo.
+ case MODIFY_TABLE_POST_OPERATION:
+ // TODO-MAYBE: call the coprocessor event to un-modify?
+ break;
+ case MODIFY_TABLE_DELETE_FS_LAYOUT:
+ // Once we reach this state we cannot roll back, as it is tricky to undelete
+ // the deleted files. We are not supposed to reach here; throw an exception
+ // so that we know there is a code bug to investigate.
+ assert deleteColumnFamilyInModify;
+ throw new UnsupportedOperationException(this + " rollback of state=" + state
+ + " is unsupported.");
+ case MODIFY_TABLE_REMOVE_REPLICA_COLUMN:
+ // Undo the replica column update.
+ updateReplicaColumnsIfNeeded(env, modifiedHTableDescriptor, unmodifiedHTableDescriptor);
+ break;
+ case MODIFY_TABLE_UPDATE_TABLE_DESCRIPTOR:
+ restoreTableDescriptor(env);
+ break;
+ case MODIFY_TABLE_PRE_OPERATION:
+ // TODO-MAYBE: call the coprocessor event to un-modify?
+ break;
+ case MODIFY_TABLE_PREPARE:
+ break; // Nothing to undo.
+ default:
+ throw new UnsupportedOperationException("unhandled state=" + state);
+ }
+ } catch (IOException e) {
+ LOG.warn("Fail trying to rollback modify table=" + getTableName() + " state=" + state, e);
+ throw e;
+ }
+ }
+
+ @Override
+ protected ModifyTableState getState(final int stateId) {
+ return ModifyTableState.valueOf(stateId);
+ }
+
+ @Override
+ protected int getStateId(final ModifyTableState state) {
+ return state.getNumber();
+ }
+
+ @Override
+ protected ModifyTableState getInitialState() {
+ return ModifyTableState.MODIFY_TABLE_PREPARE;
+ }
+
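+ /**
+ * Abort is honored lazily: abort() only sets a flag, and the next call to
+ * setNextState() fails the procedure instead of advancing when the
+ * requested state can still be rolled back.
+ */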
+ @Override
+ protected void setNextState(final ModifyTableState state) {
+ if (aborted.get() && isRollbackSupported(state)) {
+ setAbortFailure("modify-table", "abort requested");
+ } else {
+ super.setNextState(state);
+ }
+ }
+
+ @Override
+ public boolean abort(final MasterProcedureEnv env) {
+ aborted.set(true);
+ return true;
+ }
+
+ @Override
+ protected boolean acquireLock(final MasterProcedureEnv env) {
+ if (!env.isInitialized()) return false;
+ return env.getProcedureQueue().tryAcquireTableWrite(
+ getTableName(),
+ EventType.C_M_MODIFY_TABLE.toString());
+ }
+
+ @Override
+ protected void releaseLock(final MasterProcedureEnv env) {
+ env.getProcedureQueue().releaseTableWrite(getTableName());
+ }
+
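+ /**
+ * Procedure state is persisted as a length-delimited ModifyTableMessage
+ * (writeDelimitedTo / parseDelimitedFrom).
+ */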
+ @Override
+ public void serializeStateData(final OutputStream stream) throws IOException {
+ super.serializeStateData(stream);
+
+ MasterProcedureProtos.ModifyTableMessage.Builder modifyTableMsg =
+ MasterProcedureProtos.ModifyTableMessage.newBuilder()
+ .setUserInfo(MasterProcedureUtil.toProtoUserInfo(user))
+ .setModifiedTableSchema(modifiedHTableDescriptor.convert())
+ .setDeleteColumnFamilyInModify(deleteColumnFamilyInModify);
+
+ if (unmodifiedHTableDescriptor != null) {
+ modifyTableMsg.setUnmodifiedTableSchema(unmodifiedHTableDescriptor.convert());
+ }
+
+ if (regionInfoList != null) {
+ for (HRegionInfo hri : regionInfoList) {
+ modifyTableMsg.addRegionInfo(HRegionInfo.convert(hri));
+ }
+ }
+
+ modifyTableMsg.build().writeDelimitedTo(stream);
+ }
+
+ @Override
+ public void deserializeStateData(final InputStream stream) throws IOException {
+ super.deserializeStateData(stream);
+
+ MasterProcedureProtos.ModifyTableMessage modifyTableMsg =
+ MasterProcedureProtos.ModifyTableMessage.parseDelimitedFrom(stream);
+ user = MasterProcedureUtil.toUserInfo(modifyTableMsg.getUserInfo());
+ modifiedHTableDescriptor = HTableDescriptor.convert(modifyTableMsg.getModifiedTableSchema());
+ deleteColumnFamilyInModify = modifyTableMsg.getDeleteColumnFamilyInModify();
+
+ if (modifyTableMsg.hasUnmodifiedTableSchema()) {
+ unmodifiedHTableDescriptor =
+ HTableDescriptor.convert(modifyTableMsg.getUnmodifiedTableSchema());
+ }
+
+ if (modifyTableMsg.getRegionInfoCount() == 0) {
+ regionInfoList = null;
+ } else {
+ regionInfoList = new ArrayList<HRegionInfo>(modifyTableMsg.getRegionInfoCount());
+ for (HBaseProtos.RegionInfo hri : modifyTableMsg.getRegionInfoList()) {
+ regionInfoList.add(HRegionInfo.convert(hri));
+ }
+ }
+ }
+
+ @Override
+ public void toStringClassDetails(StringBuilder sb) {
+ sb.append(getClass().getSimpleName());
+ sb.append(" (table=");
+ sb.append(getTableName());
+ sb.append(") user=");
+ sb.append(user);
+ }
+
+ @Override
+ public TableName getTableName() {
+ return modifiedHTableDescriptor.getTableName();
+ }
+
+ @Override
+ public TableOperationType getTableOperationType() {
+ return TableOperationType.EDIT;
+ }
+
+ /**
+ * Check preconditions before taking any real action to modify the table.
+ * @param env MasterProcedureEnv
+ * @throws IOException
+ */
+ private void prepareModify(final MasterProcedureEnv env) throws IOException {
+ // Checks whether the table exists
+ if (!MetaTableAccessor.tableExists(env.getMasterServices().getConnection(), getTableName())) {
+ throw new TableNotFoundException(getTableName());
+ }
+
+ // In order to update the descriptor, we need to retrieve the old descriptor for comparison.
+ this.unmodifiedHTableDescriptor =
+ env.getMasterServices().getTableDescriptors().get(getTableName());
+
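+ // If the table is enabled, the modify can only proceed when online schema change is
+ // allowed, and the region replication count must stay unchanged.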
+ if (env.getMasterServices().getAssignmentManager().getTableStateManager()
+ .isTableState(getTableName(), TableState.State.ENABLED)) {
+ // Only allow modifying an enabled table when the online schema change config is set.
+ if (!MasterDDLOperationHelper.isOnlineSchemaChangeAllowed(env)) {
+ throw new TableNotDisabledException(getTableName());
+ }
+
+ if (modifiedHTableDescriptor.getRegionReplication() != unmodifiedHTableDescriptor
+ .getRegionReplication()) {
+ throw new IOException("REGION_REPLICATION change is not supported for enabled tables");
+ }
+ }
+
+ // Get the region info list before the real action.
+ this.regionInfoList = ProcedureSyncWait.getRegionsFromMeta(env, getTableName());
+
+ // Find out whether all column families in unmodifiedHTableDescriptor also exist in
+ // the modifiedHTableDescriptor. This is to determine whether it is safe to rollback.
+ final Set<byte[]> oldFamilies = unmodifiedHTableDescriptor.getFamiliesKeys();
+ final Set<byte[]> newFamilies = modifiedHTableDescriptor.getFamiliesKeys();
+ for (byte[] familyName : oldFamilies) {
+ if (!newFamilies.contains(familyName)) {
+ this.deleteColumnFamilyInModify = true;
+ break;
+ }
+ }
+ }
+
+ /**
+ * Action before modifying table.
+ * @param env MasterProcedureEnv
+ * @param state the procedure state
+ * @throws IOException
+ * @throws InterruptedException
+ */
+ private void preModify(final MasterProcedureEnv env, final ModifyTableState state)
+ throws IOException, InterruptedException {
+ runCoprocessorAction(env, state);
+ }
+
+ /**
+ * Update descriptor
+ * @param env MasterProcedureEnv
+ * @throws IOException
+ **/
+ private void updateTableDescriptor(final MasterProcedureEnv env) throws IOException {
+ env.getMasterServices().getTableDescriptors().add(modifiedHTableDescriptor);
+ }
+
+ /**
+ * Undo the descriptor change (for rollback)
+ * @param env MasterProcedureEnv
+ * @throws IOException
+ **/
+ private void restoreTableDescriptor(final MasterProcedureEnv env) throws IOException {
+ env.getMasterServices().getTableDescriptors().add(unmodifiedHTableDescriptor);
+
+ // delete any new column families from the modifiedHTableDescriptor.
+ deleteFromFs(env, modifiedHTableDescriptor, unmodifiedHTableDescriptor);
+
+ // Make sure regions are opened after table descriptor is updated.
+ reOpenAllRegionsIfTableIsOnline(env);
+ }
+
+ /**
+ * Removes from HDFS the families that are no longer present in the new table descriptor.
+ * @param env MasterProcedureEnv
+ * @throws IOException
+ */
+ private void deleteFromFs(final MasterProcedureEnv env,
+ final HTableDescriptor oldHTableDescriptor, final HTableDescriptor newHTableDescriptor)
+ throws IOException {
+ final Set<byte[]> oldFamilies = oldHTableDescriptor.getFamiliesKeys();
+ final Set<byte[]> newFamilies = newHTableDescriptor.getFamiliesKeys();
+ for (byte[] familyName : oldFamilies) {
+ if (!newFamilies.contains(familyName)) {
+ MasterDDLOperationHelper.deleteColumnFamilyFromFileSystem(
+ env,
+ getTableName(),
+ regionInfoList,
+ familyName);
+ }
+ }
+ }
+
+ /**
+ * Update replica column families if necessary.
+ * @param env MasterProcedureEnv
+ * @throws IOException
+ */
+ private void updateReplicaColumnsIfNeeded(
+ final MasterProcedureEnv env,
+ final HTableDescriptor oldHTableDescriptor,
+ final HTableDescriptor newHTableDescriptor) throws IOException {
+ final int oldReplicaCount = oldHTableDescriptor.getRegionReplication();
+ final int newReplicaCount = newHTableDescriptor.getRegionReplication();
+
+ if (newReplicaCount < oldReplicaCount) {
+ Set<byte[]> tableRows = new HashSet<byte[]>();
+ Connection connection = env.getMasterServices().getConnection();
+ Scan scan = MetaTableAccessor.getScanForTableName(connection, getTableName());
+ scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
+
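+ // Collect the meta rows of this table so the entries of the dropped replicas can be removed.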
+ try (Table metaTable = connection.getTable(TableName.META_TABLE_NAME)) {
+ ResultScanner resScanner = metaTable.getScanner(scan);
+ for (Result result : resScanner) {
+ tableRows.add(result.getRow());
+ }
+ MetaTableAccessor.removeRegionReplicasFromMeta(
+ tableRows,
+ newReplicaCount,
+ oldReplicaCount - newReplicaCount,
+ connection);
+ }
+ }
+
+ // Setup replication for region replicas if needed
+ if (newReplicaCount > 1 && oldReplicaCount <= 1) {
+ ServerRegionReplicaUtil.setupRegionReplicaReplication(env.getMasterConfiguration());
+ }
+ }
+
+ /**
+ * Action after modifying table.
+ * @param env MasterProcedureEnv
+ * @param state the procedure state
+ * @throws IOException
+ * @throws InterruptedException
+ */
+ private void postModify(final MasterProcedureEnv env, final ModifyTableState state)
+ throws IOException, InterruptedException {
+ runCoprocessorAction(env, state);
+ }
+
+ /**
+ * Last action from the procedure - executed when online schema change is supported.
+ * @param env MasterProcedureEnv
+ * @throws IOException
+ */
+ private void reOpenAllRegionsIfTableIsOnline(final MasterProcedureEnv env) throws IOException {
+ // This operation only runs when the table is enabled.
+ if (!env.getMasterServices().getAssignmentManager().getTableStateManager()
+ .isTableState(getTableName(), TableState.State.ENABLED)) {
+ return;
+ }
+
+ if (MasterDDLOperationHelper.reOpenAllRegions(env, getTableName(), regionInfoList)) {
+ LOG.info("Completed modify table operation on table " + getTableName());
+ } else {
+ LOG.warn("Error on reopening the regions on table " + getTableName());
+ }
+ }
+
+ /**
+ * The procedure could be restarted from a different machine. If the variable is null, we need
+ * to retrieve it.
+ * @return whether the trace is enabled
+ */
+ private Boolean isTraceEnabled() {
+ if (traceEnabled == null) {
+ traceEnabled = LOG.isTraceEnabled();
+ }
+ return traceEnabled;
+ }
+
+ /**
+ * Coprocessor Action.
+ * @param env MasterProcedureEnv
+ * @param state the procedure state
+ * @throws IOException
+ * @throws InterruptedException
+ */
+ private void runCoprocessorAction(final MasterProcedureEnv env, final ModifyTableState state)
+ throws IOException, InterruptedException {
+ final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
+ if (cpHost != null) {
+ user.doAs(new PrivilegedExceptionAction<Void>() {
+ @Override
+ public Void run() throws Exception {
+ switch (state) {
+ case MODIFY_TABLE_PRE_OPERATION:
+ cpHost.preModifyTableHandler(getTableName(), modifiedHTableDescriptor);
+ break;
+ case MODIFY_TABLE_POST_OPERATION:
+ cpHost.postModifyTableHandler(getTableName(), modifiedHTableDescriptor);
+ break;
+ default:
+ throw new UnsupportedOperationException(this + " unhandled state=" + state);
+ }
+ return null;
+ }
+ });
+ }
+ }
+
+ /*
+ * Check whether the given state can be rolled back.
+ */
+ private boolean isRollbackSupported(final ModifyTableState state) {
+ if (deleteColumnFamilyInModify) {
+ switch (state) {
+ case MODIFY_TABLE_DELETE_FS_LAYOUT:
+ case MODIFY_TABLE_POST_OPERATION:
+ case MODIFY_TABLE_REOPEN_ALL_REGIONS:
+ // It is not safe to rollback once we reach these states.
+ return false;
+ default:
+ break;
+ }
+ }
+ return true;
+ }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyTableProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyTableProcedure.java
new file mode 100644
index 0000000..af29338
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyTableProcedure.java
@@ -0,0 +1,403 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.procedure;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
+import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableState;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category({MasterTests.class, MediumTests.class})
+public class TestModifyTableProcedure {
+ private static final Log LOG = LogFactory.getLog(TestModifyTableProcedure.class);
+
+ protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+
+ private static void setupConf(Configuration conf) {
+ conf.setInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, 1);
+ }
+
+ @BeforeClass
+ public static void setupCluster() throws Exception {
+ setupConf(UTIL.getConfiguration());
+ UTIL.startMiniCluster(1);
+ }
+
+ @AfterClass
+ public static void cleanupTest() throws Exception {
+ try {
+ UTIL.shutdownMiniCluster();
+ } catch (Exception e) {
+ LOG.warn("failure shutting down cluster", e);
+ }
+ }
+
+ @Before
+ public void setup() throws Exception {
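+ // Make sure no kill/restart toggle leaks in from a previous test.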
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(getMasterProcedureExecutor(), false);
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(getMasterProcedureExecutor(), false);
+ for (HTableDescriptor htd: UTIL.getHBaseAdmin().listTables()) {
+ LOG.info("Tear down, remove table=" + htd.getTableName());
+ UTIL.deleteTable(htd.getTableName());
+ }
+ }
+
+ @Test(timeout=60000)
+ public void testModifyTable() throws Exception {
+ final TableName tableName = TableName.valueOf("testModifyTable");
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+
+ MasterProcedureTestingUtility.createTable(procExec, tableName, null, "cf");
+ UTIL.getHBaseAdmin().disableTable(tableName);
+
+ // Modify the table descriptor
+ HTableDescriptor htd = new HTableDescriptor(UTIL.getHBaseAdmin().getTableDescriptor(tableName));
+
+ // Test 1: Modify 1 property
+ long newMaxFileSize = htd.getMaxFileSize() * 2;
+ htd.setMaxFileSize(newMaxFileSize);
+ htd.setRegionReplication(3);
+
+ long procId1 = ProcedureTestingUtility.submitAndWait(
+ procExec, new ModifyTableProcedure(procExec.getEnvironment(), htd));
+ ProcedureTestingUtility.assertProcNotFailed(procExec.getResult(procId1));
+
+ HTableDescriptor currentHtd = UTIL.getHBaseAdmin().getTableDescriptor(tableName);
+ assertEquals(newMaxFileSize, currentHtd.getMaxFileSize());
+
+ // Test 2: Modify multiple properties
+ boolean newReadOnlyOption = !htd.isReadOnly();
+ long newMemStoreFlushSize = htd.getMemStoreFlushSize() * 2;
+ htd.setReadOnly(newReadOnlyOption);
+ htd.setMemStoreFlushSize(newMemStoreFlushSize);
+
+ long procId2 = ProcedureTestingUtility.submitAndWait(
+ procExec, new ModifyTableProcedure(procExec.getEnvironment(), htd));
+ ProcedureTestingUtility.assertProcNotFailed(procExec.getResult(procId2));
+
+ currentHtd = UTIL.getHBaseAdmin().getTableDescriptor(tableName);
+ assertEquals(newReadOnlyOption, currentHtd.isReadOnly());
+ assertEquals(newMemStoreFlushSize, currentHtd.getMemStoreFlushSize());
+ }
+
+ @Test(timeout = 60000)
+ public void testModifyTableAddCF() throws Exception {
+ final TableName tableName = TableName.valueOf("testModifyTableAddCF");
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+
+ MasterProcedureTestingUtility.createTable(procExec, tableName, null, "cf1");
+ HTableDescriptor currentHtd = UTIL.getHBaseAdmin().getTableDescriptor(tableName);
+ assertEquals(1, currentHtd.getFamiliesKeys().size());
+
+ // Test 1: Modify the table descriptor online
+ String cf2 = "cf2";
+ HTableDescriptor htd = new HTableDescriptor(UTIL.getHBaseAdmin().getTableDescriptor(tableName));
+ htd.addFamily(new HColumnDescriptor(cf2));
+
+ long procId = ProcedureTestingUtility.submitAndWait(
+ procExec, new ModifyTableProcedure(procExec.getEnvironment(), htd));
+ ProcedureTestingUtility.assertProcNotFailed(procExec.getResult(procId));
+
+ currentHtd = UTIL.getHBaseAdmin().getTableDescriptor(tableName);
+ assertEquals(2, currentHtd.getFamiliesKeys().size());
+ assertTrue(currentHtd.hasFamily(cf2.getBytes()));
+
+ // Test 2: Modify the table descriptor offline
+ UTIL.getHBaseAdmin().disableTable(tableName);
+ ProcedureTestingUtility.waitNoProcedureRunning(procExec);
+ String cf3 = "cf3";
+ HTableDescriptor htd2 =
+ new HTableDescriptor(UTIL.getHBaseAdmin().getTableDescriptor(tableName));
+ htd2.addFamily(new HColumnDescriptor(cf3));
+
+ long procId2 =
+ ProcedureTestingUtility.submitAndWait(procExec,
+ new ModifyTableProcedure(procExec.getEnvironment(), htd2));
+ ProcedureTestingUtility.assertProcNotFailed(procExec.getResult(procId2));
+
+ currentHtd = UTIL.getHBaseAdmin().getTableDescriptor(tableName);
+ assertTrue(currentHtd.hasFamily(cf3.getBytes()));
+ assertEquals(3, currentHtd.getFamiliesKeys().size());
+ }
+
+ @Test(timeout = 60000)
+ public void testModifyTableDeleteCF() throws Exception {
+ final TableName tableName = TableName.valueOf("testModifyTableAddCF");
+ final String cf2 = "cf2";
+ final String cf3 = "cf3";
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+
+ MasterProcedureTestingUtility.createTable(procExec, tableName, null, "cf1", cf2, cf3);
+ HTableDescriptor currentHtd = UTIL.getHBaseAdmin().getTableDescriptor(tableName);
+ assertEquals(3, currentHtd.getFamiliesKeys().size());
+
+ // Test 1: Modify the table descriptor
+ HTableDescriptor htd = new HTableDescriptor(UTIL.getHBaseAdmin().getTableDescriptor(tableName));
+ htd.removeFamily(cf2.getBytes());
+
+ long procId = ProcedureTestingUtility.submitAndWait(
+ procExec, new ModifyTableProcedure(procExec.getEnvironment(), htd));
+ ProcedureTestingUtility.assertProcNotFailed(procExec.getResult(procId));
+
+ currentHtd = UTIL.getHBaseAdmin().getTableDescriptor(tableName);
+ assertEquals(2, currentHtd.getFamiliesKeys().size());
+ assertFalse(currentHtd.hasFamily(cf2.getBytes()));
+
+ // Test 2: Modify the table descriptor offline
+ UTIL.getHBaseAdmin().disableTable(tableName);
+ ProcedureTestingUtility.waitNoProcedureRunning(procExec);
+
+ HTableDescriptor htd2 =
+ new HTableDescriptor(UTIL.getHBaseAdmin().getTableDescriptor(tableName));
+ htd2.removeFamily(cf3.getBytes());
+
+ long procId2 =
+ ProcedureTestingUtility.submitAndWait(procExec,
+ new ModifyTableProcedure(procExec.getEnvironment(), htd2));
+ ProcedureTestingUtility.assertProcNotFailed(procExec.getResult(procId2));
+
+ currentHtd = UTIL.getHBaseAdmin().getTableDescriptor(tableName);
+ assertEquals(1, currentHtd.getFamiliesKeys().size());
+ assertFalse(currentHtd.hasFamily(cf3.getBytes()));
+ }
+
+ @Test(timeout=60000)
+ public void testRecoveryAndDoubleExecutionOffline() throws Exception {
+ final TableName tableName = TableName.valueOf("testRecoveryAndDoubleExecutionOffline");
+ final String cf2 = "cf2";
+ final String cf3 = "cf3";
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+
+ // create the table
+ HRegionInfo[] regions = MasterProcedureTestingUtility.createTable(
+ procExec, tableName, null, "cf1", cf3);
+ UTIL.getHBaseAdmin().disableTable(tableName);
+
+ ProcedureTestingUtility.waitNoProcedureRunning(procExec);
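+ // Kill the executor before each state persist so that, on restart, every step is replayed.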
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
+
+ // Modify multiple properties of the table.
+ HTableDescriptor htd = new HTableDescriptor(UTIL.getHBaseAdmin().getTableDescriptor(tableName));
+ boolean newCompactionEnableOption = !htd.isCompactionEnabled();
+ htd.setCompactionEnabled(newCompactionEnableOption);
+ htd.addFamily(new HColumnDescriptor(cf2));
+ htd.removeFamily(cf3.getBytes());
+ htd.setRegionReplication(3);
+
+ // Start the Modify procedure && kill the executor
+ long procId =
+ procExec.submitProcedure(new ModifyTableProcedure(procExec.getEnvironment(), htd));
+
+ // Restart the executor and execute the step twice
+ int numberOfSteps = ModifyTableState.values().length;
+ MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(
+ procExec,
+ procId,
+ numberOfSteps,
+ ModifyTableState.values());
+
+ // Validate descriptor
+ HTableDescriptor currentHtd = UTIL.getHBaseAdmin().getTableDescriptor(tableName);
+ assertEquals(newCompactionEnableOption, currentHtd.isCompactionEnabled());
+ assertEquals(2, currentHtd.getFamiliesKeys().size());
+
+ // cf2 should be added and cf3 should be removed
+ MasterProcedureTestingUtility.validateTableCreation(UTIL.getHBaseCluster().getMaster(),
+ tableName, regions, false, "cf1", cf2);
+ }
+
+ @Test(timeout = 60000)
+ public void testRecoveryAndDoubleExecutionOnline() throws Exception {
+ final TableName tableName = TableName.valueOf("testRecoveryAndDoubleExecutionOnline");
+ final String cf2 = "cf2";
+ final String cf3 = "cf3";
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+
+ // create the table
+ HRegionInfo[] regions = MasterProcedureTestingUtility.createTable(
+ procExec, tableName, null, "cf1", cf3);
+
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
+
+ // Modify multiple properties of the table.
+ HTableDescriptor htd = new HTableDescriptor(UTIL.getHBaseAdmin().getTableDescriptor(tableName));
+ boolean newCompactionEnableOption = !htd.isCompactionEnabled();
+ htd.setCompactionEnabled(newCompactionEnableOption);
+ htd.addFamily(new HColumnDescriptor(cf2));
+ htd.removeFamily(cf3.getBytes());
+
+ // Start the Modify procedure && kill the executor
+ long procId =
+ procExec.submitProcedure(new ModifyTableProcedure(procExec.getEnvironment(), htd));
+
+ // Restart the executor and execute the step twice
+ int numberOfSteps = ModifyTableState.values().length;
+ MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId, numberOfSteps,
+ ModifyTableState.values());
+
+ // Validate descriptor
+ HTableDescriptor currentHtd = UTIL.getHBaseAdmin().getTableDescriptor(tableName);
+ assertEquals(newCompactionEnableOption, currentHtd.isCompactionEnabled());
+ assertEquals(2, currentHtd.getFamiliesKeys().size());
+ assertTrue(currentHtd.hasFamily(cf2.getBytes()));
+ assertFalse(currentHtd.hasFamily(cf3.getBytes()));
+
+ // cf2 should be added and cf3 should be removed
+ MasterProcedureTestingUtility.validateTableCreation(UTIL.getHBaseCluster().getMaster(),
+ tableName, regions, "cf1", cf2);
+ }
+
+ @Test(timeout = 60000)
+ public void testRollbackAndDoubleExecutionOnline() throws Exception {
+ final TableName tableName = TableName.valueOf("testRollbackAndDoubleExecutionOnline");
+ final String familyName = "cf2";
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+
+ // create the table
+ HRegionInfo[] regions = MasterProcedureTestingUtility.createTable(
+ procExec, tableName, null, "cf1");
+
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
+
+ HTableDescriptor htd = new HTableDescriptor(UTIL.getHBaseAdmin().getTableDescriptor(tableName));
+ boolean newCompactionEnableOption = !htd.isCompactionEnabled();
+ htd.setCompactionEnabled(newCompactionEnableOption);
+ htd.addFamily(new HColumnDescriptor(familyName));
+
+ // Start the Modify procedure && kill the executor
+ long procId =
+ procExec.submitProcedure(new ModifyTableProcedure(procExec.getEnvironment(), htd));
+
+ // Restart the executor and rollback the step twice
+ int numberOfSteps = ModifyTableState.values().length - 4; // fail in the middle of the procedure
+ MasterProcedureTestingUtility.testRollbackAndDoubleExecution(
+ procExec,
+ procId,
+ numberOfSteps,
+ ModifyTableState.values());
+
+ // cf2 should not be present
+ MasterProcedureTestingUtility.validateTableCreation(UTIL.getHBaseCluster().getMaster(),
+ tableName, regions, "cf1");
+ }
+
+ @Test(timeout = 60000)
+ public void testRollbackAndDoubleExecutionOffline() throws Exception {
+ final TableName tableName = TableName.valueOf("testRollbackAndDoubleExecutionOffline");
+ final String familyName = "cf2";
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+
+ // create the table
+ HRegionInfo[] regions = MasterProcedureTestingUtility.createTable(
+ procExec, tableName, null, "cf1");
+ UTIL.getHBaseAdmin().disableTable(tableName);
+
+ ProcedureTestingUtility.waitNoProcedureRunning(procExec);
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
+
+ HTableDescriptor htd = new HTableDescriptor(UTIL.getHBaseAdmin().getTableDescriptor(tableName));
+ boolean newCompactionEnableOption = !htd.isCompactionEnabled();
+ htd.setCompactionEnabled(newCompactionEnableOption);
+ htd.addFamily(new HColumnDescriptor(familyName));
+ htd.setRegionReplication(3);
+
+ // Start the Modify procedure && kill the executor
+ long procId =
+ procExec.submitProcedure(new ModifyTableProcedure(procExec.getEnvironment(), htd));
+
+ // Restart the executor and rollback the step twice
+ int numberOfSteps = ModifyTableState.values().length - 4; // fail in the middle of the procedure
+ MasterProcedureTestingUtility.testRollbackAndDoubleExecution(
+ procExec,
+ procId,
+ numberOfSteps,
+ ModifyTableState.values());
+
+ // cf2 should not be present
+ MasterProcedureTestingUtility.validateTableCreation(UTIL.getHBaseCluster().getMaster(),
+ tableName, regions, "cf1");
+ }
+
+ @Test(timeout = 60000)
+ public void testRollbackAndDoubleExecutionAfterPONR() throws Exception {
+ final TableName tableName = TableName.valueOf("testRollbackAndDoubleExecutionAfterPONR");
+ final String familyToAddName = "cf2";
+ final String familyToRemove = "cf1";
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+
+ // create the table
+ HRegionInfo[] regions = MasterProcedureTestingUtility.createTable(
+ procExec, tableName, null, familyToRemove);
+ UTIL.getHBaseAdmin().disableTable(tableName);
+
+ ProcedureTestingUtility.waitNoProcedureRunning(procExec);
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
+
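+ // Removing a column family makes this modify non-rollbackable once the
+ // MODIFY_TABLE_DELETE_FS_LAYOUT state has executed.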
+ HTableDescriptor htd = new HTableDescriptor(UTIL.getHBaseAdmin().getTableDescriptor(tableName));
+ htd.setCompactionEnabled(!htd.isCompactionEnabled());
+ htd.addFamily(new HColumnDescriptor(familyToAddName));
+ htd.removeFamily(familyToRemove.getBytes());
+ htd.setRegionReplication(3);
+
+ // Start the Modify procedure && kill the executor
+ long procId =
+ procExec.submitProcedure(new ModifyTableProcedure(procExec.getEnvironment(), htd));
+
+ // Failing after MODIFY_TABLE_DELETE_FS_LAYOUT should not trigger the rollback.
+ // NOTE: the 5 (the ordinal of MODIFY_TABLE_DELETE_FS_LAYOUT plus one) is hardcoded,
+ // so you have to revisit this test whenever a new step is added.
+ int numberOfSteps = 5;
+ MasterProcedureTestingUtility.testRollbackAndDoubleExecutionAfterPONR(
+ procExec,
+ procId,
+ numberOfSteps,
+ ModifyTableState.values());
+
+ // "cf2" should be added and "cf1" should be removed
+ MasterProcedureTestingUtility.validateTableCreation(UTIL.getHBaseCluster().getMaster(),
+ tableName, regions, false, familyToAddName);
+ }
+
+ private ProcedureExecutor<MasterProcedureEnv> getMasterProcedureExecutor() {
+ return UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor();
+ }
+}