diff --git hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java
index 9bf4c98..d40c1f7 100644
--- hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java
+++ hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java
@@ -499,6 +499,315 @@ public final class MasterProcedureProtos {
}
/**
+ * Protobuf enum {@code hbase.pb.CreateNamespaceState}
+ */
+ public enum CreateNamespaceState
+ implements com.google.protobuf.ProtocolMessageEnum {
+ /**
+ * CREATE_NAMESPACE_PREPARE = 1;
+ */
+ CREATE_NAMESPACE_PREPARE(0, 1),
+ /**
+ * CREATE_NAMESPACE_CREATE_DIRECTORY = 2;
+ */
+ CREATE_NAMESPACE_CREATE_DIRECTORY(1, 2),
+ /**
+ * CREATE_NAMESPACE_INSERT_INTO_NS_TABLE = 3;
+ */
+ CREATE_NAMESPACE_INSERT_INTO_NS_TABLE(2, 3),
+ /**
+ * CREATE_NAMESPACE_UPDATE_ZK = 4;
+ */
+ CREATE_NAMESPACE_UPDATE_ZK(3, 4),
+ /**
+ * CREATE_NAMESPACE_SET_NAMESPACE_QUOTA = 5;
+ */
+ CREATE_NAMESPACE_SET_NAMESPACE_QUOTA(4, 5),
+ ;
+
+ /**
+ * CREATE_NAMESPACE_PREPARE = 1;
+ */
+ public static final int CREATE_NAMESPACE_PREPARE_VALUE = 1;
+ /**
+ * CREATE_NAMESPACE_CREATE_DIRECTORY = 2;
+ */
+ public static final int CREATE_NAMESPACE_CREATE_DIRECTORY_VALUE = 2;
+ /**
+ * CREATE_NAMESPACE_INSERT_INTO_NS_TABLE = 3;
+ */
+ public static final int CREATE_NAMESPACE_INSERT_INTO_NS_TABLE_VALUE = 3;
+ /**
+ * CREATE_NAMESPACE_UPDATE_ZK = 4;
+ */
+ public static final int CREATE_NAMESPACE_UPDATE_ZK_VALUE = 4;
+ /**
+ * CREATE_NAMESPACE_SET_NAMESPACE_QUOTA = 5;
+ */
+ public static final int CREATE_NAMESPACE_SET_NAMESPACE_QUOTA_VALUE = 5;
+
+
+ public final int getNumber() { return value; }
+
+ public static CreateNamespaceState valueOf(int value) {
+ switch (value) {
+ case 1: return CREATE_NAMESPACE_PREPARE;
+ case 2: return CREATE_NAMESPACE_CREATE_DIRECTORY;
+ case 3: return CREATE_NAMESPACE_INSERT_INTO_NS_TABLE;
+ case 4: return CREATE_NAMESPACE_UPDATE_ZK;
+ case 5: return CREATE_NAMESPACE_SET_NAMESPACE_QUOTA;
+ default: return null;
+ }
+ }
+
+ public static com.google.protobuf.Internal.EnumLiteMap<CreateNamespaceState>
+ internalGetValueMap() {
+ return internalValueMap;
+ }
+ private static com.google.protobuf.Internal.EnumLiteMap<CreateNamespaceState>
+ internalValueMap =
+ new com.google.protobuf.Internal.EnumLiteMap<CreateNamespaceState>() {
+ public CreateNamespaceState findValueByNumber(int number) {
+ return CreateNamespaceState.valueOf(number);
+ }
+ };
+
+ public final com.google.protobuf.Descriptors.EnumValueDescriptor
+ getValueDescriptor() {
+ return getDescriptor().getValues().get(index);
+ }
+ public final com.google.protobuf.Descriptors.EnumDescriptor
+ getDescriptorForType() {
+ return getDescriptor();
+ }
+ public static final com.google.protobuf.Descriptors.EnumDescriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(4);
+ }
+
+ private static final CreateNamespaceState[] VALUES = values();
+
+ public static CreateNamespaceState valueOf(
+ com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
+ if (desc.getType() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "EnumValueDescriptor is not for this type.");
+ }
+ return VALUES[desc.getIndex()];
+ }
+
+ private final int index;
+ private final int value;
+
+ private CreateNamespaceState(int index, int value) {
+ this.index = index;
+ this.value = value;
+ }
+
+ // @@protoc_insertion_point(enum_scope:hbase.pb.CreateNamespaceState)
+ }
+
+ /**
+ * Protobuf enum {@code hbase.pb.ModifyNamespaceState}
+ */
+ public enum ModifyNamespaceState
+ implements com.google.protobuf.ProtocolMessageEnum {
+ /**
+ * MODIFY_NAMESPACE_PREPARE = 1;
+ */
+ MODIFY_NAMESPACE_PREPARE(0, 1),
+ /**
+ * MODIFY_NAMESPACE_UPDATE_NS_TABLE = 2;
+ */
+ MODIFY_NAMESPACE_UPDATE_NS_TABLE(1, 2),
+ /**
+ * MODIFY_NAMESPACE_UPDATE_ZK = 3;
+ */
+ MODIFY_NAMESPACE_UPDATE_ZK(2, 3),
+ ;
+
+ /**
+ * MODIFY_NAMESPACE_PREPARE = 1;
+ */
+ public static final int MODIFY_NAMESPACE_PREPARE_VALUE = 1;
+ /**
+ * MODIFY_NAMESPACE_UPDATE_NS_TABLE = 2;
+ */
+ public static final int MODIFY_NAMESPACE_UPDATE_NS_TABLE_VALUE = 2;
+ /**
+ * MODIFY_NAMESPACE_UPDATE_ZK = 3;
+ */
+ public static final int MODIFY_NAMESPACE_UPDATE_ZK_VALUE = 3;
+
+
+ public final int getNumber() { return value; }
+
+ public static ModifyNamespaceState valueOf(int value) {
+ switch (value) {
+ case 1: return MODIFY_NAMESPACE_PREPARE;
+ case 2: return MODIFY_NAMESPACE_UPDATE_NS_TABLE;
+ case 3: return MODIFY_NAMESPACE_UPDATE_ZK;
+ default: return null;
+ }
+ }
+
+ public static com.google.protobuf.Internal.EnumLiteMap<ModifyNamespaceState>
+ internalGetValueMap() {
+ return internalValueMap;
+ }
+ private static com.google.protobuf.Internal.EnumLiteMap<ModifyNamespaceState>
+ internalValueMap =
+ new com.google.protobuf.Internal.EnumLiteMap<ModifyNamespaceState>() {
+ public ModifyNamespaceState findValueByNumber(int number) {
+ return ModifyNamespaceState.valueOf(number);
+ }
+ };
+
+ public final com.google.protobuf.Descriptors.EnumValueDescriptor
+ getValueDescriptor() {
+ return getDescriptor().getValues().get(index);
+ }
+ public final com.google.protobuf.Descriptors.EnumDescriptor
+ getDescriptorForType() {
+ return getDescriptor();
+ }
+ public static final com.google.protobuf.Descriptors.EnumDescriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(5);
+ }
+
+ private static final ModifyNamespaceState[] VALUES = values();
+
+ public static ModifyNamespaceState valueOf(
+ com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
+ if (desc.getType() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "EnumValueDescriptor is not for this type.");
+ }
+ return VALUES[desc.getIndex()];
+ }
+
+ private final int index;
+ private final int value;
+
+ private ModifyNamespaceState(int index, int value) {
+ this.index = index;
+ this.value = value;
+ }
+
+ // @@protoc_insertion_point(enum_scope:hbase.pb.ModifyNamespaceState)
+ }
+
+ /**
+ * Protobuf enum {@code hbase.pb.DeleteNamespaceState}
+ */
+ public enum DeleteNamespaceState
+ implements com.google.protobuf.ProtocolMessageEnum {
+ /**
+ * DELETE_NAMESPACE_PREPARE = 1;
+ */
+ DELETE_NAMESPACE_PREPARE(0, 1),
+ /**
+ * DELETE_NAMESPACE_DELETE_FROM_NS_TABLE = 2;
+ */
+ DELETE_NAMESPACE_DELETE_FROM_NS_TABLE(1, 2),
+ /**
+ * DELETE_NAMESPACE_REMOVE_FROM_ZK = 3;
+ */
+ DELETE_NAMESPACE_REMOVE_FROM_ZK(2, 3),
+ /**
+ * DELETE_NAMESPACE_DELETE_DIRECTORIES = 4;
+ */
+ DELETE_NAMESPACE_DELETE_DIRECTORIES(3, 4),
+ /**
+ * DELETE_NAMESPACE_REMOVE_NAMESPACE_QUOTA = 5;
+ */
+ DELETE_NAMESPACE_REMOVE_NAMESPACE_QUOTA(4, 5),
+ ;
+
+ /**
+ * DELETE_NAMESPACE_PREPARE = 1;
+ */
+ public static final int DELETE_NAMESPACE_PREPARE_VALUE = 1;
+ /**
+ * DELETE_NAMESPACE_DELETE_FROM_NS_TABLE = 2;
+ */
+ public static final int DELETE_NAMESPACE_DELETE_FROM_NS_TABLE_VALUE = 2;
+ /**
+ * DELETE_NAMESPACE_REMOVE_FROM_ZK = 3;
+ */
+ public static final int DELETE_NAMESPACE_REMOVE_FROM_ZK_VALUE = 3;
+ /**
+ * DELETE_NAMESPACE_DELETE_DIRECTORIES = 4;
+ */
+ public static final int DELETE_NAMESPACE_DELETE_DIRECTORIES_VALUE = 4;
+ /**
+ * DELETE_NAMESPACE_REMOVE_NAMESPACE_QUOTA = 5;
+ */
+ public static final int DELETE_NAMESPACE_REMOVE_NAMESPACE_QUOTA_VALUE = 5;
+
+
+ public final int getNumber() { return value; }
+
+ public static DeleteNamespaceState valueOf(int value) {
+ switch (value) {
+ case 1: return DELETE_NAMESPACE_PREPARE;
+ case 2: return DELETE_NAMESPACE_DELETE_FROM_NS_TABLE;
+ case 3: return DELETE_NAMESPACE_REMOVE_FROM_ZK;
+ case 4: return DELETE_NAMESPACE_DELETE_DIRECTORIES;
+ case 5: return DELETE_NAMESPACE_REMOVE_NAMESPACE_QUOTA;
+ default: return null;
+ }
+ }
+
+ public static com.google.protobuf.Internal.EnumLiteMap<DeleteNamespaceState>
+ internalGetValueMap() {
+ return internalValueMap;
+ }
+ private static com.google.protobuf.Internal.EnumLiteMap<DeleteNamespaceState>
+ internalValueMap =
+ new com.google.protobuf.Internal.EnumLiteMap<DeleteNamespaceState>() {
+ public DeleteNamespaceState findValueByNumber(int number) {
+ return DeleteNamespaceState.valueOf(number);
+ }
+ };
+
+ public final com.google.protobuf.Descriptors.EnumValueDescriptor
+ getValueDescriptor() {
+ return getDescriptor().getValues().get(index);
+ }
+ public final com.google.protobuf.Descriptors.EnumDescriptor
+ getDescriptorForType() {
+ return getDescriptor();
+ }
+ public static final com.google.protobuf.Descriptors.EnumDescriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(6);
+ }
+
+ private static final DeleteNamespaceState[] VALUES = values();
+
+ public static DeleteNamespaceState valueOf(
+ com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
+ if (desc.getType() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "EnumValueDescriptor is not for this type.");
+ }
+ return VALUES[desc.getIndex()];
+ }
+
+ private final int index;
+ private final int value;
+
+ private DeleteNamespaceState(int index, int value) {
+ this.index = index;
+ this.value = value;
+ }
+
+ // @@protoc_insertion_point(enum_scope:hbase.pb.DeleteNamespaceState)
+ }
+
+ /**
* Protobuf enum {@code hbase.pb.AddColumnFamilyState}
*/
public enum AddColumnFamilyState
@@ -582,7 +891,7 @@ public final class MasterProcedureProtos {
}
public static final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
- return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(4);
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(7);
}
private static final AddColumnFamilyState[] VALUES = values();
@@ -691,7 +1000,7 @@ public final class MasterProcedureProtos {
}
public static final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
- return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(5);
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(8);
}
private static final ModifyColumnFamilyState[] VALUES = values();
@@ -809,7 +1118,7 @@ public final class MasterProcedureProtos {
}
public static final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
- return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(6);
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(9);
}
private static final DeleteColumnFamilyState[] VALUES = values();
@@ -927,7 +1236,7 @@ public final class MasterProcedureProtos {
}
public static final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
- return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(7);
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(10);
}
private static final EnableTableState[] VALUES = values();
@@ -1045,7 +1354,7 @@ public final class MasterProcedureProtos {
}
public static final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
- return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(8);
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(11);
}
private static final DisableTableState[] VALUES = values();
@@ -1198,7 +1507,7 @@ public final class MasterProcedureProtos {
}
public static final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
- return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(9);
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(12);
}
private static final ServerCrashState[] VALUES = values();
@@ -5839,258 +6148,2323 @@ public final class MasterProcedureProtos {
isClean());
tableName_ = null;
}
- return tableNameBuilder_;
+ return tableNameBuilder_;
+ }
+
+ // repeated .hbase.pb.RegionInfo region_info = 3;
+ private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo> regionInfo_ =
+ java.util.Collections.emptyList();
+ private void ensureRegionInfoIsMutable() {
+ if (!((bitField0_ & 0x00000004) == 0x00000004)) {
+ regionInfo_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo>(regionInfo_);
+ bitField0_ |= 0x00000004;
+ }
+ }
+
+ private com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> regionInfoBuilder_;
+
+ /**
+ * repeated .hbase.pb.RegionInfo region_info = 3;
+ */
+ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo> getRegionInfoList() {
+ if (regionInfoBuilder_ == null) {
+ return java.util.Collections.unmodifiableList(regionInfo_);
+ } else {
+ return regionInfoBuilder_.getMessageList();
+ }
+ }
+ /**
+ * repeated .hbase.pb.RegionInfo region_info = 3;
+ */
+ public int getRegionInfoCount() {
+ if (regionInfoBuilder_ == null) {
+ return regionInfo_.size();
+ } else {
+ return regionInfoBuilder_.getCount();
+ }
+ }
+ /**
+ * repeated .hbase.pb.RegionInfo region_info = 3;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo(int index) {
+ if (regionInfoBuilder_ == null) {
+ return regionInfo_.get(index);
+ } else {
+ return regionInfoBuilder_.getMessage(index);
+ }
+ }
+ /**
+ * repeated .hbase.pb.RegionInfo region_info = 3;
+ */
+ public Builder setRegionInfo(
+ int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) {
+ if (regionInfoBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureRegionInfoIsMutable();
+ regionInfo_.set(index, value);
+ onChanged();
+ } else {
+ regionInfoBuilder_.setMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.RegionInfo region_info = 3;
+ */
+ public Builder setRegionInfo(
+ int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) {
+ if (regionInfoBuilder_ == null) {
+ ensureRegionInfoIsMutable();
+ regionInfo_.set(index, builderForValue.build());
+ onChanged();
+ } else {
+ regionInfoBuilder_.setMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.RegionInfo region_info = 3;
+ */
+ public Builder addRegionInfo(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) {
+ if (regionInfoBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureRegionInfoIsMutable();
+ regionInfo_.add(value);
+ onChanged();
+ } else {
+ regionInfoBuilder_.addMessage(value);
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.RegionInfo region_info = 3;
+ */
+ public Builder addRegionInfo(
+ int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) {
+ if (regionInfoBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureRegionInfoIsMutable();
+ regionInfo_.add(index, value);
+ onChanged();
+ } else {
+ regionInfoBuilder_.addMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.RegionInfo region_info = 3;
+ */
+ public Builder addRegionInfo(
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) {
+ if (regionInfoBuilder_ == null) {
+ ensureRegionInfoIsMutable();
+ regionInfo_.add(builderForValue.build());
+ onChanged();
+ } else {
+ regionInfoBuilder_.addMessage(builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.RegionInfo region_info = 3;
+ */
+ public Builder addRegionInfo(
+ int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) {
+ if (regionInfoBuilder_ == null) {
+ ensureRegionInfoIsMutable();
+ regionInfo_.add(index, builderForValue.build());
+ onChanged();
+ } else {
+ regionInfoBuilder_.addMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.RegionInfo region_info = 3;
+ */
+ public Builder addAllRegionInfo(
+ java.lang.Iterable<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo> values) {
+ if (regionInfoBuilder_ == null) {
+ ensureRegionInfoIsMutable();
+ super.addAll(values, regionInfo_);
+ onChanged();
+ } else {
+ regionInfoBuilder_.addAllMessages(values);
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.RegionInfo region_info = 3;
+ */
+ public Builder clearRegionInfo() {
+ if (regionInfoBuilder_ == null) {
+ regionInfo_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000004);
+ onChanged();
+ } else {
+ regionInfoBuilder_.clear();
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.RegionInfo region_info = 3;
+ */
+ public Builder removeRegionInfo(int index) {
+ if (regionInfoBuilder_ == null) {
+ ensureRegionInfoIsMutable();
+ regionInfo_.remove(index);
+ onChanged();
+ } else {
+ regionInfoBuilder_.remove(index);
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.RegionInfo region_info = 3;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder getRegionInfoBuilder(
+ int index) {
+ return getRegionInfoFieldBuilder().getBuilder(index);
+ }
+ /**
+ * repeated .hbase.pb.RegionInfo region_info = 3;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder(
+ int index) {
+ if (regionInfoBuilder_ == null) {
+ return regionInfo_.get(index); } else {
+ return regionInfoBuilder_.getMessageOrBuilder(index);
+ }
+ }
+ /**
+ * repeated .hbase.pb.RegionInfo region_info = 3;
+ */
+ public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>
+ getRegionInfoOrBuilderList() {
+ if (regionInfoBuilder_ != null) {
+ return regionInfoBuilder_.getMessageOrBuilderList();
+ } else {
+ return java.util.Collections.unmodifiableList(regionInfo_);
+ }
+ }
+ /**
+ * repeated .hbase.pb.RegionInfo region_info = 3;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder addRegionInfoBuilder() {
+ return getRegionInfoFieldBuilder().addBuilder(
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance());
+ }
+ /**
+ * repeated .hbase.pb.RegionInfo region_info = 3;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder addRegionInfoBuilder(
+ int index) {
+ return getRegionInfoFieldBuilder().addBuilder(
+ index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance());
+ }
+ /**
+ * repeated .hbase.pb.RegionInfo region_info = 3;
+ */
+ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder>
+ getRegionInfoBuilderList() {
+ return getRegionInfoFieldBuilder().getBuilderList();
+ }
+ private com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>
+ getRegionInfoFieldBuilder() {
+ if (regionInfoBuilder_ == null) {
+ regionInfoBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>(
+ regionInfo_,
+ ((bitField0_ & 0x00000004) == 0x00000004),
+ getParentForChildren(),
+ isClean());
+ regionInfo_ = null;
+ }
+ return regionInfoBuilder_;
+ }
+
+ // @@protoc_insertion_point(builder_scope:hbase.pb.DeleteTableStateData)
+ }
+
+ static {
+ defaultInstance = new DeleteTableStateData(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:hbase.pb.DeleteTableStateData)
+ }
+
+ public interface CreateNamespaceStateDataOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required .hbase.pb.NamespaceDescriptor namespace_descriptor = 1;
+ /**
+ * required .hbase.pb.NamespaceDescriptor namespace_descriptor = 1;
+ */
+ boolean hasNamespaceDescriptor();
+ /**
+ * required .hbase.pb.NamespaceDescriptor namespace_descriptor = 1;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor getNamespaceDescriptor();
+ /**
+ * required .hbase.pb.NamespaceDescriptor namespace_descriptor = 1;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptorOrBuilder getNamespaceDescriptorOrBuilder();
+ }
+ /**
+ * Protobuf type {@code hbase.pb.CreateNamespaceStateData}
+ */
+ public static final class CreateNamespaceStateData extends
+ com.google.protobuf.GeneratedMessage
+ implements CreateNamespaceStateDataOrBuilder {
+ // Use CreateNamespaceStateData.newBuilder() to construct.
+ private CreateNamespaceStateData(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private CreateNamespaceStateData(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final CreateNamespaceStateData defaultInstance;
+ public static CreateNamespaceStateData getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public CreateNamespaceStateData getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private CreateNamespaceStateData(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ subBuilder = namespaceDescriptor_.toBuilder();
+ }
+ namespaceDescriptor_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(namespaceDescriptor_);
+ namespaceDescriptor_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000001;
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_CreateNamespaceStateData_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_CreateNamespaceStateData_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateNamespaceStateData.class, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateNamespaceStateData.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<CreateNamespaceStateData> PARSER =
+ new com.google.protobuf.AbstractParser<CreateNamespaceStateData>() {
+ public CreateNamespaceStateData parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new CreateNamespaceStateData(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<CreateNamespaceStateData> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // required .hbase.pb.NamespaceDescriptor namespace_descriptor = 1;
+ public static final int NAMESPACE_DESCRIPTOR_FIELD_NUMBER = 1;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor namespaceDescriptor_;
+ /**
+ * required .hbase.pb.NamespaceDescriptor namespace_descriptor = 1;
+ */
+ public boolean hasNamespaceDescriptor() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required .hbase.pb.NamespaceDescriptor namespace_descriptor = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor getNamespaceDescriptor() {
+ return namespaceDescriptor_;
+ }
+ /**
+ * required .hbase.pb.NamespaceDescriptor namespace_descriptor = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptorOrBuilder getNamespaceDescriptorOrBuilder() {
+ return namespaceDescriptor_;
+ }
+
+ private void initFields() {
+ namespaceDescriptor_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.getDefaultInstance();
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasNamespaceDescriptor()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!getNamespaceDescriptor().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeMessage(1, namespaceDescriptor_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(1, namespaceDescriptor_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateNamespaceStateData)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateNamespaceStateData other = (org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateNamespaceStateData) obj;
+
+ boolean result = true;
+ result = result && (hasNamespaceDescriptor() == other.hasNamespaceDescriptor());
+ if (hasNamespaceDescriptor()) {
+ result = result && getNamespaceDescriptor()
+ .equals(other.getNamespaceDescriptor());
+ }
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasNamespaceDescriptor()) {
+ hash = (37 * hash) + NAMESPACE_DESCRIPTOR_FIELD_NUMBER;
+ hash = (53 * hash) + getNamespaceDescriptor().hashCode();
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateNamespaceStateData parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateNamespaceStateData parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateNamespaceStateData parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateNamespaceStateData parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateNamespaceStateData parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateNamespaceStateData parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateNamespaceStateData parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateNamespaceStateData parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateNamespaceStateData parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateNamespaceStateData parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateNamespaceStateData prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code hbase.pb.CreateNamespaceStateData}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateNamespaceStateDataOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_CreateNamespaceStateData_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_CreateNamespaceStateData_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateNamespaceStateData.class, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateNamespaceStateData.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateNamespaceStateData.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getNamespaceDescriptorFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ if (namespaceDescriptorBuilder_ == null) {
+ namespaceDescriptor_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.getDefaultInstance();
+ } else {
+ namespaceDescriptorBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_CreateNamespaceStateData_descriptor;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateNamespaceStateData getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateNamespaceStateData.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateNamespaceStateData build() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateNamespaceStateData result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateNamespaceStateData buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateNamespaceStateData result = new org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateNamespaceStateData(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ if (namespaceDescriptorBuilder_ == null) {
+ result.namespaceDescriptor_ = namespaceDescriptor_;
+ } else {
+ result.namespaceDescriptor_ = namespaceDescriptorBuilder_.build();
+ }
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateNamespaceStateData) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateNamespaceStateData)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateNamespaceStateData other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateNamespaceStateData.getDefaultInstance()) return this;
+ if (other.hasNamespaceDescriptor()) {
+ mergeNamespaceDescriptor(other.getNamespaceDescriptor());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasNamespaceDescriptor()) {
+
+ return false;
+ }
+ if (!getNamespaceDescriptor().isInitialized()) {
+
+ return false;
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateNamespaceStateData parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateNamespaceStateData) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // required .hbase.pb.NamespaceDescriptor namespace_descriptor = 1;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor namespaceDescriptor_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptorOrBuilder> namespaceDescriptorBuilder_;
+ /**
+ * required .hbase.pb.NamespaceDescriptor namespace_descriptor = 1;
+ */
+ public boolean hasNamespaceDescriptor() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required .hbase.pb.NamespaceDescriptor namespace_descriptor = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor getNamespaceDescriptor() {
+ if (namespaceDescriptorBuilder_ == null) {
+ return namespaceDescriptor_;
+ } else {
+ return namespaceDescriptorBuilder_.getMessage();
+ }
+ }
+ /**
+ * required .hbase.pb.NamespaceDescriptor namespace_descriptor = 1;
+ */
+ public Builder setNamespaceDescriptor(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor value) {
+ if (namespaceDescriptorBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ namespaceDescriptor_ = value;
+ onChanged();
+ } else {
+ namespaceDescriptorBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * required .hbase.pb.NamespaceDescriptor namespace_descriptor = 1;
+ */
+ public Builder setNamespaceDescriptor(
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.Builder builderForValue) {
+ if (namespaceDescriptorBuilder_ == null) {
+ namespaceDescriptor_ = builderForValue.build();
+ onChanged();
+ } else {
+ namespaceDescriptorBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * required .hbase.pb.NamespaceDescriptor namespace_descriptor = 1;
+ */
+ public Builder mergeNamespaceDescriptor(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor value) {
+ if (namespaceDescriptorBuilder_ == null) {
+ if (((bitField0_ & 0x00000001) == 0x00000001) &&
+ namespaceDescriptor_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.getDefaultInstance()) {
+ namespaceDescriptor_ =
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.newBuilder(namespaceDescriptor_).mergeFrom(value).buildPartial();
+ } else {
+ namespaceDescriptor_ = value;
+ }
+ onChanged();
+ } else {
+ namespaceDescriptorBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * required .hbase.pb.NamespaceDescriptor namespace_descriptor = 1;
+ */
+ public Builder clearNamespaceDescriptor() {
+ if (namespaceDescriptorBuilder_ == null) {
+ namespaceDescriptor_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.getDefaultInstance();
+ onChanged();
+ } else {
+ namespaceDescriptorBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
+ }
+ /**
+ * required .hbase.pb.NamespaceDescriptor namespace_descriptor = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.Builder getNamespaceDescriptorBuilder() {
+ bitField0_ |= 0x00000001;
+ onChanged();
+ return getNamespaceDescriptorFieldBuilder().getBuilder();
+ }
+ /**
+ * required .hbase.pb.NamespaceDescriptor namespace_descriptor = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptorOrBuilder getNamespaceDescriptorOrBuilder() {
+ if (namespaceDescriptorBuilder_ != null) {
+ return namespaceDescriptorBuilder_.getMessageOrBuilder();
+ } else {
+ return namespaceDescriptor_;
+ }
+ }
+ /**
+ * required .hbase.pb.NamespaceDescriptor namespace_descriptor = 1;
+ */
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptorOrBuilder>
+ getNamespaceDescriptorFieldBuilder() {
+ if (namespaceDescriptorBuilder_ == null) {
+ namespaceDescriptorBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptorOrBuilder>(
+ namespaceDescriptor_,
+ getParentForChildren(),
+ isClean());
+ namespaceDescriptor_ = null;
+ }
+ return namespaceDescriptorBuilder_;
+ }
+
+ // @@protoc_insertion_point(builder_scope:hbase.pb.CreateNamespaceStateData)
+ }
+
+ static {
+ defaultInstance = new CreateNamespaceStateData(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:hbase.pb.CreateNamespaceStateData)
+ }
+
+ public interface ModifyNamespaceStateDataOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required .hbase.pb.NamespaceDescriptor namespace_descriptor = 1;
+ /**
+ * required .hbase.pb.NamespaceDescriptor namespace_descriptor = 1;
+ */
+ boolean hasNamespaceDescriptor();
+ /**
+ * required .hbase.pb.NamespaceDescriptor namespace_descriptor = 1;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor getNamespaceDescriptor();
+ /**
+ * required .hbase.pb.NamespaceDescriptor namespace_descriptor = 1;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptorOrBuilder getNamespaceDescriptorOrBuilder();
+
+ // optional .hbase.pb.NamespaceDescriptor unmodified_namespace_descriptor = 2;
+ /**
+ * optional .hbase.pb.NamespaceDescriptor unmodified_namespace_descriptor = 2;
+ */
+ boolean hasUnmodifiedNamespaceDescriptor();
+ /**
+ * optional .hbase.pb.NamespaceDescriptor unmodified_namespace_descriptor = 2;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor getUnmodifiedNamespaceDescriptor();
+ /**
+ * optional .hbase.pb.NamespaceDescriptor unmodified_namespace_descriptor = 2;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptorOrBuilder getUnmodifiedNamespaceDescriptorOrBuilder();
+ }
+ /**
+ * Protobuf type {@code hbase.pb.ModifyNamespaceStateData}
+ */
+ public static final class ModifyNamespaceStateData extends
+ com.google.protobuf.GeneratedMessage
+ implements ModifyNamespaceStateDataOrBuilder {
+ // Use ModifyNamespaceStateData.newBuilder() to construct.
+ private ModifyNamespaceStateData(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private ModifyNamespaceStateData(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final ModifyNamespaceStateData defaultInstance;
+ public static ModifyNamespaceStateData getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public ModifyNamespaceStateData getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private ModifyNamespaceStateData(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ subBuilder = namespaceDescriptor_.toBuilder();
+ }
+ namespaceDescriptor_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(namespaceDescriptor_);
+ namespaceDescriptor_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000001;
+ break;
+ }
+ case 18: {
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ subBuilder = unmodifiedNamespaceDescriptor_.toBuilder();
+ }
+ unmodifiedNamespaceDescriptor_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(unmodifiedNamespaceDescriptor_);
+ unmodifiedNamespaceDescriptor_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000002;
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_ModifyNamespaceStateData_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_ModifyNamespaceStateData_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyNamespaceStateData.class, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyNamespaceStateData.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<ModifyNamespaceStateData> PARSER =
+ new com.google.protobuf.AbstractParser<ModifyNamespaceStateData>() {
+ public ModifyNamespaceStateData parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new ModifyNamespaceStateData(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<ModifyNamespaceStateData> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // required .hbase.pb.NamespaceDescriptor namespace_descriptor = 1;
+ public static final int NAMESPACE_DESCRIPTOR_FIELD_NUMBER = 1;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor namespaceDescriptor_;
+ /**
+ * required .hbase.pb.NamespaceDescriptor namespace_descriptor = 1;
+ */
+ public boolean hasNamespaceDescriptor() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required .hbase.pb.NamespaceDescriptor namespace_descriptor = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor getNamespaceDescriptor() {
+ return namespaceDescriptor_;
+ }
+ /**
+ * required .hbase.pb.NamespaceDescriptor namespace_descriptor = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptorOrBuilder getNamespaceDescriptorOrBuilder() {
+ return namespaceDescriptor_;
+ }
+
+ // optional .hbase.pb.NamespaceDescriptor unmodified_namespace_descriptor = 2;
+ public static final int UNMODIFIED_NAMESPACE_DESCRIPTOR_FIELD_NUMBER = 2;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor unmodifiedNamespaceDescriptor_;
+ /**
+ * optional .hbase.pb.NamespaceDescriptor unmodified_namespace_descriptor = 2;
+ */
+ public boolean hasUnmodifiedNamespaceDescriptor() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * optional .hbase.pb.NamespaceDescriptor unmodified_namespace_descriptor = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor getUnmodifiedNamespaceDescriptor() {
+ return unmodifiedNamespaceDescriptor_;
+ }
+ /**
+ * optional .hbase.pb.NamespaceDescriptor unmodified_namespace_descriptor = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptorOrBuilder getUnmodifiedNamespaceDescriptorOrBuilder() {
+ return unmodifiedNamespaceDescriptor_;
+ }
+
+ private void initFields() {
+ namespaceDescriptor_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.getDefaultInstance();
+ unmodifiedNamespaceDescriptor_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.getDefaultInstance();
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasNamespaceDescriptor()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!getNamespaceDescriptor().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (hasUnmodifiedNamespaceDescriptor()) {
+ if (!getUnmodifiedNamespaceDescriptor().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeMessage(1, namespaceDescriptor_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeMessage(2, unmodifiedNamespaceDescriptor_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(1, namespaceDescriptor_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(2, unmodifiedNamespaceDescriptor_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyNamespaceStateData)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyNamespaceStateData other = (org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyNamespaceStateData) obj;
+
+ boolean result = true;
+ result = result && (hasNamespaceDescriptor() == other.hasNamespaceDescriptor());
+ if (hasNamespaceDescriptor()) {
+ result = result && getNamespaceDescriptor()
+ .equals(other.getNamespaceDescriptor());
+ }
+ result = result && (hasUnmodifiedNamespaceDescriptor() == other.hasUnmodifiedNamespaceDescriptor());
+ if (hasUnmodifiedNamespaceDescriptor()) {
+ result = result && getUnmodifiedNamespaceDescriptor()
+ .equals(other.getUnmodifiedNamespaceDescriptor());
+ }
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasNamespaceDescriptor()) {
+ hash = (37 * hash) + NAMESPACE_DESCRIPTOR_FIELD_NUMBER;
+ hash = (53 * hash) + getNamespaceDescriptor().hashCode();
+ }
+ if (hasUnmodifiedNamespaceDescriptor()) {
+ hash = (37 * hash) + UNMODIFIED_NAMESPACE_DESCRIPTOR_FIELD_NUMBER;
+ hash = (53 * hash) + getUnmodifiedNamespaceDescriptor().hashCode();
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyNamespaceStateData parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyNamespaceStateData parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyNamespaceStateData parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyNamespaceStateData parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyNamespaceStateData parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyNamespaceStateData parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyNamespaceStateData parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyNamespaceStateData parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyNamespaceStateData parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyNamespaceStateData parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyNamespaceStateData prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code hbase.pb.ModifyNamespaceStateData}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyNamespaceStateDataOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_ModifyNamespaceStateData_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_ModifyNamespaceStateData_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyNamespaceStateData.class, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyNamespaceStateData.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyNamespaceStateData.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getNamespaceDescriptorFieldBuilder();
+ getUnmodifiedNamespaceDescriptorFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ if (namespaceDescriptorBuilder_ == null) {
+ namespaceDescriptor_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.getDefaultInstance();
+ } else {
+ namespaceDescriptorBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
+ if (unmodifiedNamespaceDescriptorBuilder_ == null) {
+ unmodifiedNamespaceDescriptor_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.getDefaultInstance();
+ } else {
+ unmodifiedNamespaceDescriptorBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000002);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_ModifyNamespaceStateData_descriptor;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyNamespaceStateData getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyNamespaceStateData.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyNamespaceStateData build() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyNamespaceStateData result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyNamespaceStateData buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyNamespaceStateData result = new org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyNamespaceStateData(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ if (namespaceDescriptorBuilder_ == null) {
+ result.namespaceDescriptor_ = namespaceDescriptor_;
+ } else {
+ result.namespaceDescriptor_ = namespaceDescriptorBuilder_.build();
+ }
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ if (unmodifiedNamespaceDescriptorBuilder_ == null) {
+ result.unmodifiedNamespaceDescriptor_ = unmodifiedNamespaceDescriptor_;
+ } else {
+ result.unmodifiedNamespaceDescriptor_ = unmodifiedNamespaceDescriptorBuilder_.build();
+ }
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyNamespaceStateData) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyNamespaceStateData)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyNamespaceStateData other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyNamespaceStateData.getDefaultInstance()) return this;
+ if (other.hasNamespaceDescriptor()) {
+ mergeNamespaceDescriptor(other.getNamespaceDescriptor());
+ }
+ if (other.hasUnmodifiedNamespaceDescriptor()) {
+ mergeUnmodifiedNamespaceDescriptor(other.getUnmodifiedNamespaceDescriptor());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasNamespaceDescriptor()) {
+
+ return false;
+ }
+ if (!getNamespaceDescriptor().isInitialized()) {
+
+ return false;
+ }
+ if (hasUnmodifiedNamespaceDescriptor()) {
+ if (!getUnmodifiedNamespaceDescriptor().isInitialized()) {
+
+ return false;
+ }
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyNamespaceStateData parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyNamespaceStateData) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // required .hbase.pb.NamespaceDescriptor namespace_descriptor = 1;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor namespaceDescriptor_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptorOrBuilder> namespaceDescriptorBuilder_;
+ /**
+ * required .hbase.pb.NamespaceDescriptor namespace_descriptor = 1;
+ */
+ public boolean hasNamespaceDescriptor() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required .hbase.pb.NamespaceDescriptor namespace_descriptor = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor getNamespaceDescriptor() {
+ if (namespaceDescriptorBuilder_ == null) {
+ return namespaceDescriptor_;
+ } else {
+ return namespaceDescriptorBuilder_.getMessage();
+ }
+ }
+ /**
+ * required .hbase.pb.NamespaceDescriptor namespace_descriptor = 1;
+ */
+ public Builder setNamespaceDescriptor(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor value) {
+ if (namespaceDescriptorBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ namespaceDescriptor_ = value;
+ onChanged();
+ } else {
+ namespaceDescriptorBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * required .hbase.pb.NamespaceDescriptor namespace_descriptor = 1;
+ */
+ public Builder setNamespaceDescriptor(
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.Builder builderForValue) {
+ if (namespaceDescriptorBuilder_ == null) {
+ namespaceDescriptor_ = builderForValue.build();
+ onChanged();
+ } else {
+ namespaceDescriptorBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * required .hbase.pb.NamespaceDescriptor namespace_descriptor = 1;
+ */
+ public Builder mergeNamespaceDescriptor(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor value) {
+ if (namespaceDescriptorBuilder_ == null) {
+ if (((bitField0_ & 0x00000001) == 0x00000001) &&
+ namespaceDescriptor_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.getDefaultInstance()) {
+ namespaceDescriptor_ =
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.newBuilder(namespaceDescriptor_).mergeFrom(value).buildPartial();
+ } else {
+ namespaceDescriptor_ = value;
+ }
+ onChanged();
+ } else {
+ namespaceDescriptorBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * required .hbase.pb.NamespaceDescriptor namespace_descriptor = 1;
+ */
+ public Builder clearNamespaceDescriptor() {
+ if (namespaceDescriptorBuilder_ == null) {
+ namespaceDescriptor_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.getDefaultInstance();
+ onChanged();
+ } else {
+ namespaceDescriptorBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
+ }
+ /**
+ * required .hbase.pb.NamespaceDescriptor namespace_descriptor = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.Builder getNamespaceDescriptorBuilder() {
+ bitField0_ |= 0x00000001;
+ onChanged();
+ return getNamespaceDescriptorFieldBuilder().getBuilder();
+ }
+ /**
+ * required .hbase.pb.NamespaceDescriptor namespace_descriptor = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptorOrBuilder getNamespaceDescriptorOrBuilder() {
+ if (namespaceDescriptorBuilder_ != null) {
+ return namespaceDescriptorBuilder_.getMessageOrBuilder();
+ } else {
+ return namespaceDescriptor_;
+ }
+ }
+ /**
+ * required .hbase.pb.NamespaceDescriptor namespace_descriptor = 1;
+ */
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptorOrBuilder>
+ getNamespaceDescriptorFieldBuilder() {
+ if (namespaceDescriptorBuilder_ == null) {
+ namespaceDescriptorBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptorOrBuilder>(
+ namespaceDescriptor_,
+ getParentForChildren(),
+ isClean());
+ namespaceDescriptor_ = null;
+ }
+ return namespaceDescriptorBuilder_;
+ }
+
+ // optional .hbase.pb.NamespaceDescriptor unmodified_namespace_descriptor = 2;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor unmodifiedNamespaceDescriptor_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptorOrBuilder> unmodifiedNamespaceDescriptorBuilder_;
+ /**
+ * optional .hbase.pb.NamespaceDescriptor unmodified_namespace_descriptor = 2;
+ */
+ public boolean hasUnmodifiedNamespaceDescriptor() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * optional .hbase.pb.NamespaceDescriptor unmodified_namespace_descriptor = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor getUnmodifiedNamespaceDescriptor() {
+ if (unmodifiedNamespaceDescriptorBuilder_ == null) {
+ return unmodifiedNamespaceDescriptor_;
+ } else {
+ return unmodifiedNamespaceDescriptorBuilder_.getMessage();
+ }
+ }
+ /**
+ * optional .hbase.pb.NamespaceDescriptor unmodified_namespace_descriptor = 2;
+ */
+ public Builder setUnmodifiedNamespaceDescriptor(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor value) {
+ if (unmodifiedNamespaceDescriptorBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ unmodifiedNamespaceDescriptor_ = value;
+ onChanged();
+ } else {
+ unmodifiedNamespaceDescriptorBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * optional .hbase.pb.NamespaceDescriptor unmodified_namespace_descriptor = 2;
+ */
+ public Builder setUnmodifiedNamespaceDescriptor(
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.Builder builderForValue) {
+ if (unmodifiedNamespaceDescriptorBuilder_ == null) {
+ unmodifiedNamespaceDescriptor_ = builderForValue.build();
+ onChanged();
+ } else {
+ unmodifiedNamespaceDescriptorBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * optional .hbase.pb.NamespaceDescriptor unmodified_namespace_descriptor = 2;
+ */
+ public Builder mergeUnmodifiedNamespaceDescriptor(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor value) {
+ if (unmodifiedNamespaceDescriptorBuilder_ == null) {
+ if (((bitField0_ & 0x00000002) == 0x00000002) &&
+ unmodifiedNamespaceDescriptor_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.getDefaultInstance()) {
+ unmodifiedNamespaceDescriptor_ =
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.newBuilder(unmodifiedNamespaceDescriptor_).mergeFrom(value).buildPartial();
+ } else {
+ unmodifiedNamespaceDescriptor_ = value;
+ }
+ onChanged();
+ } else {
+ unmodifiedNamespaceDescriptorBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * optional .hbase.pb.NamespaceDescriptor unmodified_namespace_descriptor = 2;
+ */
+ public Builder clearUnmodifiedNamespaceDescriptor() {
+ if (unmodifiedNamespaceDescriptorBuilder_ == null) {
+ unmodifiedNamespaceDescriptor_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.getDefaultInstance();
+ onChanged();
+ } else {
+ unmodifiedNamespaceDescriptorBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000002);
+ return this;
+ }
+ /**
+ * optional .hbase.pb.NamespaceDescriptor unmodified_namespace_descriptor = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.Builder getUnmodifiedNamespaceDescriptorBuilder() {
+ bitField0_ |= 0x00000002;
+ onChanged();
+ return getUnmodifiedNamespaceDescriptorFieldBuilder().getBuilder();
+ }
+ /**
+ * optional .hbase.pb.NamespaceDescriptor unmodified_namespace_descriptor = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptorOrBuilder getUnmodifiedNamespaceDescriptorOrBuilder() {
+ if (unmodifiedNamespaceDescriptorBuilder_ != null) {
+ return unmodifiedNamespaceDescriptorBuilder_.getMessageOrBuilder();
+ } else {
+ return unmodifiedNamespaceDescriptor_;
+ }
+ }
+ /**
+ * optional .hbase.pb.NamespaceDescriptor unmodified_namespace_descriptor = 2;
+ */
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptorOrBuilder>
+ getUnmodifiedNamespaceDescriptorFieldBuilder() {
+ if (unmodifiedNamespaceDescriptorBuilder_ == null) {
+ unmodifiedNamespaceDescriptorBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptorOrBuilder>(
+ unmodifiedNamespaceDescriptor_,
+ getParentForChildren(),
+ isClean());
+ unmodifiedNamespaceDescriptor_ = null;
+ }
+ return unmodifiedNamespaceDescriptorBuilder_;
+ }
+
+ // @@protoc_insertion_point(builder_scope:hbase.pb.ModifyNamespaceStateData)
+ }
+
+ static {
+ defaultInstance = new ModifyNamespaceStateData(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:hbase.pb.ModifyNamespaceStateData)
+ }
+
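// Illustrative sketch, not part of the generated file above: one plausible round trip of
// ModifyNamespaceStateData using the builder and parseFrom methods it defines. The
// NamespaceDescriptor "name" field and its ByteString setter are assumptions about
// HBaseProtos, not something shown in this patch.
//
//   HBaseProtos.NamespaceDescriptor ns = HBaseProtos.NamespaceDescriptor.newBuilder()
//       .setName(com.google.protobuf.ByteString.copyFromUtf8("demo_ns"))  // assumed field
//       .build();
//   ModifyNamespaceStateData state = ModifyNamespaceStateData.newBuilder()
//       .setNamespaceDescriptor(ns)             // required field 1
//       .setUnmodifiedNamespaceDescriptor(ns)   // optional field 2
//       .build();
//   ModifyNamespaceStateData parsed =
//       ModifyNamespaceStateData.parseFrom(state.toByteArray());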
+ public interface DeleteNamespaceStateDataOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required string namespace_name = 1;
+ /**
+ * required string namespace_name = 1;
+ */
+ boolean hasNamespaceName();
+ /**
+ * required string namespace_name = 1;
+ */
+ java.lang.String getNamespaceName();
+ /**
+ * required string namespace_name = 1;
+ */
+ com.google.protobuf.ByteString
+ getNamespaceNameBytes();
+
+ // optional .hbase.pb.NamespaceDescriptor namespace_descriptor = 2;
+ /**
+ * optional .hbase.pb.NamespaceDescriptor namespace_descriptor = 2;
+ */
+ boolean hasNamespaceDescriptor();
+ /**
+ * optional .hbase.pb.NamespaceDescriptor namespace_descriptor = 2;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor getNamespaceDescriptor();
+ /**
+ * optional .hbase.pb.NamespaceDescriptor namespace_descriptor = 2;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptorOrBuilder getNamespaceDescriptorOrBuilder();
+ }
+ /**
+ * Protobuf type {@code hbase.pb.DeleteNamespaceStateData}
+ */
+ public static final class DeleteNamespaceStateData extends
+ com.google.protobuf.GeneratedMessage
+ implements DeleteNamespaceStateDataOrBuilder {
+ // Use DeleteNamespaceStateData.newBuilder() to construct.
+ private DeleteNamespaceStateData(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private DeleteNamespaceStateData(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final DeleteNamespaceStateData defaultInstance;
+ public static DeleteNamespaceStateData getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public DeleteNamespaceStateData getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private DeleteNamespaceStateData(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ bitField0_ |= 0x00000001;
+ namespaceName_ = input.readBytes();
+ break;
+ }
+ case 18: {
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ subBuilder = namespaceDescriptor_.toBuilder();
+ }
+ namespaceDescriptor_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(namespaceDescriptor_);
+ namespaceDescriptor_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000002;
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_DeleteNamespaceStateData_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_DeleteNamespaceStateData_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteNamespaceStateData.class, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteNamespaceStateData.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<DeleteNamespaceStateData> PARSER =
+ new com.google.protobuf.AbstractParser<DeleteNamespaceStateData>() {
+ public DeleteNamespaceStateData parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new DeleteNamespaceStateData(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<DeleteNamespaceStateData> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // required string namespace_name = 1;
+ public static final int NAMESPACE_NAME_FIELD_NUMBER = 1;
+ private java.lang.Object namespaceName_;
+ /**
+ * required string namespace_name = 1;
+ */
+ public boolean hasNamespaceName() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required string namespace_name = 1;
+ */
+ public java.lang.String getNamespaceName() {
+ java.lang.Object ref = namespaceName_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ namespaceName_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * required string namespace_name = 1;
+ */
+ public com.google.protobuf.ByteString
+ getNamespaceNameBytes() {
+ java.lang.Object ref = namespaceName_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ namespaceName_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // optional .hbase.pb.NamespaceDescriptor namespace_descriptor = 2;
+ public static final int NAMESPACE_DESCRIPTOR_FIELD_NUMBER = 2;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor namespaceDescriptor_;
+ /**
+ * optional .hbase.pb.NamespaceDescriptor namespace_descriptor = 2;
+ */
+ public boolean hasNamespaceDescriptor() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * optional .hbase.pb.NamespaceDescriptor namespace_descriptor = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor getNamespaceDescriptor() {
+ return namespaceDescriptor_;
+ }
+ /**
+ * optional .hbase.pb.NamespaceDescriptor namespace_descriptor = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptorOrBuilder getNamespaceDescriptorOrBuilder() {
+ return namespaceDescriptor_;
+ }
+
+ private void initFields() {
+ namespaceName_ = "";
+ namespaceDescriptor_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.getDefaultInstance();
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasNamespaceName()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (hasNamespaceDescriptor()) {
+ if (!getNamespaceDescriptor().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeBytes(1, getNamespaceNameBytes());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeMessage(2, namespaceDescriptor_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(1, getNamespaceNameBytes());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(2, namespaceDescriptor_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteNamespaceStateData)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteNamespaceStateData other = (org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteNamespaceStateData) obj;
+
+ boolean result = true;
+ result = result && (hasNamespaceName() == other.hasNamespaceName());
+ if (hasNamespaceName()) {
+ result = result && getNamespaceName()
+ .equals(other.getNamespaceName());
+ }
+ result = result && (hasNamespaceDescriptor() == other.hasNamespaceDescriptor());
+ if (hasNamespaceDescriptor()) {
+ result = result && getNamespaceDescriptor()
+ .equals(other.getNamespaceDescriptor());
+ }
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasNamespaceName()) {
+ hash = (37 * hash) + NAMESPACE_NAME_FIELD_NUMBER;
+ hash = (53 * hash) + getNamespaceName().hashCode();
+ }
+ if (hasNamespaceDescriptor()) {
+ hash = (37 * hash) + NAMESPACE_DESCRIPTOR_FIELD_NUMBER;
+ hash = (53 * hash) + getNamespaceDescriptor().hashCode();
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteNamespaceStateData parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteNamespaceStateData parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteNamespaceStateData parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteNamespaceStateData parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteNamespaceStateData parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteNamespaceStateData parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteNamespaceStateData parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteNamespaceStateData parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteNamespaceStateData parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteNamespaceStateData parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteNamespaceStateData prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code hbase.pb.DeleteNamespaceStateData}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteNamespaceStateDataOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_DeleteNamespaceStateData_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_DeleteNamespaceStateData_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteNamespaceStateData.class, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteNamespaceStateData.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteNamespaceStateData.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getNamespaceDescriptorFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ namespaceName_ = "";
+ bitField0_ = (bitField0_ & ~0x00000001);
+ if (namespaceDescriptorBuilder_ == null) {
+ namespaceDescriptor_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.getDefaultInstance();
+ } else {
+ namespaceDescriptorBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000002);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_DeleteNamespaceStateData_descriptor;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteNamespaceStateData getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteNamespaceStateData.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteNamespaceStateData build() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteNamespaceStateData result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteNamespaceStateData buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteNamespaceStateData result = new org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteNamespaceStateData(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.namespaceName_ = namespaceName_;
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ if (namespaceDescriptorBuilder_ == null) {
+ result.namespaceDescriptor_ = namespaceDescriptor_;
+ } else {
+ result.namespaceDescriptor_ = namespaceDescriptorBuilder_.build();
+ }
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteNamespaceStateData) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteNamespaceStateData)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
}
- // repeated .hbase.pb.RegionInfo region_info = 3;
- private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo> regionInfo_ =
- java.util.Collections.emptyList();
- private void ensureRegionInfoIsMutable() {
- if (!((bitField0_ & 0x00000004) == 0x00000004)) {
- regionInfo_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo>(regionInfo_);
- bitField0_ |= 0x00000004;
- }
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteNamespaceStateData other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteNamespaceStateData.getDefaultInstance()) return this;
+ if (other.hasNamespaceName()) {
+ bitField0_ |= 0x00000001;
+ namespaceName_ = other.namespaceName_;
+ onChanged();
+ }
+ if (other.hasNamespaceDescriptor()) {
+ mergeNamespaceDescriptor(other.getNamespaceDescriptor());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
}
- private com.google.protobuf.RepeatedFieldBuilder<
- org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> regionInfoBuilder_;
+ public final boolean isInitialized() {
+ if (!hasNamespaceName()) {
+
+ return false;
+ }
+ if (hasNamespaceDescriptor()) {
+ if (!getNamespaceDescriptor().isInitialized()) {
+
+ return false;
+ }
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteNamespaceStateData parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteNamespaceStateData) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+ // required string namespace_name = 1;
+ private java.lang.Object namespaceName_ = "";
/**
- * repeated .hbase.pb.RegionInfo region_info = 3;
+ * required string namespace_name = 1;
*/
- public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo> getRegionInfoList() {
- if (regionInfoBuilder_ == null) {
- return java.util.Collections.unmodifiableList(regionInfo_);
- } else {
- return regionInfoBuilder_.getMessageList();
- }
+ public boolean hasNamespaceName() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
- * repeated .hbase.pb.RegionInfo region_info = 3;
+ * required string namespace_name = 1;
*/
- public int getRegionInfoCount() {
- if (regionInfoBuilder_ == null) {
- return regionInfo_.size();
+ public java.lang.String getNamespaceName() {
+ java.lang.Object ref = namespaceName_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ namespaceName_ = s;
+ return s;
} else {
- return regionInfoBuilder_.getCount();
+ return (java.lang.String) ref;
}
}
/**
- * repeated .hbase.pb.RegionInfo region_info = 3;
+ * required string namespace_name = 1;
*/
- public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo(int index) {
- if (regionInfoBuilder_ == null) {
- return regionInfo_.get(index);
+ public com.google.protobuf.ByteString
+ getNamespaceNameBytes() {
+ java.lang.Object ref = namespaceName_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ namespaceName_ = b;
+ return b;
} else {
- return regionInfoBuilder_.getMessage(index);
+ return (com.google.protobuf.ByteString) ref;
}
}
/**
- * repeated .hbase.pb.RegionInfo region_info = 3;
+ * required string namespace_name = 1;
*/
- public Builder setRegionInfo(
- int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) {
- if (regionInfoBuilder_ == null) {
- if (value == null) {
- throw new NullPointerException();
- }
- ensureRegionInfoIsMutable();
- regionInfo_.set(index, value);
- onChanged();
- } else {
- regionInfoBuilder_.setMessage(index, value);
- }
+ public Builder setNamespaceName(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ namespaceName_ = value;
+ onChanged();
return this;
}
/**
- * repeated .hbase.pb.RegionInfo region_info = 3;
+ * required string namespace_name = 1;
*/
- public Builder setRegionInfo(
- int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) {
- if (regionInfoBuilder_ == null) {
- ensureRegionInfoIsMutable();
- regionInfo_.set(index, builderForValue.build());
- onChanged();
- } else {
- regionInfoBuilder_.setMessage(index, builderForValue.build());
- }
+ public Builder clearNamespaceName() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ namespaceName_ = getDefaultInstance().getNamespaceName();
+ onChanged();
return this;
}
/**
- * repeated .hbase.pb.RegionInfo region_info = 3;
+ * required string namespace_name = 1;
*/
- public Builder addRegionInfo(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) {
- if (regionInfoBuilder_ == null) {
- if (value == null) {
- throw new NullPointerException();
- }
- ensureRegionInfoIsMutable();
- regionInfo_.add(value);
- onChanged();
- } else {
- regionInfoBuilder_.addMessage(value);
- }
+ public Builder setNamespaceNameBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ namespaceName_ = value;
+ onChanged();
return this;
}
+
+ // optional .hbase.pb.NamespaceDescriptor namespace_descriptor = 2;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor namespaceDescriptor_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptorOrBuilder> namespaceDescriptorBuilder_;
/**
- * repeated .hbase.pb.RegionInfo region_info = 3;
+ * optional .hbase.pb.NamespaceDescriptor namespace_descriptor = 2;
*/
- public Builder addRegionInfo(
- int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) {
- if (regionInfoBuilder_ == null) {
- if (value == null) {
- throw new NullPointerException();
- }
- ensureRegionInfoIsMutable();
- regionInfo_.add(index, value);
- onChanged();
- } else {
- regionInfoBuilder_.addMessage(index, value);
- }
- return this;
+ public boolean hasNamespaceDescriptor() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
- * repeated .hbase.pb.RegionInfo region_info = 3;
+ * optional .hbase.pb.NamespaceDescriptor namespace_descriptor = 2;
*/
- public Builder addRegionInfo(
- org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) {
- if (regionInfoBuilder_ == null) {
- ensureRegionInfoIsMutable();
- regionInfo_.add(builderForValue.build());
- onChanged();
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor getNamespaceDescriptor() {
+ if (namespaceDescriptorBuilder_ == null) {
+ return namespaceDescriptor_;
} else {
- regionInfoBuilder_.addMessage(builderForValue.build());
+ return namespaceDescriptorBuilder_.getMessage();
}
- return this;
}
/**
- * repeated .hbase.pb.RegionInfo region_info = 3;
+ * optional .hbase.pb.NamespaceDescriptor namespace_descriptor = 2;
*/
- public Builder addRegionInfo(
- int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) {
- if (regionInfoBuilder_ == null) {
- ensureRegionInfoIsMutable();
- regionInfo_.add(index, builderForValue.build());
+ public Builder setNamespaceDescriptor(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor value) {
+ if (namespaceDescriptorBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ namespaceDescriptor_ = value;
onChanged();
} else {
- regionInfoBuilder_.addMessage(index, builderForValue.build());
+ namespaceDescriptorBuilder_.setMessage(value);
}
+ bitField0_ |= 0x00000002;
return this;
}
/**
- * repeated .hbase.pb.RegionInfo region_info = 3;
+ * optional .hbase.pb.NamespaceDescriptor namespace_descriptor = 2;
*/
- public Builder addAllRegionInfo(
- java.lang.Iterable<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo> values) {
- if (regionInfoBuilder_ == null) {
- ensureRegionInfoIsMutable();
- super.addAll(values, regionInfo_);
+ public Builder setNamespaceDescriptor(
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.Builder builderForValue) {
+ if (namespaceDescriptorBuilder_ == null) {
+ namespaceDescriptor_ = builderForValue.build();
onChanged();
} else {
- regionInfoBuilder_.addAllMessages(values);
+ namespaceDescriptorBuilder_.setMessage(builderForValue.build());
}
+ bitField0_ |= 0x00000002;
return this;
}
/**
- * repeated .hbase.pb.RegionInfo region_info = 3;
+ * optional .hbase.pb.NamespaceDescriptor namespace_descriptor = 2;
*/
- public Builder clearRegionInfo() {
- if (regionInfoBuilder_ == null) {
- regionInfo_ = java.util.Collections.emptyList();
- bitField0_ = (bitField0_ & ~0x00000004);
+ public Builder mergeNamespaceDescriptor(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor value) {
+ if (namespaceDescriptorBuilder_ == null) {
+ if (((bitField0_ & 0x00000002) == 0x00000002) &&
+ namespaceDescriptor_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.getDefaultInstance()) {
+ namespaceDescriptor_ =
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.newBuilder(namespaceDescriptor_).mergeFrom(value).buildPartial();
+ } else {
+ namespaceDescriptor_ = value;
+ }
onChanged();
} else {
- regionInfoBuilder_.clear();
+ namespaceDescriptorBuilder_.mergeFrom(value);
}
+ bitField0_ |= 0x00000002;
return this;
}
/**
- * repeated .hbase.pb.RegionInfo region_info = 3;
+ * optional .hbase.pb.NamespaceDescriptor namespace_descriptor = 2;
*/
- public Builder removeRegionInfo(int index) {
- if (regionInfoBuilder_ == null) {
- ensureRegionInfoIsMutable();
- regionInfo_.remove(index);
+ public Builder clearNamespaceDescriptor() {
+ if (namespaceDescriptorBuilder_ == null) {
+ namespaceDescriptor_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.getDefaultInstance();
onChanged();
} else {
- regionInfoBuilder_.remove(index);
+ namespaceDescriptorBuilder_.clear();
}
+ bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
/**
- * repeated .hbase.pb.RegionInfo region_info = 3;
- */
- public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder getRegionInfoBuilder(
- int index) {
- return getRegionInfoFieldBuilder().getBuilder(index);
- }
- /**
- * repeated .hbase.pb.RegionInfo region_info = 3;
+ * optional .hbase.pb.NamespaceDescriptor namespace_descriptor = 2;
*/
- public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder(
- int index) {
- if (regionInfoBuilder_ == null) {
- return regionInfo_.get(index); } else {
- return regionInfoBuilder_.getMessageOrBuilder(index);
- }
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.Builder getNamespaceDescriptorBuilder() {
+ bitField0_ |= 0x00000002;
+ onChanged();
+ return getNamespaceDescriptorFieldBuilder().getBuilder();
}
/**
- * repeated .hbase.pb.RegionInfo region_info = 3;
+ * optional .hbase.pb.NamespaceDescriptor namespace_descriptor = 2;
*/
- public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>
- getRegionInfoOrBuilderList() {
- if (regionInfoBuilder_ != null) {
- return regionInfoBuilder_.getMessageOrBuilderList();
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptorOrBuilder getNamespaceDescriptorOrBuilder() {
+ if (namespaceDescriptorBuilder_ != null) {
+ return namespaceDescriptorBuilder_.getMessageOrBuilder();
} else {
- return java.util.Collections.unmodifiableList(regionInfo_);
+ return namespaceDescriptor_;
}
}
/**
- * repeated .hbase.pb.RegionInfo region_info = 3;
- */
- public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder addRegionInfoBuilder() {
- return getRegionInfoFieldBuilder().addBuilder(
- org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance());
- }
- /**
- * repeated .hbase.pb.RegionInfo region_info = 3;
- */
- public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder addRegionInfoBuilder(
- int index) {
- return getRegionInfoFieldBuilder().addBuilder(
- index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance());
- }
- /**
- * repeated .hbase.pb.RegionInfo region_info = 3;
+ * optional .hbase.pb.NamespaceDescriptor namespace_descriptor = 2;
*/
- public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder>
- getRegionInfoBuilderList() {
- return getRegionInfoFieldBuilder().getBuilderList();
- }
- private com.google.protobuf.RepeatedFieldBuilder<
- org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>
- getRegionInfoFieldBuilder() {
- if (regionInfoBuilder_ == null) {
- regionInfoBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
- org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>(
- regionInfo_,
- ((bitField0_ & 0x00000004) == 0x00000004),
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptorOrBuilder>
+ getNamespaceDescriptorFieldBuilder() {
+ if (namespaceDescriptorBuilder_ == null) {
+ namespaceDescriptorBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptorOrBuilder>(
+ namespaceDescriptor_,
getParentForChildren(),
isClean());
- regionInfo_ = null;
+ namespaceDescriptor_ = null;
}
- return regionInfoBuilder_;
+ return namespaceDescriptorBuilder_;
}
- // @@protoc_insertion_point(builder_scope:hbase.pb.DeleteTableStateData)
+ // @@protoc_insertion_point(builder_scope:hbase.pb.DeleteNamespaceStateData)
}
static {
- defaultInstance = new DeleteTableStateData(true);
+ defaultInstance = new DeleteNamespaceStateData(true);
defaultInstance.initFields();
}
- // @@protoc_insertion_point(class_scope:hbase.pb.DeleteTableStateData)
+ // @@protoc_insertion_point(class_scope:hbase.pb.DeleteNamespaceStateData)
}
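// Illustrative sketch, not part of the generated file above: a minimal round trip of
// DeleteNamespaceStateData setting only the required namespace_name field. It relies on
// the setNamespaceName and parseDelimitedFrom methods shown in this patch; writeDelimitedTo
// is the inherited protobuf counterpart, and the stream classes are standard java.io.
//
//   DeleteNamespaceStateData state = DeleteNamespaceStateData.newBuilder()
//       .setNamespaceName("demo_ns")   // required field 1; optional field 2 left unset
//       .build();
//   java.io.ByteArrayOutputStream out = new java.io.ByteArrayOutputStream();
//   state.writeDelimitedTo(out);       // length-prefixed wire form
//   DeleteNamespaceStateData parsed = DeleteNamespaceStateData.parseDelimitedFrom(
//       new java.io.ByteArrayInputStream(out.toByteArray()));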
public interface AddColumnFamilyStateDataOrBuilder
@@ -12965,6 +15339,21 @@ public final class MasterProcedureProtos {
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hbase_pb_DeleteTableStateData_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_hbase_pb_CreateNamespaceStateData_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_hbase_pb_CreateNamespaceStateData_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_hbase_pb_ModifyNamespaceStateData_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_hbase_pb_ModifyNamespaceStateData_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_hbase_pb_DeleteNamespaceStateData_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_hbase_pb_DeleteNamespaceStateData_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
internal_static_hbase_pb_AddColumnFamilyStateData_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
@@ -13023,96 +15412,119 @@ public final class MasterProcedureProtos {
"StateData\022,\n\tuser_info\030\001 \002(\0132\031.hbase.pb." +
"UserInformation\022\'\n\ntable_name\030\002 \002(\0132\023.hb" +
"ase.pb.TableName\022)\n\013region_info\030\003 \003(\0132\024.",
- "hbase.pb.RegionInfo\"\344\001\n\030AddColumnFamilyS" +
- "tateData\022,\n\tuser_info\030\001 \002(\0132\031.hbase.pb.U" +
- "serInformation\022\'\n\ntable_name\030\002 \002(\0132\023.hba" +
- "se.pb.TableName\0229\n\023columnfamily_schema\030\003" +
- " \002(\0132\034.hbase.pb.ColumnFamilySchema\0226\n\027un" +
+ "hbase.pb.RegionInfo\"W\n\030CreateNamespaceSt" +
+ "ateData\022;\n\024namespace_descriptor\030\001 \002(\0132\035." +
+ "hbase.pb.NamespaceDescriptor\"\237\001\n\030ModifyN" +
+ "amespaceStateData\022;\n\024namespace_descripto" +
+ "r\030\001 \002(\0132\035.hbase.pb.NamespaceDescriptor\022F" +
+ "\n\037unmodified_namespace_descriptor\030\002 \001(\0132" +
+ "\035.hbase.pb.NamespaceDescriptor\"o\n\030Delete" +
+ "NamespaceStateData\022\026\n\016namespace_name\030\001 \002" +
+ "(\t\022;\n\024namespace_descriptor\030\002 \001(\0132\035.hbase" +
+ ".pb.NamespaceDescriptor\"\344\001\n\030AddColumnFam",
+ "ilyStateData\022,\n\tuser_info\030\001 \002(\0132\031.hbase." +
+ "pb.UserInformation\022\'\n\ntable_name\030\002 \002(\0132\023" +
+ ".hbase.pb.TableName\0229\n\023columnfamily_sche" +
+ "ma\030\003 \002(\0132\034.hbase.pb.ColumnFamilySchema\0226" +
+ "\n\027unmodified_table_schema\030\004 \001(\0132\025.hbase." +
+ "pb.TableSchema\"\347\001\n\033ModifyColumnFamilySta" +
+ "teData\022,\n\tuser_info\030\001 \002(\0132\031.hbase.pb.Use" +
+ "rInformation\022\'\n\ntable_name\030\002 \002(\0132\023.hbase" +
+ ".pb.TableName\0229\n\023columnfamily_schema\030\003 \002" +
+ "(\0132\034.hbase.pb.ColumnFamilySchema\0226\n\027unmo",
+ "dified_table_schema\030\004 \001(\0132\025.hbase.pb.Tab" +
+ "leSchema\"\307\001\n\033DeleteColumnFamilyStateData" +
+ "\022,\n\tuser_info\030\001 \002(\0132\031.hbase.pb.UserInfor" +
+ "mation\022\'\n\ntable_name\030\002 \002(\0132\023.hbase.pb.Ta" +
+ "bleName\022\031\n\021columnfamily_name\030\003 \002(\014\0226\n\027un" +
"modified_table_schema\030\004 \001(\0132\025.hbase.pb.T" +
- "ableSchema\"\347\001\n\033ModifyColumnFamilyStateDa" +
- "ta\022,\n\tuser_info\030\001 \002(\0132\031.hbase.pb.UserInf" +
- "ormation\022\'\n\ntable_name\030\002 \002(\0132\023.hbase.pb." +
- "TableName\0229\n\023columnfamily_schema\030\003 \002(\0132\034",
- ".hbase.pb.ColumnFamilySchema\0226\n\027unmodifi" +
- "ed_table_schema\030\004 \001(\0132\025.hbase.pb.TableSc" +
- "hema\"\307\001\n\033DeleteColumnFamilyStateData\022,\n\t" +
- "user_info\030\001 \002(\0132\031.hbase.pb.UserInformati" +
- "on\022\'\n\ntable_name\030\002 \002(\0132\023.hbase.pb.TableN" +
- "ame\022\031\n\021columnfamily_name\030\003 \002(\014\0226\n\027unmodi" +
- "fied_table_schema\030\004 \001(\0132\025.hbase.pb.Table" +
- "Schema\"\215\001\n\024EnableTableStateData\022,\n\tuser_" +
- "info\030\001 \002(\0132\031.hbase.pb.UserInformation\022\'\n" +
- "\ntable_name\030\002 \002(\0132\023.hbase.pb.TableName\022\036",
- "\n\026skip_table_state_check\030\003 \002(\010\"\216\001\n\025Disab" +
- "leTableStateData\022,\n\tuser_info\030\001 \002(\0132\031.hb" +
- "ase.pb.UserInformation\022\'\n\ntable_name\030\002 \002" +
- "(\0132\023.hbase.pb.TableName\022\036\n\026skip_table_st" +
- "ate_check\030\003 \002(\010\"\201\002\n\024ServerCrashStateData" +
- "\022)\n\013server_name\030\001 \002(\0132\024.hbase.pb.ServerN" +
- "ame\022\036\n\026distributed_log_replay\030\002 \001(\010\0227\n\031r" +
- "egions_on_crashed_server\030\003 \003(\0132\024.hbase.p" +
- "b.RegionInfo\022.\n\020regions_assigned\030\004 \003(\0132\024" +
- ".hbase.pb.RegionInfo\022\025\n\rcarrying_meta\030\005 ",
- "\001(\010\022\036\n\020should_split_wal\030\006 \001(\010:\004true*\330\001\n\020" +
- "CreateTableState\022\036\n\032CREATE_TABLE_PRE_OPE" +
- "RATION\020\001\022 \n\034CREATE_TABLE_WRITE_FS_LAYOUT" +
- "\020\002\022\034\n\030CREATE_TABLE_ADD_TO_META\020\003\022\037\n\033CREA" +
- "TE_TABLE_ASSIGN_REGIONS\020\004\022\"\n\036CREATE_TABL" +
- "E_UPDATE_DESC_CACHE\020\005\022\037\n\033CREATE_TABLE_PO" +
- "ST_OPERATION\020\006*\207\002\n\020ModifyTableState\022\030\n\024M" +
- "ODIFY_TABLE_PREPARE\020\001\022\036\n\032MODIFY_TABLE_PR" +
- "E_OPERATION\020\002\022(\n$MODIFY_TABLE_UPDATE_TAB" +
- "LE_DESCRIPTOR\020\003\022&\n\"MODIFY_TABLE_REMOVE_R",
- "EPLICA_COLUMN\020\004\022!\n\035MODIFY_TABLE_DELETE_F" +
- "S_LAYOUT\020\005\022\037\n\033MODIFY_TABLE_POST_OPERATIO" +
- "N\020\006\022#\n\037MODIFY_TABLE_REOPEN_ALL_REGIONS\020\007" +
- "*\212\002\n\022TruncateTableState\022 \n\034TRUNCATE_TABL" +
- "E_PRE_OPERATION\020\001\022#\n\037TRUNCATE_TABLE_REMO" +
- "VE_FROM_META\020\002\022\"\n\036TRUNCATE_TABLE_CLEAR_F" +
- "S_LAYOUT\020\003\022#\n\037TRUNCATE_TABLE_CREATE_FS_L" +
- "AYOUT\020\004\022\036\n\032TRUNCATE_TABLE_ADD_TO_META\020\005\022" +
- "!\n\035TRUNCATE_TABLE_ASSIGN_REGIONS\020\006\022!\n\035TR" +
- "UNCATE_TABLE_POST_OPERATION\020\007*\337\001\n\020Delete",
- "TableState\022\036\n\032DELETE_TABLE_PRE_OPERATION" +
- "\020\001\022!\n\035DELETE_TABLE_REMOVE_FROM_META\020\002\022 \n" +
- "\034DELETE_TABLE_CLEAR_FS_LAYOUT\020\003\022\"\n\036DELET" +
- "E_TABLE_UPDATE_DESC_CACHE\020\004\022!\n\035DELETE_TA" +
- "BLE_UNASSIGN_REGIONS\020\005\022\037\n\033DELETE_TABLE_P" +
- "OST_OPERATION\020\006*\331\001\n\024AddColumnFamilyState" +
- "\022\035\n\031ADD_COLUMN_FAMILY_PREPARE\020\001\022#\n\037ADD_C" +
+ "ableSchema\"\215\001\n\024EnableTableStateData\022,\n\tu" +
+ "ser_info\030\001 \002(\0132\031.hbase.pb.UserInformatio" +
+ "n\022\'\n\ntable_name\030\002 \002(\0132\023.hbase.pb.TableNa" +
+ "me\022\036\n\026skip_table_state_check\030\003 \002(\010\"\216\001\n\025D",
+ "isableTableStateData\022,\n\tuser_info\030\001 \002(\0132" +
+ "\031.hbase.pb.UserInformation\022\'\n\ntable_name" +
+ "\030\002 \002(\0132\023.hbase.pb.TableName\022\036\n\026skip_tabl" +
+ "e_state_check\030\003 \002(\010\"\201\002\n\024ServerCrashState" +
+ "Data\022)\n\013server_name\030\001 \002(\0132\024.hbase.pb.Ser" +
+ "verName\022\036\n\026distributed_log_replay\030\002 \001(\010\022" +
+ "7\n\031regions_on_crashed_server\030\003 \003(\0132\024.hba" +
+ "se.pb.RegionInfo\022.\n\020regions_assigned\030\004 \003" +
+ "(\0132\024.hbase.pb.RegionInfo\022\025\n\rcarrying_met" +
+ "a\030\005 \001(\010\022\036\n\020should_split_wal\030\006 \001(\010:\004true*",
+ "\330\001\n\020CreateTableState\022\036\n\032CREATE_TABLE_PRE" +
+ "_OPERATION\020\001\022 \n\034CREATE_TABLE_WRITE_FS_LA" +
+ "YOUT\020\002\022\034\n\030CREATE_TABLE_ADD_TO_META\020\003\022\037\n\033" +
+ "CREATE_TABLE_ASSIGN_REGIONS\020\004\022\"\n\036CREATE_" +
+ "TABLE_UPDATE_DESC_CACHE\020\005\022\037\n\033CREATE_TABL" +
+ "E_POST_OPERATION\020\006*\207\002\n\020ModifyTableState\022" +
+ "\030\n\024MODIFY_TABLE_PREPARE\020\001\022\036\n\032MODIFY_TABL" +
+ "E_PRE_OPERATION\020\002\022(\n$MODIFY_TABLE_UPDATE" +
+ "_TABLE_DESCRIPTOR\020\003\022&\n\"MODIFY_TABLE_REMO" +
+ "VE_REPLICA_COLUMN\020\004\022!\n\035MODIFY_TABLE_DELE",
+ "TE_FS_LAYOUT\020\005\022\037\n\033MODIFY_TABLE_POST_OPER" +
+ "ATION\020\006\022#\n\037MODIFY_TABLE_REOPEN_ALL_REGIO" +
+ "NS\020\007*\212\002\n\022TruncateTableState\022 \n\034TRUNCATE_" +
+ "TABLE_PRE_OPERATION\020\001\022#\n\037TRUNCATE_TABLE_" +
+ "REMOVE_FROM_META\020\002\022\"\n\036TRUNCATE_TABLE_CLE" +
+ "AR_FS_LAYOUT\020\003\022#\n\037TRUNCATE_TABLE_CREATE_" +
+ "FS_LAYOUT\020\004\022\036\n\032TRUNCATE_TABLE_ADD_TO_MET" +
+ "A\020\005\022!\n\035TRUNCATE_TABLE_ASSIGN_REGIONS\020\006\022!" +
+ "\n\035TRUNCATE_TABLE_POST_OPERATION\020\007*\337\001\n\020De" +
+ "leteTableState\022\036\n\032DELETE_TABLE_PRE_OPERA",
+ "TION\020\001\022!\n\035DELETE_TABLE_REMOVE_FROM_META\020" +
+ "\002\022 \n\034DELETE_TABLE_CLEAR_FS_LAYOUT\020\003\022\"\n\036D" +
+ "ELETE_TABLE_UPDATE_DESC_CACHE\020\004\022!\n\035DELET" +
+ "E_TABLE_UNASSIGN_REGIONS\020\005\022\037\n\033DELETE_TAB" +
+ "LE_POST_OPERATION\020\006*\320\001\n\024CreateNamespaceS" +
+ "tate\022\034\n\030CREATE_NAMESPACE_PREPARE\020\001\022%\n!CR" +
+ "EATE_NAMESPACE_CREATE_DIRECTORY\020\002\022)\n%CRE" +
+ "ATE_NAMESPACE_INSERT_INTO_NS_TABLE\020\003\022\036\n\032" +
+ "CREATE_NAMESPACE_UPDATE_ZK\020\004\022(\n$CREATE_N" +
+ "AMESPACE_SET_NAMESPACE_QUOTA\020\005*z\n\024Modify",
+ "NamespaceState\022\034\n\030MODIFY_NAMESPACE_PREPA" +
+ "RE\020\001\022$\n MODIFY_NAMESPACE_UPDATE_NS_TABLE" +
+ "\020\002\022\036\n\032MODIFY_NAMESPACE_UPDATE_ZK\020\003*\332\001\n\024D" +
+ "eleteNamespaceState\022\034\n\030DELETE_NAMESPACE_" +
+ "PREPARE\020\001\022)\n%DELETE_NAMESPACE_DELETE_FRO" +
+ "M_NS_TABLE\020\002\022#\n\037DELETE_NAMESPACE_REMOVE_" +
+ "FROM_ZK\020\003\022\'\n#DELETE_NAMESPACE_DELETE_DIR" +
+ "ECTORIES\020\004\022+\n\'DELETE_NAMESPACE_REMOVE_NA" +
+ "MESPACE_QUOTA\020\005*\331\001\n\024AddColumnFamilyState" +
+ "\022\035\n\031ADD_COLUMN_FAMILY_PREPARE\020\001\022#\n\037ADD_C",
"OLUMN_FAMILY_PRE_OPERATION\020\002\022-\n)ADD_COLU" +
"MN_FAMILY_UPDATE_TABLE_DESCRIPTOR\020\003\022$\n A" +
- "DD_COLUMN_FAMILY_POST_OPERATION\020\004\022(\n$ADD",
+ "DD_COLUMN_FAMILY_POST_OPERATION\020\004\022(\n$ADD" +
"_COLUMN_FAMILY_REOPEN_ALL_REGIONS\020\005*\353\001\n\027" +
"ModifyColumnFamilyState\022 \n\034MODIFY_COLUMN" +
"_FAMILY_PREPARE\020\001\022&\n\"MODIFY_COLUMN_FAMIL" +
"Y_PRE_OPERATION\020\002\0220\n,MODIFY_COLUMN_FAMIL" +
"Y_UPDATE_TABLE_DESCRIPTOR\020\003\022\'\n#MODIFY_CO" +
"LUMN_FAMILY_POST_OPERATION\020\004\022+\n\'MODIFY_C" +
- "OLUMN_FAMILY_REOPEN_ALL_REGIONS\020\005*\226\002\n\027De" +
+ "OLUMN_FAMILY_REOPEN_ALL_REGIONS\020\005*\226\002\n\027De",
"leteColumnFamilyState\022 \n\034DELETE_COLUMN_F" +
"AMILY_PREPARE\020\001\022&\n\"DELETE_COLUMN_FAMILY_" +
- "PRE_OPERATION\020\002\0220\n,DELETE_COLUMN_FAMILY_",
+ "PRE_OPERATION\020\002\0220\n,DELETE_COLUMN_FAMILY_" +
"UPDATE_TABLE_DESCRIPTOR\020\003\022)\n%DELETE_COLU" +
"MN_FAMILY_DELETE_FS_LAYOUT\020\004\022\'\n#DELETE_C" +
"OLUMN_FAMILY_POST_OPERATION\020\005\022+\n\'DELETE_" +
"COLUMN_FAMILY_REOPEN_ALL_REGIONS\020\006*\350\001\n\020E" +
"nableTableState\022\030\n\024ENABLE_TABLE_PREPARE\020" +
"\001\022\036\n\032ENABLE_TABLE_PRE_OPERATION\020\002\022)\n%ENA" +
- "BLE_TABLE_SET_ENABLING_TABLE_STATE\020\003\022$\n " +
+ "BLE_TABLE_SET_ENABLING_TABLE_STATE\020\003\022$\n ",
"ENABLE_TABLE_MARK_REGIONS_ONLINE\020\004\022(\n$EN" +
"ABLE_TABLE_SET_ENABLED_TABLE_STATE\020\005\022\037\n\033" +
- "ENABLE_TABLE_POST_OPERATION\020\006*\362\001\n\021Disabl",
+ "ENABLE_TABLE_POST_OPERATION\020\006*\362\001\n\021Disabl" +
"eTableState\022\031\n\025DISABLE_TABLE_PREPARE\020\001\022\037" +
"\n\033DISABLE_TABLE_PRE_OPERATION\020\002\022+\n\'DISAB" +
"LE_TABLE_SET_DISABLING_TABLE_STATE\020\003\022&\n\"" +
"DISABLE_TABLE_MARK_REGIONS_OFFLINE\020\004\022*\n&" +
"DISABLE_TABLE_SET_DISABLED_TABLE_STATE\020\005" +
"\022 \n\034DISABLE_TABLE_POST_OPERATION\020\006*\234\002\n\020S" +
- "erverCrashState\022\026\n\022SERVER_CRASH_START\020\001\022" +
+ "erverCrashState\022\026\n\022SERVER_CRASH_START\020\001\022",
"\035\n\031SERVER_CRASH_PROCESS_META\020\002\022\034\n\030SERVER" +
"_CRASH_GET_REGIONS\020\003\022\036\n\032SERVER_CRASH_NO_" +
- "SPLIT_LOGS\020\004\022\033\n\027SERVER_CRASH_SPLIT_LOGS\020",
+ "SPLIT_LOGS\020\004\022\033\n\027SERVER_CRASH_SPLIT_LOGS\020" +
"\005\022#\n\037SERVER_CRASH_PREPARE_LOG_REPLAY\020\006\022\027" +
"\n\023SERVER_CRASH_ASSIGN\020\010\022\037\n\033SERVER_CRASH_" +
"WAIT_ON_ASSIGN\020\t\022\027\n\023SERVER_CRASH_FINISH\020" +
@@ -13148,38 +15560,56 @@ public final class MasterProcedureProtos {
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_DeleteTableStateData_descriptor,
new java.lang.String[] { "UserInfo", "TableName", "RegionInfo", });
- internal_static_hbase_pb_AddColumnFamilyStateData_descriptor =
+ internal_static_hbase_pb_CreateNamespaceStateData_descriptor =
getDescriptor().getMessageTypes().get(4);
+ internal_static_hbase_pb_CreateNamespaceStateData_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_hbase_pb_CreateNamespaceStateData_descriptor,
+ new java.lang.String[] { "NamespaceDescriptor", });
+ internal_static_hbase_pb_ModifyNamespaceStateData_descriptor =
+ getDescriptor().getMessageTypes().get(5);
+ internal_static_hbase_pb_ModifyNamespaceStateData_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_hbase_pb_ModifyNamespaceStateData_descriptor,
+ new java.lang.String[] { "NamespaceDescriptor", "UnmodifiedNamespaceDescriptor", });
+ internal_static_hbase_pb_DeleteNamespaceStateData_descriptor =
+ getDescriptor().getMessageTypes().get(6);
+ internal_static_hbase_pb_DeleteNamespaceStateData_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_hbase_pb_DeleteNamespaceStateData_descriptor,
+ new java.lang.String[] { "NamespaceName", "NamespaceDescriptor", });
+ internal_static_hbase_pb_AddColumnFamilyStateData_descriptor =
+ getDescriptor().getMessageTypes().get(7);
internal_static_hbase_pb_AddColumnFamilyStateData_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_AddColumnFamilyStateData_descriptor,
new java.lang.String[] { "UserInfo", "TableName", "ColumnfamilySchema", "UnmodifiedTableSchema", });
internal_static_hbase_pb_ModifyColumnFamilyStateData_descriptor =
- getDescriptor().getMessageTypes().get(5);
+ getDescriptor().getMessageTypes().get(8);
internal_static_hbase_pb_ModifyColumnFamilyStateData_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_ModifyColumnFamilyStateData_descriptor,
new java.lang.String[] { "UserInfo", "TableName", "ColumnfamilySchema", "UnmodifiedTableSchema", });
internal_static_hbase_pb_DeleteColumnFamilyStateData_descriptor =
- getDescriptor().getMessageTypes().get(6);
+ getDescriptor().getMessageTypes().get(9);
internal_static_hbase_pb_DeleteColumnFamilyStateData_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_DeleteColumnFamilyStateData_descriptor,
new java.lang.String[] { "UserInfo", "TableName", "ColumnfamilyName", "UnmodifiedTableSchema", });
internal_static_hbase_pb_EnableTableStateData_descriptor =
- getDescriptor().getMessageTypes().get(7);
+ getDescriptor().getMessageTypes().get(10);
internal_static_hbase_pb_EnableTableStateData_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_EnableTableStateData_descriptor,
new java.lang.String[] { "UserInfo", "TableName", "SkipTableStateCheck", });
internal_static_hbase_pb_DisableTableStateData_descriptor =
- getDescriptor().getMessageTypes().get(8);
+ getDescriptor().getMessageTypes().get(11);
internal_static_hbase_pb_DisableTableStateData_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_DisableTableStateData_descriptor,
new java.lang.String[] { "UserInfo", "TableName", "SkipTableStateCheck", });
internal_static_hbase_pb_ServerCrashStateData_descriptor =
- getDescriptor().getMessageTypes().get(9);
+ getDescriptor().getMessageTypes().get(12);
internal_static_hbase_pb_ServerCrashStateData_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_ServerCrashStateData_descriptor,
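
The registration changes above slot CreateNamespaceStateData, ModifyNamespaceStateData and DeleteNamespaceStateData in at message-type indices 4-6, which is why AddColumnFamilyStateData and the later state-data messages move from indices 4-9 to 7-12. A minimal sketch of checking that ordering against the regenerated descriptor (illustrative only, not part of this patch):

    import com.google.protobuf.Descriptors;
    import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos;

    public class MasterProcedureDescriptorOrder {
      public static void main(String[] args) {
        Descriptors.FileDescriptor fd = MasterProcedureProtos.getDescriptor();
        // New namespace state-data messages occupy indices 4-6 after this change.
        System.out.println(fd.getMessageTypes().get(4).getName()); // CreateNamespaceStateData
        System.out.println(fd.getMessageTypes().get(5).getName()); // ModifyNamespaceStateData
        System.out.println(fd.getMessageTypes().get(6).getName()); // DeleteNamespaceStateData
        // Pre-existing state-data messages are pushed back by three slots.
        System.out.println(fd.getMessageTypes().get(7).getName()); // AddColumnFamilyStateData
      }
    }
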
diff --git hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
index eb98b42..412d792 100644
--- hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
+++ hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
@@ -16424,6 +16424,26 @@ public final class MasterProtos {
* required .hbase.pb.NamespaceDescriptor namespaceDescriptor = 1;
*/
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptorOrBuilder getNamespaceDescriptorOrBuilder();
+
+ // optional uint64 nonce_group = 2 [default = 0];
+ /**
+ * optional uint64 nonce_group = 2 [default = 0];
+ */
+ boolean hasNonceGroup();
+ /**
+ * optional uint64 nonce_group = 2 [default = 0];
+ */
+ long getNonceGroup();
+
+ // optional uint64 nonce = 3 [default = 0];
+ /**
+ * optional uint64 nonce = 3 [default = 0];
+ */
+ boolean hasNonce();
+ /**
+ * optional uint64 nonce = 3 [default = 0];
+ */
+ long getNonce();
}
/**
* Protobuf type {@code hbase.pb.CreateNamespaceRequest}
@@ -16489,6 +16509,16 @@ public final class MasterProtos {
bitField0_ |= 0x00000001;
break;
}
+ case 16: {
+ bitField0_ |= 0x00000002;
+ nonceGroup_ = input.readUInt64();
+ break;
+ }
+ case 24: {
+ bitField0_ |= 0x00000004;
+ nonce_ = input.readUInt64();
+ break;
+ }
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
@@ -16551,8 +16581,42 @@ public final class MasterProtos {
return namespaceDescriptor_;
}
+ // optional uint64 nonce_group = 2 [default = 0];
+ public static final int NONCE_GROUP_FIELD_NUMBER = 2;
+ private long nonceGroup_;
+ /**
+ * optional uint64 nonce_group = 2 [default = 0];
+ */
+ public boolean hasNonceGroup() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * optional uint64 nonce_group = 2 [default = 0];
+ */
+ public long getNonceGroup() {
+ return nonceGroup_;
+ }
+
+ // optional uint64 nonce = 3 [default = 0];
+ public static final int NONCE_FIELD_NUMBER = 3;
+ private long nonce_;
+ /**
+ * optional uint64 nonce = 3 [default = 0];
+ */
+ public boolean hasNonce() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * optional uint64 nonce = 3 [default = 0];
+ */
+ public long getNonce() {
+ return nonce_;
+ }
+
private void initFields() {
namespaceDescriptor_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.getDefaultInstance();
+ nonceGroup_ = 0L;
+ nonce_ = 0L;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
@@ -16577,6 +16641,12 @@ public final class MasterProtos {
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeMessage(1, namespaceDescriptor_);
}
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeUInt64(2, nonceGroup_);
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ output.writeUInt64(3, nonce_);
+ }
getUnknownFields().writeTo(output);
}
@@ -16590,6 +16660,14 @@ public final class MasterProtos {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, namespaceDescriptor_);
}
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeUInt64Size(2, nonceGroup_);
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeUInt64Size(3, nonce_);
+ }
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
@@ -16618,6 +16696,16 @@ public final class MasterProtos {
result = result && getNamespaceDescriptor()
.equals(other.getNamespaceDescriptor());
}
+ result = result && (hasNonceGroup() == other.hasNonceGroup());
+ if (hasNonceGroup()) {
+ result = result && (getNonceGroup()
+ == other.getNonceGroup());
+ }
+ result = result && (hasNonce() == other.hasNonce());
+ if (hasNonce()) {
+ result = result && (getNonce()
+ == other.getNonce());
+ }
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
@@ -16635,6 +16723,14 @@ public final class MasterProtos {
hash = (37 * hash) + NAMESPACEDESCRIPTOR_FIELD_NUMBER;
hash = (53 * hash) + getNamespaceDescriptor().hashCode();
}
+ if (hasNonceGroup()) {
+ hash = (37 * hash) + NONCE_GROUP_FIELD_NUMBER;
+ hash = (53 * hash) + hashLong(getNonceGroup());
+ }
+ if (hasNonce()) {
+ hash = (37 * hash) + NONCE_FIELD_NUMBER;
+ hash = (53 * hash) + hashLong(getNonce());
+ }
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
@@ -16751,6 +16847,10 @@ public final class MasterProtos {
namespaceDescriptorBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
+ nonceGroup_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000002);
+ nonce_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000004);
return this;
}
@@ -16787,6 +16887,14 @@ public final class MasterProtos {
} else {
result.namespaceDescriptor_ = namespaceDescriptorBuilder_.build();
}
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ result.nonceGroup_ = nonceGroup_;
+ if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+ to_bitField0_ |= 0x00000004;
+ }
+ result.nonce_ = nonce_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
@@ -16806,6 +16914,12 @@ public final class MasterProtos {
if (other.hasNamespaceDescriptor()) {
mergeNamespaceDescriptor(other.getNamespaceDescriptor());
}
+ if (other.hasNonceGroup()) {
+ setNonceGroup(other.getNonceGroup());
+ }
+ if (other.hasNonce()) {
+ setNonce(other.getNonce());
+ }
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
@@ -16958,6 +17072,72 @@ public final class MasterProtos {
return namespaceDescriptorBuilder_;
}
+ // optional uint64 nonce_group = 2 [default = 0];
+ private long nonceGroup_ ;
+ /**
+ * optional uint64 nonce_group = 2 [default = 0];
+ */
+ public boolean hasNonceGroup() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * optional uint64 nonce_group = 2 [default = 0];
+ */
+ public long getNonceGroup() {
+ return nonceGroup_;
+ }
+ /**
+ * optional uint64 nonce_group = 2 [default = 0];
+ */
+ public Builder setNonceGroup(long value) {
+ bitField0_ |= 0x00000002;
+ nonceGroup_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional uint64 nonce_group = 2 [default = 0];
+ */
+ public Builder clearNonceGroup() {
+ bitField0_ = (bitField0_ & ~0x00000002);
+ nonceGroup_ = 0L;
+ onChanged();
+ return this;
+ }
+
+ // optional uint64 nonce = 3 [default = 0];
+ private long nonce_ ;
+ /**
+ * optional uint64 nonce = 3 [default = 0];
+ */
+ public boolean hasNonce() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * optional uint64 nonce = 3 [default = 0];
+ */
+ public long getNonce() {
+ return nonce_;
+ }
+ /**
+ * optional uint64 nonce = 3 [default = 0];
+ */
+ public Builder setNonce(long value) {
+ bitField0_ |= 0x00000004;
+ nonce_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional uint64 nonce = 3 [default = 0];
+ */
+ public Builder clearNonce() {
+ bitField0_ = (bitField0_ & ~0x00000004);
+ nonce_ = 0L;
+ onChanged();
+ return this;
+ }
+
// @@protoc_insertion_point(builder_scope:hbase.pb.CreateNamespaceRequest)
}
@@ -17324,6 +17504,26 @@ public final class MasterProtos {
*/
com.google.protobuf.ByteString
getNamespaceNameBytes();
+
+ // optional uint64 nonce_group = 2 [default = 0];
+ /**
+ * optional uint64 nonce_group = 2 [default = 0];
+ */
+ boolean hasNonceGroup();
+ /**
+ * optional uint64 nonce_group = 2 [default = 0];
+ */
+ long getNonceGroup();
+
+ // optional uint64 nonce = 3 [default = 0];
+ /**
+ * optional uint64 nonce = 3 [default = 0];
+ */
+ boolean hasNonce();
+ /**
+ * optional uint64 nonce = 3 [default = 0];
+ */
+ long getNonce();
}
/**
* Protobuf type {@code hbase.pb.DeleteNamespaceRequest}
@@ -17381,6 +17581,16 @@ public final class MasterProtos {
namespaceName_ = input.readBytes();
break;
}
+ case 16: {
+ bitField0_ |= 0x00000002;
+ nonceGroup_ = input.readUInt64();
+ break;
+ }
+ case 24: {
+ bitField0_ |= 0x00000004;
+ nonce_ = input.readUInt64();
+ break;
+ }
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
@@ -17464,8 +17674,42 @@ public final class MasterProtos {
}
}
+ // optional uint64 nonce_group = 2 [default = 0];
+ public static final int NONCE_GROUP_FIELD_NUMBER = 2;
+ private long nonceGroup_;
+ /**
+ * optional uint64 nonce_group = 2 [default = 0];
+ */
+ public boolean hasNonceGroup() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * optional uint64 nonce_group = 2 [default = 0];
+ */
+ public long getNonceGroup() {
+ return nonceGroup_;
+ }
+
+ // optional uint64 nonce = 3 [default = 0];
+ public static final int NONCE_FIELD_NUMBER = 3;
+ private long nonce_;
+ /**
+ * optional uint64 nonce = 3 [default = 0];
+ */
+ public boolean hasNonce() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * optional uint64 nonce = 3 [default = 0];
+ */
+ public long getNonce() {
+ return nonce_;
+ }
+
private void initFields() {
namespaceName_ = "";
+ nonceGroup_ = 0L;
+ nonce_ = 0L;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
@@ -17486,6 +17730,12 @@ public final class MasterProtos {
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeBytes(1, getNamespaceNameBytes());
}
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeUInt64(2, nonceGroup_);
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ output.writeUInt64(3, nonce_);
+ }
getUnknownFields().writeTo(output);
}
@@ -17499,6 +17749,14 @@ public final class MasterProtos {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(1, getNamespaceNameBytes());
}
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeUInt64Size(2, nonceGroup_);
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeUInt64Size(3, nonce_);
+ }
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
@@ -17527,6 +17785,16 @@ public final class MasterProtos {
result = result && getNamespaceName()
.equals(other.getNamespaceName());
}
+ result = result && (hasNonceGroup() == other.hasNonceGroup());
+ if (hasNonceGroup()) {
+ result = result && (getNonceGroup()
+ == other.getNonceGroup());
+ }
+ result = result && (hasNonce() == other.hasNonce());
+ if (hasNonce()) {
+ result = result && (getNonce()
+ == other.getNonce());
+ }
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
@@ -17544,6 +17812,14 @@ public final class MasterProtos {
hash = (37 * hash) + NAMESPACENAME_FIELD_NUMBER;
hash = (53 * hash) + getNamespaceName().hashCode();
}
+ if (hasNonceGroup()) {
+ hash = (37 * hash) + NONCE_GROUP_FIELD_NUMBER;
+ hash = (53 * hash) + hashLong(getNonceGroup());
+ }
+ if (hasNonce()) {
+ hash = (37 * hash) + NONCE_FIELD_NUMBER;
+ hash = (53 * hash) + hashLong(getNonce());
+ }
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
@@ -17655,6 +17931,10 @@ public final class MasterProtos {
super.clear();
namespaceName_ = "";
bitField0_ = (bitField0_ & ~0x00000001);
+ nonceGroup_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000002);
+ nonce_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000004);
return this;
}
@@ -17687,6 +17967,14 @@ public final class MasterProtos {
to_bitField0_ |= 0x00000001;
}
result.namespaceName_ = namespaceName_;
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ result.nonceGroup_ = nonceGroup_;
+ if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+ to_bitField0_ |= 0x00000004;
+ }
+ result.nonce_ = nonce_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
@@ -17708,6 +17996,12 @@ public final class MasterProtos {
namespaceName_ = other.namespaceName_;
onChanged();
}
+ if (other.hasNonceGroup()) {
+ setNonceGroup(other.getNonceGroup());
+ }
+ if (other.hasNonce()) {
+ setNonce(other.getNonce());
+ }
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
@@ -17813,6 +18107,72 @@ public final class MasterProtos {
return this;
}
+ // optional uint64 nonce_group = 2 [default = 0];
+ private long nonceGroup_ ;
+ /**
+ * optional uint64 nonce_group = 2 [default = 0];
+ */
+ public boolean hasNonceGroup() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * optional uint64 nonce_group = 2 [default = 0];
+ */
+ public long getNonceGroup() {
+ return nonceGroup_;
+ }
+ /**
+ * optional uint64 nonce_group = 2 [default = 0];
+ */
+ public Builder setNonceGroup(long value) {
+ bitField0_ |= 0x00000002;
+ nonceGroup_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional uint64 nonce_group = 2 [default = 0];
+ */
+ public Builder clearNonceGroup() {
+ bitField0_ = (bitField0_ & ~0x00000002);
+ nonceGroup_ = 0L;
+ onChanged();
+ return this;
+ }
+
+ // optional uint64 nonce = 3 [default = 0];
+ private long nonce_ ;
+ /**
+ * optional uint64 nonce = 3 [default = 0];
+ */
+ public boolean hasNonce() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * optional uint64 nonce = 3 [default = 0];
+ */
+ public long getNonce() {
+ return nonce_;
+ }
+ /**
+ * optional uint64 nonce = 3 [default = 0];
+ */
+ public Builder setNonce(long value) {
+ bitField0_ |= 0x00000004;
+ nonce_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional uint64 nonce = 3 [default = 0];
+ */
+ public Builder clearNonce() {
+ bitField0_ = (bitField0_ & ~0x00000004);
+ nonce_ = 0L;
+ onChanged();
+ return this;
+ }
+
// @@protoc_insertion_point(builder_scope:hbase.pb.DeleteNamespaceRequest)
}
@@ -18178,6 +18538,26 @@ public final class MasterProtos {
* required .hbase.pb.NamespaceDescriptor namespaceDescriptor = 1;
*/
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptorOrBuilder getNamespaceDescriptorOrBuilder();
+
+ // optional uint64 nonce_group = 2 [default = 0];
+ /**
+ * optional uint64 nonce_group = 2 [default = 0];
+ */
+ boolean hasNonceGroup();
+ /**
+ * optional uint64 nonce_group = 2 [default = 0];
+ */
+ long getNonceGroup();
+
+ // optional uint64 nonce = 3 [default = 0];
+ /**
+ * optional uint64 nonce = 3 [default = 0];
+ */
+ boolean hasNonce();
+ /**
+ * optional uint64 nonce = 3 [default = 0];
+ */
+ long getNonce();
}
/**
* Protobuf type {@code hbase.pb.ModifyNamespaceRequest}
@@ -18243,6 +18623,16 @@ public final class MasterProtos {
bitField0_ |= 0x00000001;
break;
}
+ case 16: {
+ bitField0_ |= 0x00000002;
+ nonceGroup_ = input.readUInt64();
+ break;
+ }
+ case 24: {
+ bitField0_ |= 0x00000004;
+ nonce_ = input.readUInt64();
+ break;
+ }
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
@@ -18305,8 +18695,42 @@ public final class MasterProtos {
return namespaceDescriptor_;
}
+ // optional uint64 nonce_group = 2 [default = 0];
+ public static final int NONCE_GROUP_FIELD_NUMBER = 2;
+ private long nonceGroup_;
+ /**
+ * optional uint64 nonce_group = 2 [default = 0];
+ */
+ public boolean hasNonceGroup() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * optional uint64 nonce_group = 2 [default = 0];
+ */
+ public long getNonceGroup() {
+ return nonceGroup_;
+ }
+
+ // optional uint64 nonce = 3 [default = 0];
+ public static final int NONCE_FIELD_NUMBER = 3;
+ private long nonce_;
+ /**
+ * optional uint64 nonce = 3 [default = 0];
+ */
+ public boolean hasNonce() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * optional uint64 nonce = 3 [default = 0];
+ */
+ public long getNonce() {
+ return nonce_;
+ }
+
private void initFields() {
namespaceDescriptor_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.getDefaultInstance();
+ nonceGroup_ = 0L;
+ nonce_ = 0L;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
@@ -18331,6 +18755,12 @@ public final class MasterProtos {
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeMessage(1, namespaceDescriptor_);
}
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeUInt64(2, nonceGroup_);
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ output.writeUInt64(3, nonce_);
+ }
getUnknownFields().writeTo(output);
}
@@ -18344,6 +18774,14 @@ public final class MasterProtos {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, namespaceDescriptor_);
}
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeUInt64Size(2, nonceGroup_);
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeUInt64Size(3, nonce_);
+ }
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
@@ -18372,6 +18810,16 @@ public final class MasterProtos {
result = result && getNamespaceDescriptor()
.equals(other.getNamespaceDescriptor());
}
+ result = result && (hasNonceGroup() == other.hasNonceGroup());
+ if (hasNonceGroup()) {
+ result = result && (getNonceGroup()
+ == other.getNonceGroup());
+ }
+ result = result && (hasNonce() == other.hasNonce());
+ if (hasNonce()) {
+ result = result && (getNonce()
+ == other.getNonce());
+ }
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
@@ -18389,6 +18837,14 @@ public final class MasterProtos {
hash = (37 * hash) + NAMESPACEDESCRIPTOR_FIELD_NUMBER;
hash = (53 * hash) + getNamespaceDescriptor().hashCode();
}
+ if (hasNonceGroup()) {
+ hash = (37 * hash) + NONCE_GROUP_FIELD_NUMBER;
+ hash = (53 * hash) + hashLong(getNonceGroup());
+ }
+ if (hasNonce()) {
+ hash = (37 * hash) + NONCE_FIELD_NUMBER;
+ hash = (53 * hash) + hashLong(getNonce());
+ }
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
@@ -18505,6 +18961,10 @@ public final class MasterProtos {
namespaceDescriptorBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
+ nonceGroup_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000002);
+ nonce_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000004);
return this;
}
@@ -18541,6 +19001,14 @@ public final class MasterProtos {
} else {
result.namespaceDescriptor_ = namespaceDescriptorBuilder_.build();
}
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ result.nonceGroup_ = nonceGroup_;
+ if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+ to_bitField0_ |= 0x00000004;
+ }
+ result.nonce_ = nonce_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
@@ -18560,6 +19028,12 @@ public final class MasterProtos {
if (other.hasNamespaceDescriptor()) {
mergeNamespaceDescriptor(other.getNamespaceDescriptor());
}
+ if (other.hasNonceGroup()) {
+ setNonceGroup(other.getNonceGroup());
+ }
+ if (other.hasNonce()) {
+ setNonce(other.getNonce());
+ }
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
@@ -18712,6 +19186,72 @@ public final class MasterProtos {
return namespaceDescriptorBuilder_;
}
+ // optional uint64 nonce_group = 2 [default = 0];
+ private long nonceGroup_ ;
+ /**
+ * optional uint64 nonce_group = 2 [default = 0];
+ */
+ public boolean hasNonceGroup() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * optional uint64 nonce_group = 2 [default = 0];
+ */
+ public long getNonceGroup() {
+ return nonceGroup_;
+ }
+ /**
+ * optional uint64 nonce_group = 2 [default = 0];
+ */
+ public Builder setNonceGroup(long value) {
+ bitField0_ |= 0x00000002;
+ nonceGroup_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional uint64 nonce_group = 2 [default = 0];
+ */
+ public Builder clearNonceGroup() {
+ bitField0_ = (bitField0_ & ~0x00000002);
+ nonceGroup_ = 0L;
+ onChanged();
+ return this;
+ }
+
+ // optional uint64 nonce = 3 [default = 0];
+ private long nonce_ ;
+ /**
+ * optional uint64 nonce = 3 [default = 0];
+ */
+ public boolean hasNonce() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * optional uint64 nonce = 3 [default = 0];
+ */
+ public long getNonce() {
+ return nonce_;
+ }
+ /**
+ * optional uint64 nonce = 3 [default = 0];
+ */
+ public Builder setNonce(long value) {
+ bitField0_ |= 0x00000004;
+ nonce_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional uint64 nonce = 3 [default = 0];
+ */
+ public Builder clearNonce() {
+ bitField0_ = (bitField0_ & ~0x00000004);
+ nonce_ = 0L;
+ onChanged();
+ return this;
+ }
+
// @@protoc_insertion_point(builder_scope:hbase.pb.ModifyNamespaceRequest)
}
@@ -56042,232 +56582,235 @@ public final class MasterProtos {
"ableName\022+\n\014table_schema\030\002 \002(\0132\025.hbase.p" +
"b.TableSchema\022\026\n\013nonce_group\030\003 \001(\004:\0010\022\020\n" +
"\005nonce\030\004 \001(\004:\0010\"&\n\023ModifyTableResponse\022\017" +
- "\n\007proc_id\030\001 \001(\004\"T\n\026CreateNamespaceReques" +
+ "\n\007proc_id\030\001 \001(\004\"~\n\026CreateNamespaceReques" +
"t\022:\n\023namespaceDescriptor\030\001 \002(\0132\035.hbase.p" +
- "b.NamespaceDescriptor\"\031\n\027CreateNamespace" +
- "Response\"/\n\026DeleteNamespaceRequest\022\025\n\rna" +
- "mespaceName\030\001 \002(\t\"\031\n\027DeleteNamespaceResp",
- "onse\"T\n\026ModifyNamespaceRequest\022:\n\023namesp" +
- "aceDescriptor\030\001 \002(\0132\035.hbase.pb.Namespace" +
- "Descriptor\"\031\n\027ModifyNamespaceResponse\"6\n" +
- "\035GetNamespaceDescriptorRequest\022\025\n\rnamesp" +
- "aceName\030\001 \002(\t\"\\\n\036GetNamespaceDescriptorR" +
- "esponse\022:\n\023namespaceDescriptor\030\001 \002(\0132\035.h" +
- "base.pb.NamespaceDescriptor\"!\n\037ListNames" +
- "paceDescriptorsRequest\"^\n ListNamespaceD" +
- "escriptorsResponse\022:\n\023namespaceDescripto" +
- "r\030\001 \003(\0132\035.hbase.pb.NamespaceDescriptor\"?",
- "\n&ListTableDescriptorsByNamespaceRequest" +
- "\022\025\n\rnamespaceName\030\001 \002(\t\"U\n\'ListTableDesc" +
- "riptorsByNamespaceResponse\022*\n\013tableSchem" +
- "a\030\001 \003(\0132\025.hbase.pb.TableSchema\"9\n ListTa" +
- "bleNamesByNamespaceRequest\022\025\n\rnamespaceN" +
- "ame\030\001 \002(\t\"K\n!ListTableNamesByNamespaceRe" +
- "sponse\022&\n\ttableName\030\001 \003(\0132\023.hbase.pb.Tab" +
- "leName\"\021\n\017ShutdownRequest\"\022\n\020ShutdownRes" +
- "ponse\"\023\n\021StopMasterRequest\"\024\n\022StopMaster" +
- "Response\"\020\n\016BalanceRequest\"\'\n\017BalanceRes",
- "ponse\022\024\n\014balancer_ran\030\001 \002(\010\"<\n\031SetBalanc" +
- "erRunningRequest\022\n\n\002on\030\001 \002(\010\022\023\n\013synchron" +
- "ous\030\002 \001(\010\"8\n\032SetBalancerRunningResponse\022" +
- "\032\n\022prev_balance_value\030\001 \001(\010\"\032\n\030IsBalance" +
- "rEnabledRequest\",\n\031IsBalancerEnabledResp" +
- "onse\022\017\n\007enabled\030\001 \002(\010\"\027\n\025RunCatalogScanR" +
- "equest\"-\n\026RunCatalogScanResponse\022\023\n\013scan" +
- "_result\030\001 \001(\005\"-\n\033EnableCatalogJanitorReq" +
- "uest\022\016\n\006enable\030\001 \002(\010\"2\n\034EnableCatalogJan" +
- "itorResponse\022\022\n\nprev_value\030\001 \001(\010\" \n\036IsCa",
- "talogJanitorEnabledRequest\"0\n\037IsCatalogJ" +
- "anitorEnabledResponse\022\r\n\005value\030\001 \002(\010\"B\n\017" +
- "SnapshotRequest\022/\n\010snapshot\030\001 \002(\0132\035.hbas" +
- "e.pb.SnapshotDescription\",\n\020SnapshotResp" +
- "onse\022\030\n\020expected_timeout\030\001 \002(\003\"\036\n\034GetCom" +
- "pletedSnapshotsRequest\"Q\n\035GetCompletedSn" +
- "apshotsResponse\0220\n\tsnapshots\030\001 \003(\0132\035.hba" +
- "se.pb.SnapshotDescription\"H\n\025DeleteSnaps" +
- "hotRequest\022/\n\010snapshot\030\001 \002(\0132\035.hbase.pb." +
- "SnapshotDescription\"\030\n\026DeleteSnapshotRes",
- "ponse\"I\n\026RestoreSnapshotRequest\022/\n\010snaps" +
- "hot\030\001 \002(\0132\035.hbase.pb.SnapshotDescription" +
- "\"\031\n\027RestoreSnapshotResponse\"H\n\025IsSnapsho" +
- "tDoneRequest\022/\n\010snapshot\030\001 \001(\0132\035.hbase.p" +
- "b.SnapshotDescription\"^\n\026IsSnapshotDoneR" +
- "esponse\022\023\n\004done\030\001 \001(\010:\005false\022/\n\010snapshot" +
- "\030\002 \001(\0132\035.hbase.pb.SnapshotDescription\"O\n" +
- "\034IsRestoreSnapshotDoneRequest\022/\n\010snapsho" +
- "t\030\001 \001(\0132\035.hbase.pb.SnapshotDescription\"4" +
- "\n\035IsRestoreSnapshotDoneResponse\022\023\n\004done\030",
- "\001 \001(\010:\005false\"F\n\033GetSchemaAlterStatusRequ" +
- "est\022\'\n\ntable_name\030\001 \002(\0132\023.hbase.pb.Table" +
- "Name\"T\n\034GetSchemaAlterStatusResponse\022\035\n\025" +
- "yet_to_update_regions\030\001 \001(\r\022\025\n\rtotal_reg" +
- "ions\030\002 \001(\r\"\213\001\n\032GetTableDescriptorsReques" +
- "t\022(\n\013table_names\030\001 \003(\0132\023.hbase.pb.TableN" +
- "ame\022\r\n\005regex\030\002 \001(\t\022!\n\022include_sys_tables" +
- "\030\003 \001(\010:\005false\022\021\n\tnamespace\030\004 \001(\t\"J\n\033GetT" +
- "ableDescriptorsResponse\022+\n\014table_schema\030" +
- "\001 \003(\0132\025.hbase.pb.TableSchema\"[\n\024GetTable",
- "NamesRequest\022\r\n\005regex\030\001 \001(\t\022!\n\022include_s" +
- "ys_tables\030\002 \001(\010:\005false\022\021\n\tnamespace\030\003 \001(" +
- "\t\"A\n\025GetTableNamesResponse\022(\n\013table_name" +
- "s\030\001 \003(\0132\023.hbase.pb.TableName\"?\n\024GetTable" +
- "StateRequest\022\'\n\ntable_name\030\001 \002(\0132\023.hbase" +
- ".pb.TableName\"B\n\025GetTableStateResponse\022)" +
- "\n\013table_state\030\001 \002(\0132\024.hbase.pb.TableStat" +
- "e\"\031\n\027GetClusterStatusRequest\"K\n\030GetClust" +
- "erStatusResponse\022/\n\016cluster_status\030\001 \002(\013" +
- "2\027.hbase.pb.ClusterStatus\"\030\n\026IsMasterRun",
- "ningRequest\"4\n\027IsMasterRunningResponse\022\031" +
- "\n\021is_master_running\030\001 \002(\010\"I\n\024ExecProcedu" +
- "reRequest\0221\n\tprocedure\030\001 \002(\0132\036.hbase.pb." +
- "ProcedureDescription\"F\n\025ExecProcedureRes" +
- "ponse\022\030\n\020expected_timeout\030\001 \001(\003\022\023\n\013retur" +
- "n_data\030\002 \001(\014\"K\n\026IsProcedureDoneRequest\0221" +
- "\n\tprocedure\030\001 \001(\0132\036.hbase.pb.ProcedureDe" +
- "scription\"`\n\027IsProcedureDoneResponse\022\023\n\004" +
- "done\030\001 \001(\010:\005false\0220\n\010snapshot\030\002 \001(\0132\036.hb" +
- "ase.pb.ProcedureDescription\",\n\031GetProced",
- "ureResultRequest\022\017\n\007proc_id\030\001 \002(\004\"\371\001\n\032Ge" +
- "tProcedureResultResponse\0229\n\005state\030\001 \002(\0162" +
- "*.hbase.pb.GetProcedureResultResponse.St" +
- "ate\022\022\n\nstart_time\030\002 \001(\004\022\023\n\013last_update\030\003" +
- " \001(\004\022\016\n\006result\030\004 \001(\014\0224\n\texception\030\005 \001(\0132" +
- "!.hbase.pb.ForeignExceptionMessage\"1\n\005St" +
- "ate\022\r\n\tNOT_FOUND\020\000\022\013\n\007RUNNING\020\001\022\014\n\010FINIS" +
- "HED\020\002\"\315\001\n\017SetQuotaRequest\022\021\n\tuser_name\030\001" +
- " \001(\t\022\022\n\nuser_group\030\002 \001(\t\022\021\n\tnamespace\030\003 " +
- "\001(\t\022\'\n\ntable_name\030\004 \001(\0132\023.hbase.pb.Table",
- "Name\022\022\n\nremove_all\030\005 \001(\010\022\026\n\016bypass_globa" +
- "ls\030\006 \001(\010\022+\n\010throttle\030\007 \001(\0132\031.hbase.pb.Th" +
- "rottleRequest\"\022\n\020SetQuotaResponse\"J\n\037Maj" +
- "orCompactionTimestampRequest\022\'\n\ntable_na" +
- "me\030\001 \002(\0132\023.hbase.pb.TableName\"U\n(MajorCo" +
- "mpactionTimestampForRegionRequest\022)\n\006reg" +
- "ion\030\001 \002(\0132\031.hbase.pb.RegionSpecifier\"@\n " +
- "MajorCompactionTimestampResponse\022\034\n\024comp" +
- "action_timestamp\030\001 \002(\003\"\035\n\033SecurityCapabi" +
- "litiesRequest\"\354\001\n\034SecurityCapabilitiesRe",
- "sponse\022G\n\014capabilities\030\001 \003(\01621.hbase.pb." +
- "SecurityCapabilitiesResponse.Capability\"" +
- "\202\001\n\nCapability\022\031\n\025SIMPLE_AUTHENTICATION\020" +
- "\000\022\031\n\025SECURE_AUTHENTICATION\020\001\022\021\n\rAUTHORIZ" +
- "ATION\020\002\022\026\n\022CELL_AUTHORIZATION\020\003\022\023\n\017CELL_" +
- "VISIBILITY\020\0042\301#\n\rMasterService\022e\n\024GetSch" +
- "emaAlterStatus\022%.hbase.pb.GetSchemaAlter" +
- "StatusRequest\032&.hbase.pb.GetSchemaAlterS" +
- "tatusResponse\022b\n\023GetTableDescriptors\022$.h" +
- "base.pb.GetTableDescriptorsRequest\032%.hba",
- "se.pb.GetTableDescriptorsResponse\022P\n\rGet" +
- "TableNames\022\036.hbase.pb.GetTableNamesReque" +
- "st\032\037.hbase.pb.GetTableNamesResponse\022Y\n\020G" +
- "etClusterStatus\022!.hbase.pb.GetClusterSta" +
- "tusRequest\032\".hbase.pb.GetClusterStatusRe" +
- "sponse\022V\n\017IsMasterRunning\022 .hbase.pb.IsM" +
- "asterRunningRequest\032!.hbase.pb.IsMasterR" +
- "unningResponse\022D\n\tAddColumn\022\032.hbase.pb.A" +
- "ddColumnRequest\032\033.hbase.pb.AddColumnResp" +
- "onse\022M\n\014DeleteColumn\022\035.hbase.pb.DeleteCo",
- "lumnRequest\032\036.hbase.pb.DeleteColumnRespo" +
- "nse\022M\n\014ModifyColumn\022\035.hbase.pb.ModifyCol" +
- "umnRequest\032\036.hbase.pb.ModifyColumnRespon" +
- "se\022G\n\nMoveRegion\022\033.hbase.pb.MoveRegionRe" +
- "quest\032\034.hbase.pb.MoveRegionResponse\022k\n\026D" +
- "ispatchMergingRegions\022\'.hbase.pb.Dispatc" +
- "hMergingRegionsRequest\032(.hbase.pb.Dispat" +
- "chMergingRegionsResponse\022M\n\014AssignRegion" +
- "\022\035.hbase.pb.AssignRegionRequest\032\036.hbase." +
- "pb.AssignRegionResponse\022S\n\016UnassignRegio",
- "n\022\037.hbase.pb.UnassignRegionRequest\032 .hba" +
- "se.pb.UnassignRegionResponse\022P\n\rOfflineR" +
- "egion\022\036.hbase.pb.OfflineRegionRequest\032\037." +
- "hbase.pb.OfflineRegionResponse\022J\n\013Delete" +
- "Table\022\034.hbase.pb.DeleteTableRequest\032\035.hb" +
- "ase.pb.DeleteTableResponse\022P\n\rtruncateTa" +
- "ble\022\036.hbase.pb.TruncateTableRequest\032\037.hb" +
- "ase.pb.TruncateTableResponse\022J\n\013EnableTa" +
- "ble\022\034.hbase.pb.EnableTableRequest\032\035.hbas" +
- "e.pb.EnableTableResponse\022M\n\014DisableTable",
- "\022\035.hbase.pb.DisableTableRequest\032\036.hbase." +
- "pb.DisableTableResponse\022J\n\013ModifyTable\022\034" +
- ".hbase.pb.ModifyTableRequest\032\035.hbase.pb." +
- "ModifyTableResponse\022J\n\013CreateTable\022\034.hba" +
- "se.pb.CreateTableRequest\032\035.hbase.pb.Crea" +
- "teTableResponse\022A\n\010Shutdown\022\031.hbase.pb.S" +
- "hutdownRequest\032\032.hbase.pb.ShutdownRespon" +
- "se\022G\n\nStopMaster\022\033.hbase.pb.StopMasterRe" +
- "quest\032\034.hbase.pb.StopMasterResponse\022>\n\007B" +
- "alance\022\030.hbase.pb.BalanceRequest\032\031.hbase",
- ".pb.BalanceResponse\022_\n\022SetBalancerRunnin" +
- "g\022#.hbase.pb.SetBalancerRunningRequest\032$" +
- ".hbase.pb.SetBalancerRunningResponse\022\\\n\021" +
- "IsBalancerEnabled\022\".hbase.pb.IsBalancerE" +
- "nabledRequest\032#.hbase.pb.IsBalancerEnabl" +
- "edResponse\022S\n\016RunCatalogScan\022\037.hbase.pb." +
- "RunCatalogScanRequest\032 .hbase.pb.RunCata" +
- "logScanResponse\022e\n\024EnableCatalogJanitor\022" +
- "%.hbase.pb.EnableCatalogJanitorRequest\032&" +
- ".hbase.pb.EnableCatalogJanitorResponse\022n",
- "\n\027IsCatalogJanitorEnabled\022(.hbase.pb.IsC" +
- "atalogJanitorEnabledRequest\032).hbase.pb.I" +
- "sCatalogJanitorEnabledResponse\022^\n\021ExecMa" +
- "sterService\022#.hbase.pb.CoprocessorServic" +
- "eRequest\032$.hbase.pb.CoprocessorServiceRe" +
- "sponse\022A\n\010Snapshot\022\031.hbase.pb.SnapshotRe" +
- "quest\032\032.hbase.pb.SnapshotResponse\022h\n\025Get" +
- "CompletedSnapshots\022&.hbase.pb.GetComplet" +
- "edSnapshotsRequest\032\'.hbase.pb.GetComplet" +
- "edSnapshotsResponse\022S\n\016DeleteSnapshot\022\037.",
- "hbase.pb.DeleteSnapshotRequest\032 .hbase.p" +
- "b.DeleteSnapshotResponse\022S\n\016IsSnapshotDo" +
- "ne\022\037.hbase.pb.IsSnapshotDoneRequest\032 .hb" +
- "ase.pb.IsSnapshotDoneResponse\022V\n\017Restore" +
- "Snapshot\022 .hbase.pb.RestoreSnapshotReque" +
- "st\032!.hbase.pb.RestoreSnapshotResponse\022h\n" +
- "\025IsRestoreSnapshotDone\022&.hbase.pb.IsRest" +
- "oreSnapshotDoneRequest\032\'.hbase.pb.IsRest" +
- "oreSnapshotDoneResponse\022P\n\rExecProcedure" +
- "\022\036.hbase.pb.ExecProcedureRequest\032\037.hbase",
- ".pb.ExecProcedureResponse\022W\n\024ExecProcedu" +
- "reWithRet\022\036.hbase.pb.ExecProcedureReques" +
- "t\032\037.hbase.pb.ExecProcedureResponse\022V\n\017Is" +
- "ProcedureDone\022 .hbase.pb.IsProcedureDone" +
- "Request\032!.hbase.pb.IsProcedureDoneRespon" +
- "se\022V\n\017ModifyNamespace\022 .hbase.pb.ModifyN" +
- "amespaceRequest\032!.hbase.pb.ModifyNamespa" +
- "ceResponse\022V\n\017CreateNamespace\022 .hbase.pb" +
- ".CreateNamespaceRequest\032!.hbase.pb.Creat" +
- "eNamespaceResponse\022V\n\017DeleteNamespace\022 .",
- "hbase.pb.DeleteNamespaceRequest\032!.hbase." +
- "pb.DeleteNamespaceResponse\022k\n\026GetNamespa" +
- "ceDescriptor\022\'.hbase.pb.GetNamespaceDesc" +
- "riptorRequest\032(.hbase.pb.GetNamespaceDes" +
- "criptorResponse\022q\n\030ListNamespaceDescript" +
- "ors\022).hbase.pb.ListNamespaceDescriptorsR" +
- "equest\032*.hbase.pb.ListNamespaceDescripto" +
- "rsResponse\022\206\001\n\037ListTableDescriptorsByNam" +
- "espace\0220.hbase.pb.ListTableDescriptorsBy" +
- "NamespaceRequest\0321.hbase.pb.ListTableDes",
- "criptorsByNamespaceResponse\022t\n\031ListTable" +
- "NamesByNamespace\022*.hbase.pb.ListTableNam" +
- "esByNamespaceRequest\032+.hbase.pb.ListTabl" +
- "eNamesByNamespaceResponse\022P\n\rGetTableSta" +
- "te\022\036.hbase.pb.GetTableStateRequest\032\037.hba" +
- "se.pb.GetTableStateResponse\022A\n\010SetQuota\022" +
- "\031.hbase.pb.SetQuotaRequest\032\032.hbase.pb.Se" +
- "tQuotaResponse\022x\n\037getLastMajorCompaction" +
- "Timestamp\022).hbase.pb.MajorCompactionTime" +
- "stampRequest\032*.hbase.pb.MajorCompactionT",
- "imestampResponse\022\212\001\n(getLastMajorCompact" +
- "ionTimestampForRegion\0222.hbase.pb.MajorCo" +
- "mpactionTimestampForRegionRequest\032*.hbas" +
- "e.pb.MajorCompactionTimestampResponse\022_\n" +
- "\022getProcedureResult\022#.hbase.pb.GetProced" +
- "ureResultRequest\032$.hbase.pb.GetProcedure" +
- "ResultResponse\022h\n\027getSecurityCapabilitie" +
- "s\022%.hbase.pb.SecurityCapabilitiesRequest" +
- "\032&.hbase.pb.SecurityCapabilitiesResponse" +
- "BB\n*org.apache.hadoop.hbase.protobuf.gen",
- "eratedB\014MasterProtosH\001\210\001\001\240\001\001"
+ "b.NamespaceDescriptor\022\026\n\013nonce_group\030\002 \001" +
+ "(\004:\0010\022\020\n\005nonce\030\003 \001(\004:\0010\"\031\n\027CreateNamespa" +
+ "ceResponse\"Y\n\026DeleteNamespaceRequest\022\025\n\r",
+ "namespaceName\030\001 \002(\t\022\026\n\013nonce_group\030\002 \001(\004" +
+ ":\0010\022\020\n\005nonce\030\003 \001(\004:\0010\"\031\n\027DeleteNamespace" +
+ "Response\"~\n\026ModifyNamespaceRequest\022:\n\023na" +
+ "mespaceDescriptor\030\001 \002(\0132\035.hbase.pb.Names" +
+ "paceDescriptor\022\026\n\013nonce_group\030\002 \001(\004:\0010\022\020" +
+ "\n\005nonce\030\003 \001(\004:\0010\"\031\n\027ModifyNamespaceRespo" +
+ "nse\"6\n\035GetNamespaceDescriptorRequest\022\025\n\r" +
+ "namespaceName\030\001 \002(\t\"\\\n\036GetNamespaceDescr" +
+ "iptorResponse\022:\n\023namespaceDescriptor\030\001 \002" +
+ "(\0132\035.hbase.pb.NamespaceDescriptor\"!\n\037Lis",
+ "tNamespaceDescriptorsRequest\"^\n ListName" +
+ "spaceDescriptorsResponse\022:\n\023namespaceDes" +
+ "criptor\030\001 \003(\0132\035.hbase.pb.NamespaceDescri" +
+ "ptor\"?\n&ListTableDescriptorsByNamespaceR" +
+ "equest\022\025\n\rnamespaceName\030\001 \002(\t\"U\n\'ListTab" +
+ "leDescriptorsByNamespaceResponse\022*\n\013tabl" +
+ "eSchema\030\001 \003(\0132\025.hbase.pb.TableSchema\"9\n " +
+ "ListTableNamesByNamespaceRequest\022\025\n\rname" +
+ "spaceName\030\001 \002(\t\"K\n!ListTableNamesByNames" +
+ "paceResponse\022&\n\ttableName\030\001 \003(\0132\023.hbase.",
+ "pb.TableName\"\021\n\017ShutdownRequest\"\022\n\020Shutd" +
+ "ownResponse\"\023\n\021StopMasterRequest\"\024\n\022Stop" +
+ "MasterResponse\"\020\n\016BalanceRequest\"\'\n\017Bala" +
+ "nceResponse\022\024\n\014balancer_ran\030\001 \002(\010\"<\n\031Set" +
+ "BalancerRunningRequest\022\n\n\002on\030\001 \002(\010\022\023\n\013sy" +
+ "nchronous\030\002 \001(\010\"8\n\032SetBalancerRunningRes" +
+ "ponse\022\032\n\022prev_balance_value\030\001 \001(\010\"\032\n\030IsB" +
+ "alancerEnabledRequest\",\n\031IsBalancerEnabl" +
+ "edResponse\022\017\n\007enabled\030\001 \002(\010\"\027\n\025RunCatalo" +
+ "gScanRequest\"-\n\026RunCatalogScanResponse\022\023",
+ "\n\013scan_result\030\001 \001(\005\"-\n\033EnableCatalogJani" +
+ "torRequest\022\016\n\006enable\030\001 \002(\010\"2\n\034EnableCata" +
+ "logJanitorResponse\022\022\n\nprev_value\030\001 \001(\010\" " +
+ "\n\036IsCatalogJanitorEnabledRequest\"0\n\037IsCa" +
+ "talogJanitorEnabledResponse\022\r\n\005value\030\001 \002" +
+ "(\010\"B\n\017SnapshotRequest\022/\n\010snapshot\030\001 \002(\0132" +
+ "\035.hbase.pb.SnapshotDescription\",\n\020Snapsh" +
+ "otResponse\022\030\n\020expected_timeout\030\001 \002(\003\"\036\n\034" +
+ "GetCompletedSnapshotsRequest\"Q\n\035GetCompl" +
+ "etedSnapshotsResponse\0220\n\tsnapshots\030\001 \003(\013",
+ "2\035.hbase.pb.SnapshotDescription\"H\n\025Delet" +
+ "eSnapshotRequest\022/\n\010snapshot\030\001 \002(\0132\035.hba" +
+ "se.pb.SnapshotDescription\"\030\n\026DeleteSnaps" +
+ "hotResponse\"I\n\026RestoreSnapshotRequest\022/\n" +
+ "\010snapshot\030\001 \002(\0132\035.hbase.pb.SnapshotDescr" +
+ "iption\"\031\n\027RestoreSnapshotResponse\"H\n\025IsS" +
+ "napshotDoneRequest\022/\n\010snapshot\030\001 \001(\0132\035.h" +
+ "base.pb.SnapshotDescription\"^\n\026IsSnapsho" +
+ "tDoneResponse\022\023\n\004done\030\001 \001(\010:\005false\022/\n\010sn" +
+ "apshot\030\002 \001(\0132\035.hbase.pb.SnapshotDescript",
+ "ion\"O\n\034IsRestoreSnapshotDoneRequest\022/\n\010s" +
+ "napshot\030\001 \001(\0132\035.hbase.pb.SnapshotDescrip" +
+ "tion\"4\n\035IsRestoreSnapshotDoneResponse\022\023\n" +
+ "\004done\030\001 \001(\010:\005false\"F\n\033GetSchemaAlterStat" +
+ "usRequest\022\'\n\ntable_name\030\001 \002(\0132\023.hbase.pb" +
+ ".TableName\"T\n\034GetSchemaAlterStatusRespon" +
+ "se\022\035\n\025yet_to_update_regions\030\001 \001(\r\022\025\n\rtot" +
+ "al_regions\030\002 \001(\r\"\213\001\n\032GetTableDescriptors" +
+ "Request\022(\n\013table_names\030\001 \003(\0132\023.hbase.pb." +
+ "TableName\022\r\n\005regex\030\002 \001(\t\022!\n\022include_sys_",
+ "tables\030\003 \001(\010:\005false\022\021\n\tnamespace\030\004 \001(\t\"J" +
+ "\n\033GetTableDescriptorsResponse\022+\n\014table_s" +
+ "chema\030\001 \003(\0132\025.hbase.pb.TableSchema\"[\n\024Ge" +
+ "tTableNamesRequest\022\r\n\005regex\030\001 \001(\t\022!\n\022inc" +
+ "lude_sys_tables\030\002 \001(\010:\005false\022\021\n\tnamespac" +
+ "e\030\003 \001(\t\"A\n\025GetTableNamesResponse\022(\n\013tabl" +
+ "e_names\030\001 \003(\0132\023.hbase.pb.TableName\"?\n\024Ge" +
+ "tTableStateRequest\022\'\n\ntable_name\030\001 \002(\0132\023" +
+ ".hbase.pb.TableName\"B\n\025GetTableStateResp" +
+ "onse\022)\n\013table_state\030\001 \002(\0132\024.hbase.pb.Tab",
+ "leState\"\031\n\027GetClusterStatusRequest\"K\n\030Ge" +
+ "tClusterStatusResponse\022/\n\016cluster_status" +
+ "\030\001 \002(\0132\027.hbase.pb.ClusterStatus\"\030\n\026IsMas" +
+ "terRunningRequest\"4\n\027IsMasterRunningResp" +
+ "onse\022\031\n\021is_master_running\030\001 \002(\010\"I\n\024ExecP" +
+ "rocedureRequest\0221\n\tprocedure\030\001 \002(\0132\036.hba" +
+ "se.pb.ProcedureDescription\"F\n\025ExecProced" +
+ "ureResponse\022\030\n\020expected_timeout\030\001 \001(\003\022\023\n" +
+ "\013return_data\030\002 \001(\014\"K\n\026IsProcedureDoneReq" +
+ "uest\0221\n\tprocedure\030\001 \001(\0132\036.hbase.pb.Proce",
+ "dureDescription\"`\n\027IsProcedureDoneRespon" +
+ "se\022\023\n\004done\030\001 \001(\010:\005false\0220\n\010snapshot\030\002 \001(" +
+ "\0132\036.hbase.pb.ProcedureDescription\",\n\031Get" +
+ "ProcedureResultRequest\022\017\n\007proc_id\030\001 \002(\004\"" +
+ "\371\001\n\032GetProcedureResultResponse\0229\n\005state\030" +
+ "\001 \002(\0162*.hbase.pb.GetProcedureResultRespo" +
+ "nse.State\022\022\n\nstart_time\030\002 \001(\004\022\023\n\013last_up" +
+ "date\030\003 \001(\004\022\016\n\006result\030\004 \001(\014\0224\n\texception\030" +
+ "\005 \001(\0132!.hbase.pb.ForeignExceptionMessage" +
+ "\"1\n\005State\022\r\n\tNOT_FOUND\020\000\022\013\n\007RUNNING\020\001\022\014\n",
+ "\010FINISHED\020\002\"\315\001\n\017SetQuotaRequest\022\021\n\tuser_" +
+ "name\030\001 \001(\t\022\022\n\nuser_group\030\002 \001(\t\022\021\n\tnamesp" +
+ "ace\030\003 \001(\t\022\'\n\ntable_name\030\004 \001(\0132\023.hbase.pb" +
+ ".TableName\022\022\n\nremove_all\030\005 \001(\010\022\026\n\016bypass" +
+ "_globals\030\006 \001(\010\022+\n\010throttle\030\007 \001(\0132\031.hbase" +
+ ".pb.ThrottleRequest\"\022\n\020SetQuotaResponse\"" +
+ "J\n\037MajorCompactionTimestampRequest\022\'\n\nta" +
+ "ble_name\030\001 \002(\0132\023.hbase.pb.TableName\"U\n(M" +
+ "ajorCompactionTimestampForRegionRequest\022" +
+ ")\n\006region\030\001 \002(\0132\031.hbase.pb.RegionSpecifi",
+ "er\"@\n MajorCompactionTimestampResponse\022\034" +
+ "\n\024compaction_timestamp\030\001 \002(\003\"\035\n\033Security" +
+ "CapabilitiesRequest\"\354\001\n\034SecurityCapabili" +
+ "tiesResponse\022G\n\014capabilities\030\001 \003(\01621.hba" +
+ "se.pb.SecurityCapabilitiesResponse.Capab" +
+ "ility\"\202\001\n\nCapability\022\031\n\025SIMPLE_AUTHENTIC" +
+ "ATION\020\000\022\031\n\025SECURE_AUTHENTICATION\020\001\022\021\n\rAU" +
+ "THORIZATION\020\002\022\026\n\022CELL_AUTHORIZATION\020\003\022\023\n" +
+ "\017CELL_VISIBILITY\020\0042\301#\n\rMasterService\022e\n\024" +
+ "GetSchemaAlterStatus\022%.hbase.pb.GetSchem",
+ "aAlterStatusRequest\032&.hbase.pb.GetSchema" +
+ "AlterStatusResponse\022b\n\023GetTableDescripto" +
+ "rs\022$.hbase.pb.GetTableDescriptorsRequest" +
+ "\032%.hbase.pb.GetTableDescriptorsResponse\022" +
+ "P\n\rGetTableNames\022\036.hbase.pb.GetTableName" +
+ "sRequest\032\037.hbase.pb.GetTableNamesRespons" +
+ "e\022Y\n\020GetClusterStatus\022!.hbase.pb.GetClus" +
+ "terStatusRequest\032\".hbase.pb.GetClusterSt" +
+ "atusResponse\022V\n\017IsMasterRunning\022 .hbase." +
+ "pb.IsMasterRunningRequest\032!.hbase.pb.IsM",
+ "asterRunningResponse\022D\n\tAddColumn\022\032.hbas" +
+ "e.pb.AddColumnRequest\032\033.hbase.pb.AddColu" +
+ "mnResponse\022M\n\014DeleteColumn\022\035.hbase.pb.De" +
+ "leteColumnRequest\032\036.hbase.pb.DeleteColum" +
+ "nResponse\022M\n\014ModifyColumn\022\035.hbase.pb.Mod" +
+ "ifyColumnRequest\032\036.hbase.pb.ModifyColumn" +
+ "Response\022G\n\nMoveRegion\022\033.hbase.pb.MoveRe" +
+ "gionRequest\032\034.hbase.pb.MoveRegionRespons" +
+ "e\022k\n\026DispatchMergingRegions\022\'.hbase.pb.D" +
+ "ispatchMergingRegionsRequest\032(.hbase.pb.",
+ "DispatchMergingRegionsResponse\022M\n\014Assign" +
+ "Region\022\035.hbase.pb.AssignRegionRequest\032\036." +
+ "hbase.pb.AssignRegionResponse\022S\n\016Unassig" +
+ "nRegion\022\037.hbase.pb.UnassignRegionRequest" +
+ "\032 .hbase.pb.UnassignRegionResponse\022P\n\rOf" +
+ "flineRegion\022\036.hbase.pb.OfflineRegionRequ" +
+ "est\032\037.hbase.pb.OfflineRegionResponse\022J\n\013" +
+ "DeleteTable\022\034.hbase.pb.DeleteTableReques" +
+ "t\032\035.hbase.pb.DeleteTableResponse\022P\n\rtrun" +
+ "cateTable\022\036.hbase.pb.TruncateTableReques",
+ "t\032\037.hbase.pb.TruncateTableResponse\022J\n\013En" +
+ "ableTable\022\034.hbase.pb.EnableTableRequest\032" +
+ "\035.hbase.pb.EnableTableResponse\022M\n\014Disabl" +
+ "eTable\022\035.hbase.pb.DisableTableRequest\032\036." +
+ "hbase.pb.DisableTableResponse\022J\n\013ModifyT" +
+ "able\022\034.hbase.pb.ModifyTableRequest\032\035.hba" +
+ "se.pb.ModifyTableResponse\022J\n\013CreateTable" +
+ "\022\034.hbase.pb.CreateTableRequest\032\035.hbase.p" +
+ "b.CreateTableResponse\022A\n\010Shutdown\022\031.hbas" +
+ "e.pb.ShutdownRequest\032\032.hbase.pb.Shutdown",
+ "Response\022G\n\nStopMaster\022\033.hbase.pb.StopMa" +
+ "sterRequest\032\034.hbase.pb.StopMasterRespons" +
+ "e\022>\n\007Balance\022\030.hbase.pb.BalanceRequest\032\031" +
+ ".hbase.pb.BalanceResponse\022_\n\022SetBalancer" +
+ "Running\022#.hbase.pb.SetBalancerRunningReq" +
+ "uest\032$.hbase.pb.SetBalancerRunningRespon" +
+ "se\022\\\n\021IsBalancerEnabled\022\".hbase.pb.IsBal" +
+ "ancerEnabledRequest\032#.hbase.pb.IsBalance" +
+ "rEnabledResponse\022S\n\016RunCatalogScan\022\037.hba" +
+ "se.pb.RunCatalogScanRequest\032 .hbase.pb.R",
+ "unCatalogScanResponse\022e\n\024EnableCatalogJa" +
+ "nitor\022%.hbase.pb.EnableCatalogJanitorReq" +
+ "uest\032&.hbase.pb.EnableCatalogJanitorResp" +
+ "onse\022n\n\027IsCatalogJanitorEnabled\022(.hbase." +
+ "pb.IsCatalogJanitorEnabledRequest\032).hbas" +
+ "e.pb.IsCatalogJanitorEnabledResponse\022^\n\021" +
+ "ExecMasterService\022#.hbase.pb.Coprocessor" +
+ "ServiceRequest\032$.hbase.pb.CoprocessorSer" +
+ "viceResponse\022A\n\010Snapshot\022\031.hbase.pb.Snap" +
+ "shotRequest\032\032.hbase.pb.SnapshotResponse\022",
+ "h\n\025GetCompletedSnapshots\022&.hbase.pb.GetC" +
+ "ompletedSnapshotsRequest\032\'.hbase.pb.GetC" +
+ "ompletedSnapshotsResponse\022S\n\016DeleteSnaps" +
+ "hot\022\037.hbase.pb.DeleteSnapshotRequest\032 .h" +
+ "base.pb.DeleteSnapshotResponse\022S\n\016IsSnap" +
+ "shotDone\022\037.hbase.pb.IsSnapshotDoneReques" +
+ "t\032 .hbase.pb.IsSnapshotDoneResponse\022V\n\017R" +
+ "estoreSnapshot\022 .hbase.pb.RestoreSnapsho" +
+ "tRequest\032!.hbase.pb.RestoreSnapshotRespo" +
+ "nse\022h\n\025IsRestoreSnapshotDone\022&.hbase.pb.",
+ "IsRestoreSnapshotDoneRequest\032\'.hbase.pb." +
+ "IsRestoreSnapshotDoneResponse\022P\n\rExecPro" +
+ "cedure\022\036.hbase.pb.ExecProcedureRequest\032\037" +
+ ".hbase.pb.ExecProcedureResponse\022W\n\024ExecP" +
+ "rocedureWithRet\022\036.hbase.pb.ExecProcedure" +
+ "Request\032\037.hbase.pb.ExecProcedureResponse" +
+ "\022V\n\017IsProcedureDone\022 .hbase.pb.IsProcedu" +
+ "reDoneRequest\032!.hbase.pb.IsProcedureDone" +
+ "Response\022V\n\017ModifyNamespace\022 .hbase.pb.M" +
+ "odifyNamespaceRequest\032!.hbase.pb.ModifyN",
+ "amespaceResponse\022V\n\017CreateNamespace\022 .hb" +
+ "ase.pb.CreateNamespaceRequest\032!.hbase.pb" +
+ ".CreateNamespaceResponse\022V\n\017DeleteNamesp" +
+ "ace\022 .hbase.pb.DeleteNamespaceRequest\032!." +
+ "hbase.pb.DeleteNamespaceResponse\022k\n\026GetN" +
+ "amespaceDescriptor\022\'.hbase.pb.GetNamespa" +
+ "ceDescriptorRequest\032(.hbase.pb.GetNamesp" +
+ "aceDescriptorResponse\022q\n\030ListNamespaceDe" +
+ "scriptors\022).hbase.pb.ListNamespaceDescri" +
+ "ptorsRequest\032*.hbase.pb.ListNamespaceDes",
+ "criptorsResponse\022\206\001\n\037ListTableDescriptor" +
+ "sByNamespace\0220.hbase.pb.ListTableDescrip" +
+ "torsByNamespaceRequest\0321.hbase.pb.ListTa" +
+ "bleDescriptorsByNamespaceResponse\022t\n\031Lis" +
+ "tTableNamesByNamespace\022*.hbase.pb.ListTa" +
+ "bleNamesByNamespaceRequest\032+.hbase.pb.Li" +
+ "stTableNamesByNamespaceResponse\022P\n\rGetTa" +
+ "bleState\022\036.hbase.pb.GetTableStateRequest" +
+ "\032\037.hbase.pb.GetTableStateResponse\022A\n\010Set" +
+ "Quota\022\031.hbase.pb.SetQuotaRequest\032\032.hbase",
+ ".pb.SetQuotaResponse\022x\n\037getLastMajorComp" +
+ "actionTimestamp\022).hbase.pb.MajorCompacti" +
+ "onTimestampRequest\032*.hbase.pb.MajorCompa" +
+ "ctionTimestampResponse\022\212\001\n(getLastMajorC" +
+ "ompactionTimestampForRegion\0222.hbase.pb.M" +
+ "ajorCompactionTimestampForRegionRequest\032" +
+ "*.hbase.pb.MajorCompactionTimestampRespo" +
+ "nse\022_\n\022getProcedureResult\022#.hbase.pb.Get" +
+ "ProcedureResultRequest\032$.hbase.pb.GetPro" +
+ "cedureResultResponse\022h\n\027getSecurityCapab",
+ "ilities\022%.hbase.pb.SecurityCapabilitiesR" +
+ "equest\032&.hbase.pb.SecurityCapabilitiesRe" +
+ "sponseBB\n*org.apache.hadoop.hbase.protob" +
+ "uf.generatedB\014MasterProtosH\001\210\001\001\240\001\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -56447,7 +56990,7 @@ public final class MasterProtos {
internal_static_hbase_pb_CreateNamespaceRequest_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_CreateNamespaceRequest_descriptor,
- new java.lang.String[] { "NamespaceDescriptor", });
+ new java.lang.String[] { "NamespaceDescriptor", "NonceGroup", "Nonce", });
internal_static_hbase_pb_CreateNamespaceResponse_descriptor =
getDescriptor().getMessageTypes().get(29);
internal_static_hbase_pb_CreateNamespaceResponse_fieldAccessorTable = new
@@ -56459,7 +57002,7 @@ public final class MasterProtos {
internal_static_hbase_pb_DeleteNamespaceRequest_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_DeleteNamespaceRequest_descriptor,
- new java.lang.String[] { "NamespaceName", });
+ new java.lang.String[] { "NamespaceName", "NonceGroup", "Nonce", });
internal_static_hbase_pb_DeleteNamespaceResponse_descriptor =
getDescriptor().getMessageTypes().get(31);
internal_static_hbase_pb_DeleteNamespaceResponse_fieldAccessorTable = new
@@ -56471,7 +57014,7 @@ public final class MasterProtos {
internal_static_hbase_pb_ModifyNamespaceRequest_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_ModifyNamespaceRequest_descriptor,
- new java.lang.String[] { "NamespaceDescriptor", });
+ new java.lang.String[] { "NamespaceDescriptor", "NonceGroup", "Nonce", });
internal_static_hbase_pb_ModifyNamespaceResponse_descriptor =
getDescriptor().getMessageTypes().get(33);
internal_static_hbase_pb_ModifyNamespaceResponse_fieldAccessorTable = new
diff --git hbase-protocol/src/main/protobuf/Master.proto hbase-protocol/src/main/protobuf/Master.proto
index 778a02a..eb9893a 100644
--- hbase-protocol/src/main/protobuf/Master.proto
+++ hbase-protocol/src/main/protobuf/Master.proto
@@ -177,6 +177,8 @@ message ModifyTableResponse {
message CreateNamespaceRequest {
required NamespaceDescriptor namespaceDescriptor = 1;
+ optional uint64 nonce_group = 2 [default = 0];
+ optional uint64 nonce = 3 [default = 0];
}
message CreateNamespaceResponse {
@@ -184,6 +186,8 @@ message CreateNamespaceResponse {
message DeleteNamespaceRequest {
required string namespaceName = 1;
+ optional uint64 nonce_group = 2 [default = 0];
+ optional uint64 nonce = 3 [default = 0];
}
message DeleteNamespaceResponse {
@@ -191,6 +195,8 @@ message DeleteNamespaceResponse {
message ModifyNamespaceRequest {
required NamespaceDescriptor namespaceDescriptor = 1;
+ optional uint64 nonce_group = 2 [default = 0];
+ optional uint64 nonce = 3 [default = 0];
}
message ModifyNamespaceResponse {
diff --git hbase-protocol/src/main/protobuf/MasterProcedure.proto hbase-protocol/src/main/protobuf/MasterProcedure.proto
index c445434..2d2aff4 100644
--- hbase-protocol/src/main/protobuf/MasterProcedure.proto
+++ hbase-protocol/src/main/protobuf/MasterProcedure.proto
@@ -110,6 +110,42 @@ message DeleteTableStateData {
repeated RegionInfo region_info = 3;
}
+enum CreateNamespaceState {
+ CREATE_NAMESPACE_PREPARE = 1;
+ CREATE_NAMESPACE_CREATE_DIRECTORY = 2;
+ CREATE_NAMESPACE_INSERT_INTO_NS_TABLE = 3;
+ CREATE_NAMESPACE_UPDATE_ZK = 4;
+ CREATE_NAMESPACE_SET_NAMESPACE_QUOTA = 5;
+}
+
+message CreateNamespaceStateData {
+ required NamespaceDescriptor namespace_descriptor = 1;
+}
+
+enum ModifyNamespaceState {
+ MODIFY_NAMESPACE_PREPARE = 1;
+ MODIFY_NAMESPACE_UPDATE_NS_TABLE = 2;
+ MODIFY_NAMESPACE_UPDATE_ZK = 3;
+}
+
+message ModifyNamespaceStateData {
+ required NamespaceDescriptor namespace_descriptor = 1;
+ optional NamespaceDescriptor unmodified_namespace_descriptor = 2;
+}
+
+enum DeleteNamespaceState {
+ DELETE_NAMESPACE_PREPARE = 1;
+ DELETE_NAMESPACE_DELETE_FROM_NS_TABLE = 2;
+ DELETE_NAMESPACE_REMOVE_FROM_ZK = 3;
+ DELETE_NAMESPACE_DELETE_DIRECTORIES = 4;
+ DELETE_NAMESPACE_REMOVE_NAMESPACE_QUOTA = 5;
+}
+
+message DeleteNamespaceStateData {
+ required string namespace_name = 1;
+ optional NamespaceDescriptor namespace_descriptor = 2;
+}
+
enum AddColumnFamilyState {
ADD_COLUMN_FAMILY_PREPARE = 1;
ADD_COLUMN_FAMILY_PRE_OPERATION = 2;
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/ZKNamespaceManager.java hbase-server/src/main/java/org/apache/hadoop/hbase/ZKNamespaceManager.java
index 7f5faa6..b0aabb2 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/ZKNamespaceManager.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/ZKNamespaceManager.java
@@ -169,8 +169,13 @@ public class ZKNamespaceManager extends ZooKeeperListener {
try {
ZKUtil.deleteNode(watcher, zNode);
} catch (KeeperException e) {
- LOG.error("Failed updating permissions for namespace "+name, e);
- throw new IOException("Failed updating permissions for namespace "+name, e);
+ if (e instanceof KeeperException.NoNodeException) {
+ // If the node does not exist, it may already have been deleted. Continue without failing.
+ LOG.warn("The ZNode " + zNode + " for namespace " + name + " does not exist.");
+ } else {
+ LOG.error("Failed updating permissions for namespace " + name, e);
+ throw new IOException("Failed updating permissions for namespace " + name, e);
+ }
}
}
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index ca721e2..34776d5 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -91,14 +91,17 @@ import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
import org.apache.hadoop.hbase.master.cleaner.LogCleaner;
import org.apache.hadoop.hbase.master.handler.DispatchMergingRegionHandler;
import org.apache.hadoop.hbase.master.procedure.AddColumnFamilyProcedure;
+import org.apache.hadoop.hbase.master.procedure.CreateNamespaceProcedure;
import org.apache.hadoop.hbase.master.procedure.CreateTableProcedure;
import org.apache.hadoop.hbase.master.procedure.DeleteColumnFamilyProcedure;
+import org.apache.hadoop.hbase.master.procedure.DeleteNamespaceProcedure;
import org.apache.hadoop.hbase.master.procedure.DeleteTableProcedure;
import org.apache.hadoop.hbase.master.procedure.DisableTableProcedure;
import org.apache.hadoop.hbase.master.procedure.EnableTableProcedure;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureConstants;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.master.procedure.ModifyColumnFamilyProcedure;
+import org.apache.hadoop.hbase.master.procedure.ModifyNamespaceProcedure;
import org.apache.hadoop.hbase.master.procedure.ModifyTableProcedure;
import org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch;
import org.apache.hadoop.hbase.master.procedure.ProcedureSyncWait;
@@ -1037,6 +1040,11 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
return tableStateManager;
}
+ @Override
+ public TableNamespaceManager getTableNamespaceManager() {
+ return tableNamespaceManager;
+ }
+
/*
* Start up all services. If any of these threads gets an unhandled exception
* then they just die with a logged message. This should be fine because
@@ -2191,7 +2199,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
void checkNamespaceManagerReady() throws IOException {
checkInitialized();
if (tableNamespaceManager == null ||
- !tableNamespaceManager.isTableAvailableAndInitialized()) {
+ !tableNamespaceManager.isTableAvailableAndInitialized(true)) {
throw new IOException("Table Namespace Manager not ready yet, try again later");
}
}
@@ -2332,7 +2340,10 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
}
@Override
- public void createNamespace(NamespaceDescriptor descriptor) throws IOException {
+ public void createNamespace(
+ final NamespaceDescriptor descriptor,
+ final long nonceGroup,
+ final long nonce) throws IOException {
TableName.isLegalNamespaceName(Bytes.toBytes(descriptor.getName()));
checkNamespaceManagerReady();
if (cpHost != null) {
@@ -2340,15 +2351,31 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
return;
}
}
- LOG.info(getClientIdAuditPrefix() + " creating " + descriptor);
- tableNamespaceManager.create(descriptor);
+ createNamespaceSync(descriptor, nonceGroup, nonce);
if (cpHost != null) {
cpHost.postCreateNamespace(descriptor);
}
}
@Override
- public void modifyNamespace(NamespaceDescriptor descriptor) throws IOException {
+ public void createNamespaceSync(
+ final NamespaceDescriptor descriptor,
+ final long nonceGroup,
+ final long nonce) throws IOException {
+ LOG.info(getClientIdAuditPrefix() + " creating " + descriptor);
+ // Execute the operation synchronously - wait for the operation to complete before continuing.
+ long procId = this.procedureExecutor.submitProcedure(
+ new CreateNamespaceProcedure(procedureExecutor.getEnvironment(), descriptor),
+ nonceGroup,
+ nonce);
+ ProcedureSyncWait.waitForProcedureToComplete(procedureExecutor, procId);
+ }
+
+ @Override
+ public void modifyNamespace(
+ final NamespaceDescriptor descriptor,
+ final long nonceGroup,
+ final long nonce) throws IOException {
TableName.isLegalNamespaceName(Bytes.toBytes(descriptor.getName()));
checkNamespaceManagerReady();
if (cpHost != null) {
@@ -2357,14 +2384,22 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
}
}
LOG.info(getClientIdAuditPrefix() + " modify " + descriptor);
- tableNamespaceManager.update(descriptor);
+ // Execute the operation synchronously - wait for the operation to complete before continuing.
+ long procId = this.procedureExecutor.submitProcedure(
+ new ModifyNamespaceProcedure(procedureExecutor.getEnvironment(), descriptor),
+ nonceGroup,
+ nonce);
+ ProcedureSyncWait.waitForProcedureToComplete(procedureExecutor, procId);
if (cpHost != null) {
cpHost.postModifyNamespace(descriptor);
}
}
@Override
- public void deleteNamespace(String name) throws IOException {
+ public void deleteNamespace(
+ final String name,
+ final long nonceGroup,
+ final long nonce) throws IOException {
checkNamespaceManagerReady();
if (cpHost != null) {
if (cpHost.preDeleteNamespace(name)) {
@@ -2372,7 +2407,12 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
}
}
LOG.info(getClientIdAuditPrefix() + " delete " + name);
- tableNamespaceManager.remove(name);
+ // Execute the operation synchronously - wait for the operation to complete before continuing.
+ long procId = this.procedureExecutor.submitProcedure(
+ new DeleteNamespaceProcedure(procedureExecutor.getEnvironment(), name),
+ nonceGroup,
+ nonce);
+ ProcedureSyncWait.waitForProcedureToComplete(procedureExecutor, procId);
if (cpHost != null) {
cpHost.postDeleteNamespace(name);
}
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index beda27c..c6b1f00 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -417,7 +417,10 @@ public class MasterRpcServices extends RSRpcServices
public CreateNamespaceResponse createNamespace(RpcController controller,
CreateNamespaceRequest request) throws ServiceException {
try {
- master.createNamespace(ProtobufUtil.toNamespaceDescriptor(request.getNamespaceDescriptor()));
+ master.createNamespace(
+ ProtobufUtil.toNamespaceDescriptor(request.getNamespaceDescriptor()),
+ request.getNonceGroup(),
+ request.getNonce());
return CreateNamespaceResponse.getDefaultInstance();
} catch (IOException e) {
throw new ServiceException(e);
@@ -457,7 +460,10 @@ public class MasterRpcServices extends RSRpcServices
public DeleteNamespaceResponse deleteNamespace(RpcController controller,
DeleteNamespaceRequest request) throws ServiceException {
try {
- master.deleteNamespace(request.getNamespaceName());
+ master.deleteNamespace(
+ request.getNamespaceName(),
+ request.getNonceGroup(),
+ request.getNonce());
return DeleteNamespaceResponse.getDefaultInstance();
} catch (IOException e) {
throw new ServiceException(e);
@@ -1113,7 +1119,9 @@ public class MasterRpcServices extends RSRpcServices
ModifyNamespaceRequest request) throws ServiceException {
try {
master.modifyNamespace(
- ProtobufUtil.toNamespaceDescriptor(request.getNamespaceDescriptor()));
+ ProtobufUtil.toNamespaceDescriptor(request.getNamespaceDescriptor()),
+ request.getNonceGroup(),
+ request.getNonce());
return ModifyNamespaceResponse.getDefaultInstance();
} catch (IOException e) {
throw new ServiceException(e);
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
index 7d70dc1..626f8c8 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
@@ -79,6 +79,11 @@ public interface MasterServices extends Server {
MasterCoprocessorHost getMasterCoprocessorHost();
/**
+ * @return Master's instance of {@link TableNamespaceManager}
+ */
+ TableNamespaceManager getTableNamespaceManager();
+
+ /**
* @return Master's instance of {@link MasterQuotaManager}
*/
MasterQuotaManager getMasterQuotaManager();
@@ -270,23 +275,50 @@ public interface MasterServices extends Server {
/**
* Create a new namespace
* @param descriptor descriptor which describes the new namespace
+ * @param nonceGroup
+ * @param nonce
+ * @throws IOException
+ */
+ public void createNamespace(
+ final NamespaceDescriptor descriptor,
+ final long nonceGroup,
+ final long nonce) throws IOException;
+
+ /**
+ * Create a new namespace synchronously.
+ * @param descriptor descriptor which describes the new namespace
+ * @param nonceGroup
+ * @param nonce
* @throws IOException
*/
- public void createNamespace(NamespaceDescriptor descriptor) throws IOException;
+ public void createNamespaceSync(
+ final NamespaceDescriptor descriptor,
+ final long nonceGroup,
+ final long nonce) throws IOException;
/**
* Modify an existing namespace
* @param descriptor descriptor which updates the existing namespace
+ * @param nonceGroup
+ * @param nonce
* @throws IOException
*/
- public void modifyNamespace(NamespaceDescriptor descriptor) throws IOException;
+ public void modifyNamespace(
+ final NamespaceDescriptor descriptor,
+ final long nonceGroup,
+ final long nonce) throws IOException;
/**
* Delete an existing namespace. Only empty namespaces (no tables) can be removed.
* @param name namespace name
+ * @param nonceGroup
+ * @param nonce
* @throws IOException
*/
- public void deleteNamespace(String name) throws IOException;
+ public void deleteNamespace(
+ final String name,
+ final long nonceGroup,
+ final long nonce) throws IOException;
/**
* Get a namespace descriptor by name
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java
index 02912b9..4b36f59 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java
@@ -18,26 +18,23 @@
package org.apache.hadoop.hbase.master;
-import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.NavigableSet;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.NamespaceDescriptor;
-import org.apache.hadoop.hbase.NamespaceExistException;
-import org.apache.hadoop.hbase.NamespaceNotFoundException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.ZKNamespaceManager;
import org.apache.hadoop.hbase.MetaTableAccessor;
@@ -49,12 +46,12 @@ import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableState;
import org.apache.hadoop.hbase.constraint.ConstraintException;
+import org.apache.hadoop.hbase.master.procedure.CreateNamespaceProcedure;
import org.apache.hadoop.hbase.master.procedure.CreateTableProcedure;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.hadoop.hbase.util.FSUtils;
import com.google.common.collect.Sets;
@@ -70,18 +67,39 @@ public class TableNamespaceManager {
private Configuration conf;
private MasterServices masterServices;
- private Table nsTable;
+ private Table nsTable = null;
private ZKNamespaceManager zkNamespaceManager;
private boolean initialized;
+ private final ReentrantReadWriteLock rwLock = new ReentrantReadWriteLock();
+
public static final String KEY_MAX_REGIONS = "hbase.namespace.quota.maxregions";
public static final String KEY_MAX_TABLES = "hbase.namespace.quota.maxtables";
static final String NS_INIT_TIMEOUT = "hbase.master.namespace.init.timeout";
static final int DEFAULT_NS_INIT_TIMEOUT = 300000;
+ /** Configuration key for time out for trying to acquire table locks */
+ private static final String TABLE_WRITE_LOCK_TIMEOUT_MS =
+ "hbase.table.write.lock.timeout.ms";
+ /** Configuration key for time out for trying to acquire table locks */
+ private static final String TABLE_READ_LOCK_TIMEOUT_MS =
+ "hbase.table.read.lock.timeout.ms";
+ private static final long DEFAULT_TABLE_WRITE_LOCK_TIMEOUT_MS = 600 * 1000; //10 min default
+ private static final long DEFAULT_TABLE_READ_LOCK_TIMEOUT_MS = 600 * 1000; //10 min default
+
+ private long exclusiveLockTimeoutMs;
+ private long sharedLockTimeoutMs;
+
public TableNamespaceManager(MasterServices masterServices) {
this.masterServices = masterServices;
this.conf = masterServices.getConfiguration();
+
+ this.exclusiveLockTimeoutMs = conf.getLong(
+ TABLE_WRITE_LOCK_TIMEOUT_MS,
+ DEFAULT_TABLE_WRITE_LOCK_TIMEOUT_MS);
+ this.sharedLockTimeoutMs = conf.getLong(
+ TABLE_READ_LOCK_TIMEOUT_MS,
+ DEFAULT_TABLE_READ_LOCK_TIMEOUT_MS);
}
public void start() throws IOException {
@@ -95,7 +113,7 @@ public class TableNamespaceManager {
// Wait for the namespace table to be initialized.
long startTime = EnvironmentEdgeManager.currentTime();
int timeout = conf.getInt(NS_INIT_TIMEOUT, DEFAULT_NS_INIT_TIMEOUT);
- while (!isTableAvailableAndInitialized()) {
+ while (!isTableAvailableAndInitialized(false)) {
if (EnvironmentEdgeManager.currentTime() - startTime + 100 > timeout) {
// We can't do anything if ns is not online.
throw new IOException("Timedout " + timeout + "ms waiting for namespace table to "
@@ -109,28 +127,51 @@ public class TableNamespaceManager {
}
private synchronized Table getNamespaceTable() throws IOException {
- if (!isTableAvailableAndInitialized()) {
+ if (!isTableNamespaceManagerInitialized()) {
throw new IOException(this.getClass().getName() + " isn't ready to serve");
}
return nsTable;
}
+ private synchronized boolean acquireSharedLock() throws IOException {
+ try {
+ return rwLock.readLock().tryLock(sharedLockTimeoutMs, TimeUnit.MILLISECONDS);
+ } catch (InterruptedException e) {
+ throw (InterruptedIOException) new InterruptedIOException().initCause(e);
+ }
+ }
- public synchronized NamespaceDescriptor get(String name) throws IOException {
- if (!isTableAvailableAndInitialized()) return null;
- return zkNamespaceManager.get(name);
+ public synchronized void releaseSharedLock() {
+ rwLock.readLock().unlock();
}
- public synchronized void create(NamespaceDescriptor ns) throws IOException {
- create(getNamespaceTable(), ns);
+ public synchronized boolean acquireExclusiveLock() {
+ try {
+ return rwLock.writeLock().tryLock(exclusiveLockTimeoutMs, TimeUnit.MILLISECONDS);
+ } catch (InterruptedException e) {
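+ // An interrupt while waiting for the lock is treated as a failed acquisition.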
+ return false;
+ }
}
- public synchronized void update(NamespaceDescriptor ns) throws IOException {
- Table table = getNamespaceTable();
- if (get(table, ns.getName()) == null) {
- throw new NamespaceNotFoundException(ns.getName());
+ public synchronized void releaseExclusiveLock() {
+ rwLock.writeLock().unlock();
+ }
+
+ /*
+ * Check whether a namespace already exists (reads the namespace table directly).
+ */
+ public boolean doesNamespaceExist(final String namespaceName) throws IOException {
+ if (nsTable == null) {
+ throw new IOException(this.getClass().getName() + " isn't ready to serve");
+ }
+ return (get(nsTable, namespaceName) != null);
+ }
+
+ public synchronized NamespaceDescriptor get(String name) throws IOException {
+ if (!isTableNamespaceManagerInitialized()) {
+ return null;
}
- upsert(table, ns);
+ return zkNamespaceManager.get(name);
}
private NamespaceDescriptor get(Table table, String name) throws IOException {
@@ -145,78 +186,51 @@ public class TableNamespaceManager {
HBaseProtos.NamespaceDescriptor.parseFrom(val));
}
- private void create(Table table, NamespaceDescriptor ns) throws IOException {
- if (get(table, ns.getName()) != null) {
- throw new NamespaceExistException(ns.getName());
- }
- validateTableAndRegionCount(ns);
- FileSystem fs = masterServices.getMasterFileSystem().getFileSystem();
- fs.mkdirs(FSUtils.getNamespaceDir(
- masterServices.getMasterFileSystem().getRootDir(), ns.getName()));
- upsert(table, ns);
- if (this.masterServices.isInitialized()) {
- this.masterServices.getMasterQuotaManager().setNamespaceQuota(ns);
+ public void insertIntoNSTable(final NamespaceDescriptor ns) throws IOException {
+ if (nsTable == null) {
+ throw new IOException(this.getClass().getName() + " isn't ready to serve");
}
- }
-
- private void upsert(Table table, NamespaceDescriptor ns) throws IOException {
- validateTableAndRegionCount(ns);
Put p = new Put(Bytes.toBytes(ns.getName()));
p.addImmutable(HTableDescriptor.NAMESPACE_FAMILY_INFO_BYTES,
HTableDescriptor.NAMESPACE_COL_DESC_BYTES,
ProtobufUtil.toProtoNamespaceDescriptor(ns).toByteArray());
- table.put(p);
+ nsTable.put(p);
+ }
+
+ public void updateZKNamespaceManager(final NamespaceDescriptor ns) throws IOException {
try {
zkNamespaceManager.update(ns);
- } catch(IOException ex) {
- String msg = "Failed to update namespace information in ZK. Aborting.";
- LOG.fatal(msg, ex);
- masterServices.abort(msg, ex);
+ } catch (IOException ex) {
+ String msg = "Failed to update namespace information in ZK.";
+ LOG.error(msg, ex);
+ throw new IOException(msg, ex);
}
}
- public synchronized void remove(String name) throws IOException {
- if (get(name) == null) {
- throw new NamespaceNotFoundException(name);
- }
- if (NamespaceDescriptor.RESERVED_NAMESPACES.contains(name)) {
- throw new ConstraintException("Reserved namespace "+name+" cannot be removed.");
- }
- int tableCount;
- try {
- tableCount = masterServices.listTableDescriptorsByNamespace(name).size();
- } catch (FileNotFoundException fnfe) {
- throw new NamespaceNotFoundException(name);
- }
- if (tableCount > 0) {
- throw new ConstraintException("Only empty namespaces can be removed. " +
- "Namespace "+name+" has "+tableCount+" tables");
- }
- Delete d = new Delete(Bytes.toBytes(name));
- getNamespaceTable().delete(d);
- //don't abort if cleanup isn't complete
- //it will be replaced on new namespace creation
- zkNamespaceManager.remove(name);
- FileSystem fs = masterServices.getMasterFileSystem().getFileSystem();
- for(FileStatus status :
- fs.listStatus(FSUtils.getNamespaceDir(
- masterServices.getMasterFileSystem().getRootDir(), name))) {
- if (!HConstants.HBASE_NON_TABLE_DIRS.contains(status.getPath().getName())) {
- throw new IOException("Namespace directory contains table dir: "+status.getPath());
- }
- }
- if (!fs.delete(FSUtils.getNamespaceDir(
- masterServices.getMasterFileSystem().getRootDir(), name), true)) {
- throw new IOException("Failed to remove namespace: "+name);
+ public void removeFromNSTable(final String namespaceName) throws IOException {
+ if (nsTable == null) {
+ throw new IOException(this.getClass().getName() + " isn't ready to serve");
}
- this.masterServices.getMasterQuotaManager().removeNamespaceQuota(name);
+ Delete d = new Delete(Bytes.toBytes(namespaceName));
+ nsTable.delete(d);
+ }
+
+ public void removeFromZKNamespaceManager(final String namespaceName) throws IOException {
+ zkNamespaceManager.remove(namespaceName);
}
public synchronized NavigableSet list() throws IOException {
NavigableSet ret =
Sets.newTreeSet(NamespaceDescriptor.NAMESPACE_DESCRIPTOR_COMPARATOR);
- ResultScanner scanner = getNamespaceTable().getScanner(HTableDescriptor.NAMESPACE_FAMILY_INFO_BYTES);
+ ResultScanner scanner =
+ getNamespaceTable().getScanner(HTableDescriptor.NAMESPACE_FAMILY_INFO_BYTES);
+ boolean locked = false;
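+ // Hold the shared namespace lock so a concurrent namespace DDL cannot race this scan.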
try {
+ locked = acquireSharedLock();
+ if (!locked) {
+ throw new IOException(
+ "Fail to acquire lock to scan namespace list. Some namespace DDL is in progress.");
+ }
for(Result r : scanner) {
byte[] val = CellUtil.cloneValue(r.getColumnLatestCell(
HTableDescriptor.NAMESPACE_FAMILY_INFO_BYTES,
@@ -226,6 +240,9 @@ public class TableNamespaceManager {
}
} finally {
scanner.close();
+ if (locked) {
+ releaseSharedLock();
+ }
}
return ret;
}
@@ -242,6 +259,15 @@ public class TableNamespaceManager {
newRegions));
}
+ @SuppressWarnings("deprecation")
+ private boolean isTableNamespaceManagerInitialized() throws IOException {
+ if (initialized) {
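+ // Refresh the namespace table handle from the current connection before serving requests.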
+ this.nsTable = this.masterServices.getConnection().getTable(TableName.NAMESPACE_TABLE_NAME);
+ return true;
+ }
+ return false;
+ }
+
/**
* This method checks if the namespace table is assigned and then
* tries to create its HTable. If it was already created before, it also makes
@@ -251,25 +277,55 @@ public class TableNamespaceManager {
* @throws IOException
*/
@SuppressWarnings("deprecation")
- public synchronized boolean isTableAvailableAndInitialized() throws IOException {
+ public synchronized boolean isTableAvailableAndInitialized(
+ final boolean createNamespaceAsync) throws IOException {
// Did we already get a table? If so, still make sure it's available
- if (initialized) {
- this.nsTable = this.masterServices.getConnection().getTable(TableName.NAMESPACE_TABLE_NAME);
+ if (isTableNamespaceManagerInitialized()) {
return true;
}
// Now check if the table is assigned, if not then fail fast
if (isTableAssigned() && isTableEnabled()) {
try {
+ boolean initGoodSofar = true;
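+ // Track whether the bootstrap namespaces were created synchronously; if any are submitted as async procedures, initialization is deferred.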
nsTable = this.masterServices.getConnection().getTable(TableName.NAMESPACE_TABLE_NAME);
zkNamespaceManager = new ZKNamespaceManager(masterServices.getZooKeeper());
zkNamespaceManager.start();
if (get(nsTable, NamespaceDescriptor.DEFAULT_NAMESPACE.getName()) == null) {
- create(nsTable, NamespaceDescriptor.DEFAULT_NAMESPACE);
+ if (createNamespaceAsync) {
+ masterServices.getMasterProcedureExecutor().submitProcedure(
+ new CreateNamespaceProcedure(
+ masterServices.getMasterProcedureExecutor().getEnvironment(),
+ NamespaceDescriptor.DEFAULT_NAMESPACE));
+ initGoodSofar = false;
+ } else {
+ masterServices.createNamespaceSync(
+ NamespaceDescriptor.DEFAULT_NAMESPACE,
+ HConstants.NO_NONCE,
+ HConstants.NO_NONCE);
+ }
}
if (get(nsTable, NamespaceDescriptor.SYSTEM_NAMESPACE.getName()) == null) {
- create(nsTable, NamespaceDescriptor.SYSTEM_NAMESPACE);
+ if (createNamespaceAsync) {
+ masterServices.getMasterProcedureExecutor().submitProcedure(
+ new CreateNamespaceProcedure(
+ masterServices.getMasterProcedureExecutor().getEnvironment(),
+ NamespaceDescriptor.SYSTEM_NAMESPACE));
+ initGoodSofar = false;
+ } else {
+ masterServices.createNamespaceSync(
+ NamespaceDescriptor.SYSTEM_NAMESPACE,
+ HConstants.NO_NONCE,
+ HConstants.NO_NONCE);
+ }
+ }
+
+ if (!initGoodSofar) {
+ // Some required namespaces are being created asynchronously; initialization will complete later.
+ return false;
}
ResultScanner scanner = nsTable.getScanner(HTableDescriptor.NAMESPACE_FAMILY_INFO_BYTES);
@@ -312,7 +368,7 @@ public class TableNamespaceManager {
.getRegionStates().getRegionsOfTable(TableName.NAMESPACE_TABLE_NAME).isEmpty();
}
- void validateTableAndRegionCount(NamespaceDescriptor desc) throws IOException {
+ public void validateTableAndRegionCount(NamespaceDescriptor desc) throws IOException {
if (getMaxRegions(desc) <= 0) {
throw new ConstraintException("The max region quota for " + desc.getName()
+ " is less than or equal to zero.");
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateNamespaceProcedure.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateNamespaceProcedure.java
new file mode 100644
index 0000000..c91092a
--- /dev/null
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateNamespaceProcedure.java
@@ -0,0 +1,364 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.procedure;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.NamespaceExistException;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.master.MasterFileSystem;
+import org.apache.hadoop.hbase.master.TableNamespaceManager;
+import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateNamespaceState;
+import org.apache.hadoop.hbase.util.FSUtils;
+
+/**
+ * The procedure to create a new namespace.
+ */
+@InterfaceAudience.Private
+public class CreateNamespaceProcedure
+ extends StateMachineProcedure
+ implements TableProcedureInterface {
+ private static final Log LOG = LogFactory.getLog(CreateNamespaceProcedure.class);
+
+ private final AtomicBoolean aborted = new AtomicBoolean(false);
+
+ private NamespaceDescriptor nsDescriptor;
+ private Boolean traceEnabled;
+
+ public CreateNamespaceProcedure() {
+ this.traceEnabled = null;
+ }
+
+ public CreateNamespaceProcedure(
+ final MasterProcedureEnv env,
+ final NamespaceDescriptor nsDescriptor) throws IOException {
+ this.nsDescriptor = nsDescriptor;
+ this.traceEnabled = null;
+ }
+
+ @Override
+ protected Flow executeFromState(final MasterProcedureEnv env, final CreateNamespaceState state)
+ throws InterruptedException {
+ if (isTraceEnabled()) {
+ LOG.trace(this + " execute state=" + state);
+ }
+
+ try {
+ switch (state) {
+ case CREATE_NAMESPACE_PREPARE:
+ prepareCreate(env);
+ setNextState(CreateNamespaceState.CREATE_NAMESPACE_CREATE_DIRECTORY);
+ break;
+ case CREATE_NAMESPACE_CREATE_DIRECTORY:
+ createDirectory(env, nsDescriptor);
+ setNextState(CreateNamespaceState.CREATE_NAMESPACE_INSERT_INTO_NS_TABLE);
+ break;
+ case CREATE_NAMESPACE_INSERT_INTO_NS_TABLE:
+ insertIntoNSTable(env, nsDescriptor);
+ setNextState(CreateNamespaceState.CREATE_NAMESPACE_UPDATE_ZK);
+ break;
+ case CREATE_NAMESPACE_UPDATE_ZK:
+ updateZKNamespaceManager(env, nsDescriptor);
+ setNextState(CreateNamespaceState.CREATE_NAMESPACE_SET_NAMESPACE_QUOTA);
+ break;
+ case CREATE_NAMESPACE_SET_NAMESPACE_QUOTA:
+ setNamespaceQuota(env, nsDescriptor);
+ return Flow.NO_MORE_STATE;
+ default:
+ throw new UnsupportedOperationException(this + " unhandled state=" + state);
+ }
+ } catch (IOException e) {
+ LOG.warn("Error trying to create the namespace" + nsDescriptor.getName()
+ + " (in state=" + state + ")", e);
+
+ setFailure("master-create-namespace", e);
+ }
+ return Flow.HAS_MORE_STATE;
+ }
+
+ @Override
+ protected void rollbackState(final MasterProcedureEnv env, final CreateNamespaceState state)
+ throws IOException {
+ if (isTraceEnabled()) {
+ LOG.trace(this + " rollback state=" + state);
+ }
+ try {
+ switch (state) {
+ case CREATE_NAMESPACE_SET_NAMESPACE_QUOTA:
+ rollbackSetNamespaceQuota(env);
+ break;
+ case CREATE_NAMESPACE_UPDATE_ZK:
+ rollbackZKNamespaceManagerChange(env);
+ break;
+ case CREATE_NAMESPACE_INSERT_INTO_NS_TABLE:
+ rollbackInsertIntoNSTable(env);
+ break;
+ case CREATE_NAMESPACE_CREATE_DIRECTORY:
+ rollbackCreateDirectory(env);
+ break;
+ case CREATE_NAMESPACE_PREPARE:
+ break; // nothing to do
+ default:
+ throw new UnsupportedOperationException(this + " unhandled state=" + state);
+ }
+ } catch (IOException e) {
+ // This will be retried. Unless there is a bug in the code,
+ // this should be just a "temporary error" (e.g. network down)
+ LOG.warn("Failed rollback attempt step " + state + " for creating the namespace "
+ + nsDescriptor.getName(), e);
+ throw e;
+ }
+ }
+
+ @Override
+ protected CreateNamespaceState getState(final int stateId) {
+ return CreateNamespaceState.valueOf(stateId);
+ }
+
+ @Override
+ protected int getStateId(final CreateNamespaceState state) {
+ return state.getNumber();
+ }
+
+ @Override
+ protected CreateNamespaceState getInitialState() {
+ return CreateNamespaceState.CREATE_NAMESPACE_PREPARE;
+ }
+
+ @Override
+ protected void setNextState(CreateNamespaceState state) {
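+ // If an abort was requested, fail the procedure here rather than advance to the next state.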
+ if (aborted.get()) {
+ setAbortFailure("create-namespace", "abort requested");
+ } else {
+ super.setNextState(state);
+ }
+ }
+
+ @Override
+ public boolean abort(final MasterProcedureEnv env) {
+ aborted.set(true);
+ return true;
+ }
+
+ @Override
+ public void serializeStateData(final OutputStream stream) throws IOException {
+ super.serializeStateData(stream);
+
+ MasterProcedureProtos.CreateNamespaceStateData.Builder createNamespaceMsg =
+ MasterProcedureProtos.CreateNamespaceStateData.newBuilder().setNamespaceDescriptor(
+ ProtobufUtil.toProtoNamespaceDescriptor(this.nsDescriptor));
+ createNamespaceMsg.build().writeDelimitedTo(stream);
+ }
+
+ @Override
+ public void deserializeStateData(final InputStream stream) throws IOException {
+ super.deserializeStateData(stream);
+
+ MasterProcedureProtos.CreateNamespaceStateData createNamespaceMsg =
+ MasterProcedureProtos.CreateNamespaceStateData.parseDelimitedFrom(stream);
+ nsDescriptor = ProtobufUtil.toNamespaceDescriptor(createNamespaceMsg.getNamespaceDescriptor());
+ }
+
+ @Override
+ public void toStringClassDetails(StringBuilder sb) {
+ sb.append(getClass().getSimpleName());
+ sb.append(" (Namespace=");
+ sb.append(nsDescriptor.getName());
+ sb.append(")");
+ }
+
+ @Override
+ protected boolean acquireLock(final MasterProcedureEnv env) {
+ if (!env.getMasterServices().isInitialized()) {
+ // Namespace manager might not be ready if master is not fully initialized,
+ // return false to reject user namespace creation; return true for default
+ // and system namespace creation (this is part of master initialization).
+ if (nsDescriptor.equals(NamespaceDescriptor.DEFAULT_NAMESPACE) ||
+ nsDescriptor.equals(NamespaceDescriptor.SYSTEM_NAMESPACE)) {
+ return true;
+ }
+
+ return false;
+ }
+ return getTableNamespaceManager(env).acquireExclusiveLock();
+ }
+
+ @Override
+ protected void releaseLock(final MasterProcedureEnv env) {
+ if (env.getMasterServices().isInitialized()) {
+ getTableNamespaceManager(env).releaseExclusiveLock();
+ }
+ }
+
+ @Override
+ public TableName getTableName() {
+ return TableName.NAMESPACE_TABLE_NAME;
+ }
+
+ @Override
+ public TableOperationType getTableOperationType() {
+ return TableOperationType.EDIT;
+ }
+
+ /**
+ * Action before any real action of creating namespace.
+ * @param env MasterProcedureEnv
+ * @throws IOException
+ */
+ private void prepareCreate(final MasterProcedureEnv env) throws IOException {
+ if (getTableNamespaceManager(env).doesNamespaceExist(nsDescriptor.getName())) {
+ throw new NamespaceExistException(nsDescriptor.getName());
+ }
+ getTableNamespaceManager(env).validateTableAndRegionCount(nsDescriptor);
+ }
+
+ /**
+ * Create the namespace directory
+ * @param env MasterProcedureEnv
+ * @param nsDescriptor NamespaceDescriptor
+ * @throws IOException
+ */
+ protected static void createDirectory(
+ final MasterProcedureEnv env,
+ final NamespaceDescriptor nsDescriptor) throws IOException {
+ MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
+ mfs.getFileSystem().mkdirs(
+ FSUtils.getNamespaceDir(mfs.getRootDir(), nsDescriptor.getName()));
+ }
+
+ /**
+ * undo create directory
+ * @param env MasterProcedureEnv
+ * @throws IOException
+ */
+ private void rollbackCreateDirectory(final MasterProcedureEnv env) throws IOException {
+ try {
+ DeleteNamespaceProcedure.deleteDirectory(env, nsDescriptor.getName());
+ } catch (Exception e) {
+ // Ignore exception
+ LOG.debug("Rollback of createDirectory throws exception: " + e);
+ }
+ }
+
+ /**
+ * Insert the row into ns table
+ * @param env MasterProcedureEnv
+ * @param nsDescriptor NamespaceDescriptor
+ * @throws IOException
+ */
+ protected static void insertIntoNSTable(
+ final MasterProcedureEnv env,
+ final NamespaceDescriptor nsDescriptor) throws IOException {
+ getTableNamespaceManager(env).insertIntoNSTable(nsDescriptor);
+ }
+
+ /**
+ * Undo the insert.
+ * @param env MasterProcedureEnv
+ * @throws IOException
+ */
+ private void rollbackInsertIntoNSTable(final MasterProcedureEnv env) throws IOException {
+ try {
+ DeleteNamespaceProcedure.deleteFromNSTable(env, nsDescriptor.getName());
+ } catch (Exception e) {
+ // Ignore exception
+ LOG.debug("Rollback of insertIntoNSTable throws exception: " + e);
+ }
+ }
+
+ /**
+ * Update Zookeeper.
+ * @param env MasterProcedureEnv
+ * @param nsDescriptor NamespaceDescriptor
+ * @throws IOException
+ */
+ protected static void updateZKNamespaceManager(
+ final MasterProcedureEnv env,
+ final NamespaceDescriptor nsDescriptor) throws IOException {
+ getTableNamespaceManager(env).updateZKNamespaceManager(nsDescriptor);
+ }
+
+ /**
+ * rollback Zookeeper update.
+ * @param env MasterProcedureEnv
+ * @throws IOException
+ */
+ private void rollbackZKNamespaceManagerChange(final MasterProcedureEnv env) throws IOException {
+ try {
+ DeleteNamespaceProcedure.removeFromZKNamespaceManager(env, nsDescriptor.getName());
+ } catch (Exception e) {
+ // Ignore exception
+ LOG.debug("Rollback of updateZKNamespaceManager throws exception: " + e);
+ }
+ }
+
+ /**
+ * Set quota for the namespace
+ * @param env MasterProcedureEnv
+ * @param nsDescriptor NamespaceDescriptor
+ * @throws IOException
+ **/
+ protected static void setNamespaceQuota(
+ final MasterProcedureEnv env,
+ final NamespaceDescriptor nsDescriptor) throws IOException {
+ if (env.getMasterServices().isInitialized()) {
+ env.getMasterServices().getMasterQuotaManager().setNamespaceQuota(nsDescriptor);
+ }
+ }
+
+ /**
+ * remove quota for the namespace if exists
+ * @param env MasterProcedureEnv
+ * @throws IOException
+ **/
+ private void rollbackSetNamespaceQuota(final MasterProcedureEnv env) throws IOException {
+ try {
+ DeleteNamespaceProcedure.removeNamespaceQuota(env, nsDescriptor.getName());
+ } catch (Exception e) {
+ // Ignore exception
+ LOG.debug("Rollback of setNamespaceQuota throws exception: " + e);
+ }
+ }
+
+ private static TableNamespaceManager getTableNamespaceManager(final MasterProcedureEnv env) {
+ return env.getMasterServices().getTableNamespaceManager();
+ }
+
+ /**
+ * The procedure could be restarted from a different machine. If the variable is null, we need to
+ * retrieve it.
+ * @return traceEnabled
+ */
+ private Boolean isTraceEnabled() {
+ if (traceEnabled == null) {
+ traceEnabled = LOG.isTraceEnabled();
+ }
+ return traceEnabled;
+ }
+}
\ No newline at end of file
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteNamespaceProcedure.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteNamespaceProcedure.java
new file mode 100644
index 0000000..8715b0b
--- /dev/null
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteNamespaceProcedure.java
@@ -0,0 +1,398 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.procedure;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.NamespaceNotFoundException;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.constraint.ConstraintException;
+import org.apache.hadoop.hbase.master.MasterFileSystem;
+import org.apache.hadoop.hbase.master.TableNamespaceManager;
+import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteNamespaceState;
+import org.apache.hadoop.hbase.util.FSUtils;
+
+/**
+ * The procedure to remove a namespace.
+ */
+@InterfaceAudience.Private
+public class DeleteNamespaceProcedure
+ extends StateMachineProcedure
+ implements TableProcedureInterface {
+ private static final Log LOG = LogFactory.getLog(DeleteNamespaceProcedure.class);
+
+ private final AtomicBoolean aborted = new AtomicBoolean(false);
+
+ private NamespaceDescriptor nsDescriptor;
+ private String namespaceName;
+ private Boolean traceEnabled;
+
+ public DeleteNamespaceProcedure() {
+ this.nsDescriptor = null;
+ this.traceEnabled = null;
+ }
+
+ public DeleteNamespaceProcedure(
+ final MasterProcedureEnv env,
+ final String namespaceName) throws IOException {
+ this.namespaceName = namespaceName;
+ this.nsDescriptor = null;
+ this.traceEnabled = null;
+ }
+
+ @Override
+ protected Flow executeFromState(final MasterProcedureEnv env, final DeleteNamespaceState state)
+ throws InterruptedException {
+ if (isTraceEnabled()) {
+ LOG.trace(this + " execute state=" + state);
+ }
+
+ try {
+ switch (state) {
+ case DELETE_NAMESPACE_PREPARE:
+ prepareDelete(env);
+ setNextState(DeleteNamespaceState.DELETE_NAMESPACE_DELETE_FROM_NS_TABLE);
+ break;
+ case DELETE_NAMESPACE_DELETE_FROM_NS_TABLE:
+ deleteFromNSTable(env, namespaceName);
+ setNextState(DeleteNamespaceState.DELETE_NAMESPACE_REMOVE_FROM_ZK);
+ break;
+ case DELETE_NAMESPACE_REMOVE_FROM_ZK:
+ removeFromZKNamespaceManager(env, namespaceName);
+ setNextState(DeleteNamespaceState.DELETE_NAMESPACE_DELETE_DIRECTORIES);
+ break;
+ case DELETE_NAMESPACE_DELETE_DIRECTORIES:
+ deleteDirectory(env, namespaceName);
+ setNextState(DeleteNamespaceState.DELETE_NAMESPACE_REMOVE_NAMESPACE_QUOTA);
+ break;
+ case DELETE_NAMESPACE_REMOVE_NAMESPACE_QUOTA:
+ removeNamespaceQuota(env, namespaceName);
+ return Flow.NO_MORE_STATE;
+ default:
+ throw new UnsupportedOperationException(this + " unhandled state=" + state);
+ }
+ } catch (IOException e) {
+ LOG.warn("Error trying to delete the namespace" + namespaceName
+ + " (in state=" + state + ")", e);
+
+ setFailure("master-delete-namespace", e);
+ }
+ return Flow.HAS_MORE_STATE;
+ }
+
+ @Override
+ protected void rollbackState(final MasterProcedureEnv env, final DeleteNamespaceState state)
+ throws IOException {
+ if (isTraceEnabled()) {
+ LOG.trace(this + " rollback state=" + state);
+ }
+ try {
+ switch (state) {
+ case DELETE_NAMESPACE_REMOVE_NAMESPACE_QUOTA:
+ rollbackRemoveNamespaceQuota(env);
+ break;
+ case DELETE_NAMESPACE_DELETE_DIRECTORIES:
+ rollbackDeleteDirectory(env);
+ break;
+ case DELETE_NAMESPACE_REMOVE_FROM_ZK:
+ undoRemoveFromZKNamespaceManager(env);
+ break;
+ case DELETE_NAMESPACE_DELETE_FROM_NS_TABLE:
+ undoDeleteFromNSTable(env);
+ break;
+ case DELETE_NAMESPACE_PREPARE:
+ break; // nothing to do
+ default:
+ throw new UnsupportedOperationException(this + " unhandled state=" + state);
+ }
+ } catch (IOException e) {
+ // This will be retried. Unless there is a bug in the code,
+ // this should be just a "temporary error" (e.g. network down)
+ LOG.warn("Failed rollback attempt step " + state + " for deleting the namespace "
+ + namespaceName, e);
+ throw e;
+ }
+ }
+
+ @Override
+ protected DeleteNamespaceState getState(final int stateId) {
+ return DeleteNamespaceState.valueOf(stateId);
+ }
+
+ @Override
+ protected int getStateId(final DeleteNamespaceState state) {
+ return state.getNumber();
+ }
+
+ @Override
+ protected DeleteNamespaceState getInitialState() {
+ return DeleteNamespaceState.DELETE_NAMESPACE_PREPARE;
+ }
+
+ @Override
+ protected void setNextState(DeleteNamespaceState state) {
+ if (aborted.get()) {
+ setAbortFailure("delete-namespace", "abort requested");
+ } else {
+ super.setNextState(state);
+ }
+ }
+
+ @Override
+ public boolean abort(final MasterProcedureEnv env) {
+ aborted.set(true);
+ return true;
+ }
+
+ @Override
+ public void serializeStateData(final OutputStream stream) throws IOException {
+ super.serializeStateData(stream);
+
+ MasterProcedureProtos.DeleteNamespaceStateData.Builder deleteNamespaceMsg =
+ MasterProcedureProtos.DeleteNamespaceStateData.newBuilder().setNamespaceName(namespaceName);
+ if (this.nsDescriptor != null) {
+ deleteNamespaceMsg.setNamespaceDescriptor(
+ ProtobufUtil.toProtoNamespaceDescriptor(this.nsDescriptor));
+ }
+ deleteNamespaceMsg.build().writeDelimitedTo(stream);
+ }
+
+ @Override
+ public void deserializeStateData(final InputStream stream) throws IOException {
+ super.deserializeStateData(stream);
+
+ MasterProcedureProtos.DeleteNamespaceStateData deleteNamespaceMsg =
+ MasterProcedureProtos.DeleteNamespaceStateData.parseDelimitedFrom(stream);
+ namespaceName = deleteNamespaceMsg.getNamespaceName();
+ if (deleteNamespaceMsg.hasNamespaceDescriptor()) {
+ nsDescriptor =
+ ProtobufUtil.toNamespaceDescriptor(deleteNamespaceMsg.getNamespaceDescriptor());
+ }
+ }
+
+ @Override
+ public void toStringClassDetails(StringBuilder sb) {
+ sb.append(getClass().getSimpleName());
+ sb.append(" (Namespace=");
+ sb.append(namespaceName);
+ sb.append(")");
+ }
+
+ @Override
+ protected boolean acquireLock(final MasterProcedureEnv env) {
+ return getTableNamespaceManager(env).acquireExclusiveLock();
+ }
+
+ @Override
+ protected void releaseLock(final MasterProcedureEnv env) {
+ getTableNamespaceManager(env).releaseExclusiveLock();
+ }
+
+ @Override
+ public TableName getTableName() {
+ return TableName.NAMESPACE_TABLE_NAME;
+ }
+
+ @Override
+ public TableOperationType getTableOperationType() {
+ return TableOperationType.EDIT;
+ }
+
+ /**
+ * Action before any real action of deleting namespace.
+ * @param env MasterProcedureEnv
+ * @throws IOException
+ */
+ private void prepareDelete(final MasterProcedureEnv env) throws IOException {
+ if (!getTableNamespaceManager(env).doesNamespaceExist(namespaceName)) {
+ throw new NamespaceNotFoundException(namespaceName);
+ }
+ if (NamespaceDescriptor.RESERVED_NAMESPACES.contains(namespaceName)) {
+ throw new ConstraintException("Reserved namespace "+ namespaceName +" cannot be removed.");
+ }
+
+ int tableCount = 0;
+ try {
+ tableCount = env.getMasterServices().listTableDescriptorsByNamespace(namespaceName).size();
+ } catch (FileNotFoundException fnfe) {
+ throw new NamespaceNotFoundException(namespaceName);
+ }
+ if (tableCount > 0) {
+ throw new ConstraintException("Only empty namespaces can be removed. " +
+ "Namespace "+ namespaceName + " has "+ tableCount +" tables");
+ }
+
+ // This is used for rollback
+ nsDescriptor = getTableNamespaceManager(env).get(namespaceName);
+ }
+
+ /**
+ * delete the row from namespace table
+ * @param env MasterProcedureEnv
+ * @param namespaceName name of the namespace in string format
+ * @throws IOException
+ */
+ protected static void deleteFromNSTable(
+ final MasterProcedureEnv env,
+ final String namespaceName) throws IOException {
+ getTableNamespaceManager(env).removeFromNSTable(namespaceName);
+ }
+
+ /**
+ * undo the delete
+ * @param env MasterProcedureEnv
+ * @throws IOException
+ */
+ private void undoDeleteFromNSTable(final MasterProcedureEnv env) {
+ try {
+ if (nsDescriptor != null) {
+ CreateNamespaceProcedure.insertIntoNSTable(env, nsDescriptor);
+ }
+ } catch (Exception e) {
+ // Ignore
+ LOG.debug("Rollback of deleteFromNSTable throws exception: " + e);
+ }
+ }
+
+ /**
+ * remove from Zookeeper.
+ * @param env MasterProcedureEnv
+ * @param namespaceName name of the namespace in string format
+ * @throws IOException
+ */
+ protected static void removeFromZKNamespaceManager(
+ final MasterProcedureEnv env,
+ final String namespaceName) throws IOException {
+ getTableNamespaceManager(env).removeFromZKNamespaceManager(namespaceName);
+ }
+
+ /**
+ * undo the remove from Zookeeper
+ * @param env MasterProcedureEnv
+ * @throws IOException
+ */
+ private void undoRemoveFromZKNamespaceManager(final MasterProcedureEnv env) {
+ try {
+ if (nsDescriptor != null) {
+ CreateNamespaceProcedure.updateZKNamespaceManager(env, nsDescriptor);
+ }
+ } catch (Exception e) {
+ // Ignore
+ LOG.debug("Rollback of removeFromZKNamespaceManager throws exception: " + e);
+ }
+ }
+
+ /**
+ * Delete the namespace directories from the file system
+ * @param env MasterProcedureEnv
+ * @param namespaceName name of the namespace in string format
+ * @throws IOException
+ */
+ protected static void deleteDirectory(
+ final MasterProcedureEnv env,
+ final String namespaceName) throws IOException {
+ MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
+ FileSystem fs = mfs.getFileSystem();
+ Path p = FSUtils.getNamespaceDir(mfs.getRootDir(), namespaceName);
+
+ try {
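+ // Only delete the namespace directory if it no longer contains any table directories.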
+ for(FileStatus status : fs.listStatus(p)) {
+ if (!HConstants.HBASE_NON_TABLE_DIRS.contains(status.getPath().getName())) {
+ throw new IOException("Namespace directory contains table dir: " + status.getPath());
+ }
+ }
+ if (!fs.delete(FSUtils.getNamespaceDir(mfs.getRootDir(), namespaceName), true)) {
+ throw new IOException("Failed to remove namespace: " + namespaceName);
+ }
+ } catch (FileNotFoundException e) {
+ // File already deleted, continue
+ LOG.debug("deleteDirectory throws exception: " + e);
+ }
+ }
+
+ /**
+ * undo delete directory
+ * @param env MasterProcedureEnv
+ * @throws IOException
+ */
+ private void rollbackDeleteDirectory(final MasterProcedureEnv env) throws IOException {
+ try {
+ CreateNamespaceProcedure.createDirectory(env, nsDescriptor);
+ } catch (Exception e) {
+ // Ignore exception
+ LOG.debug("Rollback of deleteDirectory throws exception: " + e);
+ }
+ }
+
+ /**
+ * remove quota for the namespace
+ * @param env MasterProcedureEnv
+ * @param namespaceName name of the namespace in string format
+ * @throws IOException
+ **/
+ protected static void removeNamespaceQuota(
+ final MasterProcedureEnv env,
+ final String namespaceName) throws IOException {
+ env.getMasterServices().getMasterQuotaManager().removeNamespaceQuota(namespaceName);
+ }
+
+ /**
+ * undo remove quota for the namespace
+ * @param env MasterProcedureEnv
+ * @throws IOException
+ **/
+ private void rollbackRemoveNamespaceQuota(final MasterProcedureEnv env) throws IOException {
+ try {
+ CreateNamespaceProcedure.setNamespaceQuota(env, nsDescriptor);
+ } catch (Exception e) {
+ // Ignore exception
+ LOG.debug("Rollback of removeNamespaceQuota throws exception: " + e);
+ }
+ }
+
+ private static TableNamespaceManager getTableNamespaceManager(final MasterProcedureEnv env) {
+ return env.getMasterServices().getTableNamespaceManager();
+ }
+ /**
+ * The procedure could be restarted from a different machine. If the variable is null, we need to
+ * retrieve it.
+ * @return traceEnabled
+ */
+ private Boolean isTraceEnabled() {
+ if (traceEnabled == null) {
+ traceEnabled = LOG.isTraceEnabled();
+ }
+ return traceEnabled;
+ }
+}
\ No newline at end of file
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyNamespaceProcedure.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyNamespaceProcedure.java
new file mode 100644
index 0000000..ba5c0ad
--- /dev/null
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyNamespaceProcedure.java
@@ -0,0 +1,281 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.procedure;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.NamespaceNotFoundException;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.master.TableNamespaceManager;
+import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyNamespaceState;
+
+/**
+ * The procedure to modify an existing namespace.
+ */
+@InterfaceAudience.Private
+public class ModifyNamespaceProcedure
+ extends StateMachineProcedure
+ implements TableProcedureInterface {
+ private static final Log LOG = LogFactory.getLog(ModifyNamespaceProcedure.class);
+
+ private final AtomicBoolean aborted = new AtomicBoolean(false);
+
+ private NamespaceDescriptor oldNsDescriptor;
+ private NamespaceDescriptor newNsDescriptor;
+ private Boolean traceEnabled;
+
+ public ModifyNamespaceProcedure() {
+ this.oldNsDescriptor = null;
+ this.traceEnabled = null;
+ }
+
+ public ModifyNamespaceProcedure(
+ final MasterProcedureEnv env,
+ final NamespaceDescriptor newNsDescriptor) throws IOException {
+ this.oldNsDescriptor = null;
+ this.newNsDescriptor = newNsDescriptor;
+ this.traceEnabled = null;
+ }
+
+ @Override
+ protected Flow executeFromState(final MasterProcedureEnv env, final ModifyNamespaceState state)
+ throws InterruptedException {
+ if (isTraceEnabled()) {
+ LOG.trace(this + " execute state=" + state);
+ }
+
+ try {
+ switch (state) {
+ case MODIFY_NAMESPACE_PREPARE:
+ prepareModify(env);
+ setNextState(ModifyNamespaceState.MODIFY_NAMESPACE_UPDATE_NS_TABLE);
+ break;
+ case MODIFY_NAMESPACE_UPDATE_NS_TABLE:
+ insertIntoNSTable(env);
+ setNextState(ModifyNamespaceState.MODIFY_NAMESPACE_UPDATE_ZK);
+ break;
+ case MODIFY_NAMESPACE_UPDATE_ZK:
+ updateZKNamespaceManager(env);
+ return Flow.NO_MORE_STATE;
+ default:
+ throw new UnsupportedOperationException(this + " unhandled state=" + state);
+ }
+ } catch (IOException e) {
+ LOG.warn("Error trying to modify the namespace" + newNsDescriptor.getName()
+ + " (in state=" + state + ")", e);
+
+ setFailure("master-modify-namespace", e);
+ }
+ return Flow.HAS_MORE_STATE;
+ }
+
+ @Override
+ protected void rollbackState(final MasterProcedureEnv env, final ModifyNamespaceState state)
+ throws IOException {
+ if (isTraceEnabled()) {
+ LOG.trace(this + " rollback state=" + state);
+ }
+ try {
+ switch (state) {
+ case MODIFY_NAMESPACE_UPDATE_ZK:
+ rollbackZKNamespaceManagerChange(env);
+ break;
+ case MODIFY_NAMESPACE_UPDATE_NS_TABLE:
+ rollbackUpdateInNSTable(env);
+ break;
+ case MODIFY_NAMESPACE_PREPARE:
+ break; // nothing to do
+ default:
+ throw new UnsupportedOperationException(this + " unhandled state=" + state);
+ }
+ } catch (IOException e) {
+ // This will be retried. Unless there is a bug in the code,
+ // this should be just a "temporary error" (e.g. network down)
+ LOG.warn("Failed rollback attempt step " + state + " for creating the namespace "
+ + newNsDescriptor.getName(), e);
+ throw e;
+ }
+ }
+
+ @Override
+ protected ModifyNamespaceState getState(final int stateId) {
+ return ModifyNamespaceState.valueOf(stateId);
+ }
+
+ @Override
+ protected int getStateId(final ModifyNamespaceState state) {
+ return state.getNumber();
+ }
+
+ @Override
+ protected ModifyNamespaceState getInitialState() {
+ return ModifyNamespaceState.MODIFY_NAMESPACE_PREPARE;
+ }
+
+ @Override
+ protected void setNextState(ModifyNamespaceState state) {
+ if (aborted.get()) {
+ setAbortFailure("modify-namespace", "abort requested");
+ } else {
+ super.setNextState(state);
+ }
+ }
+
+ @Override
+ public boolean abort(final MasterProcedureEnv env) {
+ aborted.set(true);
+ return true;
+ }
+
+ @Override
+ public void serializeStateData(final OutputStream stream) throws IOException {
+ super.serializeStateData(stream);
+
+ MasterProcedureProtos.ModifyNamespaceStateData.Builder modifyNamespaceMsg =
+ MasterProcedureProtos.ModifyNamespaceStateData.newBuilder().setNamespaceDescriptor(
+ ProtobufUtil.toProtoNamespaceDescriptor(this.newNsDescriptor));
+ if (this.oldNsDescriptor != null) {
+ modifyNamespaceMsg.setUnmodifiedNamespaceDescriptor(
+ ProtobufUtil.toProtoNamespaceDescriptor(this.oldNsDescriptor));
+ }
+ modifyNamespaceMsg.build().writeDelimitedTo(stream);
+ }
+
+ @Override
+ public void deserializeStateData(final InputStream stream) throws IOException {
+ super.deserializeStateData(stream);
+
+ MasterProcedureProtos.ModifyNamespaceStateData modifyNamespaceMsg =
+ MasterProcedureProtos.ModifyNamespaceStateData.parseDelimitedFrom(stream);
+ newNsDescriptor =
+ ProtobufUtil.toNamespaceDescriptor(modifyNamespaceMsg.getNamespaceDescriptor());
+ if (modifyNamespaceMsg.hasUnmodifiedNamespaceDescriptor()) {
+ oldNsDescriptor =
+ ProtobufUtil.toNamespaceDescriptor(modifyNamespaceMsg.getUnmodifiedNamespaceDescriptor());
+ }
+ }
+
+ @Override
+ public void toStringClassDetails(StringBuilder sb) {
+ sb.append(getClass().getSimpleName());
+ sb.append(" (Namespace=");
+ sb.append(newNsDescriptor.getName());
+ sb.append(")");
+ }
+
+ @Override
+ protected boolean acquireLock(final MasterProcedureEnv env) {
+ return getTableNamespaceManager(env).acquireExclusiveLock();
+ }
+
+ @Override
+ protected void releaseLock(final MasterProcedureEnv env) {
+ getTableNamespaceManager(env).releaseExclusiveLock();
+ }
+
+ @Override
+ public TableName getTableName() {
+ return TableName.NAMESPACE_TABLE_NAME;
+ }
+
+ @Override
+ public TableOperationType getTableOperationType() {
+ return TableOperationType.EDIT;
+ }
+
+ /**
+ * Action before any real action of modifying namespace.
+ * @param env MasterProcedureEnv
+ * @throws IOException
+ */
+ private void prepareModify(final MasterProcedureEnv env) throws IOException {
+ if (!getTableNamespaceManager(env).doesNamespaceExist(newNsDescriptor.getName())) {
+ throw new NamespaceNotFoundException(newNsDescriptor.getName());
+ }
+ getTableNamespaceManager(env).validateTableAndRegionCount(newNsDescriptor);
+
+ // This is used for rollback
+ oldNsDescriptor = getTableNamespaceManager(env).get(newNsDescriptor.getName());
+ }
+
+ /**
+ * Insert/update the row into namespace table
+ * @param env MasterProcedureEnv
+ * @throws IOException
+ */
+ private void insertIntoNSTable(final MasterProcedureEnv env) throws IOException {
+ getTableNamespaceManager(env).insertIntoNSTable(newNsDescriptor);
+ }
+
+ /**
+ * Restore the previous row in the namespace table (used during rollback).
+ * @param env MasterProcedureEnv
+ * @throws IOException
+ */
+ private void rollbackUpdateInNSTable(final MasterProcedureEnv env) throws IOException {
+ if (oldNsDescriptor != null) {
+ getTableNamespaceManager(env).insertIntoNSTable(oldNsDescriptor);
+ }
+ }
+
+ /**
+ * Update Zookeeper.
+ * @param env MasterProcedureEnv
+ * @throws IOException
+ */
+ private void updateZKNamespaceManager(final MasterProcedureEnv env) throws IOException {
+ getTableNamespaceManager(env).updateZKNamespaceManager(newNsDescriptor);
+ }
+
+ /**
+ * Update Zookeeper during undo.
+ * @param env MasterProcedureEnv
+ * @throws IOException
+ */
+ private void rollbackZKNamespaceManagerChange(final MasterProcedureEnv env) throws IOException {
+ if (oldNsDescriptor != null) {
+ getTableNamespaceManager(env).updateZKNamespaceManager(oldNsDescriptor);
+ }
+ }
+
+ private TableNamespaceManager getTableNamespaceManager(final MasterProcedureEnv env) {
+ return env.getMasterServices().getTableNamespaceManager();
+ }
+ /**
+ * The procedure could be restarted from a different machine. If the variable is null, we need to
+ * retrieve it.
+ * @return traceEnabled
+ */
+ private Boolean isTraceEnabled() {
+ if (traceEnabled == null) {
+ traceEnabled = LOG.isTraceEnabled();
+ }
+ return traceEnabled;
+ }
+}
\ No newline at end of file
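As an aside for reviewers, the sketch below is illustrative only and not part of the patch: it shows how a caller could drive the new ModifyNamespaceProcedure through the nonce-aware submitProcedure API that the tests later in this patch exercise. The variable names procExec, nonceGroup and nonce are placeholders, and the target namespace is assumed to already exist.

    // Illustrative sketch only, not part of the diff. Assumes procExec is the master's
    // ProcedureExecutor<MasterProcedureEnv> and that the "example_ns" namespace already exists.
    NamespaceDescriptor nsDescriptor = NamespaceDescriptor.create("example_ns").build();
    nsDescriptor.setConfiguration("hbase.namespace.quota.maxtables", "10");

    long procId = procExec.submitProcedure(
        new ModifyNamespaceProcedure(procExec.getEnvironment(), nsDescriptor),
        nonceGroup,   // client-supplied nonce group, used to de-duplicate retried submissions
        nonce);       // client-supplied nonce

    // The state machine runs PREPARE -> UPDATE_NS_TABLE -> UPDATE_ZK; wait for it and verify.
    ProcedureTestingUtility.waitProcedure(procExec, procId);
    ProcedureTestingUtility.assertProcNotFailed(procExec, procId);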
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
index bc9af45..33a64a5 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
@@ -395,17 +395,34 @@ public class TestCatalogJanitor {
}
@Override
- public void createNamespace(NamespaceDescriptor descriptor) throws IOException {
+ public void createNamespace(
+ final NamespaceDescriptor descriptor,
+ final long nonceGroup,
+ final long nonce) throws IOException {
+ //To change body of implemented methods use File | Settings | File Templates.
+ }
+
+ @Override
+ public void createNamespaceSync(
+ final NamespaceDescriptor descriptor,
+ final long nonceGroup,
+ final long nonce) throws IOException {
//To change body of implemented methods use File | Settings | File Templates.
}
@Override
- public void modifyNamespace(NamespaceDescriptor descriptor) throws IOException {
+ public void modifyNamespace(
+ final NamespaceDescriptor descriptor,
+ final long nonceGroup,
+ final long nonce) throws IOException {
//To change body of implemented methods use File | Settings | File Templates.
}
@Override
- public void deleteNamespace(String name) throws IOException {
+ public void deleteNamespace(
+ final String name,
+ final long nonceGroup,
+ final long nonce) throws IOException {
//To change body of implemented methods use File | Settings | File Templates.
}
@@ -504,6 +521,11 @@ public class TestCatalogJanitor {
}
@Override
+ public TableNamespaceManager getTableNamespaceManager() {
+ return null;
+ }
+
+ @Override
public void dispatchMergingRegions(HRegionInfo region_a, HRegionInfo region_b,
boolean forcible) throws IOException {
}
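The stubbed methods above mirror the new nonce-aware signatures this patch adds to MasterServices. Purely as a hedged illustration (not part of the diff), a caller that previously invoked createNamespace(descriptor) would now pass the two extra nonce arguments; the masterServices variable and the NO_NONCE placeholders below are illustrative only.

    // Illustrative only: the nonce-aware form of the namespace calls after this patch.
    // A real client would supply nonces from its nonce generator so the master can
    // de-duplicate retried requests; HConstants.NO_NONCE is just a placeholder here.
    NamespaceDescriptor nsd = NamespaceDescriptor.create("example_ns").build();
    masterServices.createNamespace(nsd, HConstants.NO_NONCE, HConstants.NO_NONCE);
    masterServices.modifyNamespace(nsd, HConstants.NO_NONCE, HConstants.NO_NONCE);
    masterServices.deleteNamespace("example_ns", HConstants.NO_NONCE, HConstants.NO_NONCE);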
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
index 394e339..b3f29db 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
@@ -38,7 +38,6 @@ import org.apache.hadoop.hbase.TableDescriptor;
import org.apache.hadoop.hbase.client.BufferedMutator;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Durability;
-import org.apache.hadoop.hbase.client.NonceGenerator;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.TableState;
@@ -54,7 +53,6 @@ import org.apache.hadoop.hbase.util.MD5Hash;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
public class MasterProcedureTestingUtility {
private static final Log LOG = LogFactory.getLog(MasterProcedureTestingUtility.class);
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateNamespaceProcedure.java hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateNamespaceProcedure.java
new file mode 100644
index 0000000..d25e61f
--- /dev/null
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateNamespaceProcedure.java
@@ -0,0 +1,292 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.procedure;
+
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.NamespaceExistException;
+import org.apache.hadoop.hbase.NamespaceNotFoundException;
+import org.apache.hadoop.hbase.constraint.ConstraintException;
+import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
+import org.apache.hadoop.hbase.procedure2.ProcedureResult;
+import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateNamespaceState;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category({MasterTests.class, MediumTests.class})
+public class TestCreateNamespaceProcedure {
+ private static final Log LOG = LogFactory.getLog(TestCreateNamespaceProcedure.class);
+
+ protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+
+ private static long nonceGroup = HConstants.NO_NONCE;
+ private static long nonce = HConstants.NO_NONCE;
+
+ private static void setupConf(Configuration conf) {
+ conf.setInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, 1);
+ }
+
+ @BeforeClass
+ public static void setupCluster() throws Exception {
+ setupConf(UTIL.getConfiguration());
+ UTIL.startMiniCluster(1);
+ }
+
+ @AfterClass
+ public static void cleanupTest() throws Exception {
+ try {
+ UTIL.shutdownMiniCluster();
+ } catch (Exception e) {
+ LOG.warn("failure shutting down cluster", e);
+ }
+ }
+
+ @Before
+ public void setup() throws Exception {
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(getMasterProcedureExecutor(), false);
+ nonceGroup =
+ MasterProcedureTestingUtility.generateNonceGroup(UTIL.getHBaseCluster().getMaster());
+ nonce = MasterProcedureTestingUtility.generateNonce(UTIL.getHBaseCluster().getMaster());
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(getMasterProcedureExecutor(), false);
+ }
+
+ @Test(timeout = 60000)
+ public void testCreateNamespace() throws Exception {
+ final NamespaceDescriptor nsd = NamespaceDescriptor.create("testCreateNamespace").build();
+ final ProcedureExecutor procExec = getMasterProcedureExecutor();
+
+ long procId = procExec.submitProcedure(
+ new CreateNamespaceProcedure(procExec.getEnvironment(), nsd),
+ nonceGroup,
+ nonce);
+ // Wait the completion
+ ProcedureTestingUtility.waitProcedure(procExec, procId);
+ ProcedureTestingUtility.assertProcNotFailed(procExec, procId);
+
+ validateNamespaceCreated(nsd);
+ }
+
+ @Test(timeout=60000)
+ public void testCreateSameNamespaceTwice() throws Exception {
+ final NamespaceDescriptor nsd =
+ NamespaceDescriptor.create("testCreateSameNamespaceTwice").build();
+ final ProcedureExecutor procExec = getMasterProcedureExecutor();
+
+ long procId1 = procExec.submitProcedure(
+ new CreateNamespaceProcedure(procExec.getEnvironment(), nsd),
+ nonceGroup,
+ nonce);
+ // Wait the completion
+ ProcedureTestingUtility.waitProcedure(procExec, procId1);
+ ProcedureTestingUtility.assertProcNotFailed(procExec, procId1);
+
+ // Create the namespace that already exists
+ long procId2 = procExec.submitProcedure(
+ new CreateNamespaceProcedure(procExec.getEnvironment(), nsd),
+ nonceGroup + 1,
+ nonce + 1);
+ // Wait the completion
+ ProcedureTestingUtility.waitProcedure(procExec, procId2);
+
+ // Second create should fail with NamespaceExistException
+ ProcedureResult result = procExec.getResult(procId2);
+ assertTrue(result.isFailed());
+ LOG.debug("Create namespace failed with exception: " + result.getException());
+ assertTrue(result.getException().getCause() instanceof NamespaceExistException);
+ }
+
+ @Test(timeout=60000)
+ public void testCreateSystemNamespace() throws Exception {
+ final NamespaceDescriptor nsd =
+ UTIL.getHBaseAdmin().getNamespaceDescriptor(NamespaceDescriptor.SYSTEM_NAMESPACE.getName());
+ final ProcedureExecutor procExec = getMasterProcedureExecutor();
+
+ long procId = procExec.submitProcedure(
+ new CreateNamespaceProcedure(procExec.getEnvironment(), nsd),
+ nonceGroup,
+ nonce);
+ // Wait the completion
+ ProcedureTestingUtility.waitProcedure(procExec, procId);
+ ProcedureResult result = procExec.getResult(procId);
+ assertTrue(result.isFailed());
+ LOG.debug("Create namespace failed with exception: " + result.getException());
+ assertTrue(result.getException().getCause() instanceof NamespaceExistException);
+ }
+
+ @Test(timeout=60000)
+ public void testCreateNamespaceWithInvalidRegionCount() throws Exception {
+ final NamespaceDescriptor nsd =
+ NamespaceDescriptor.create("testCreateNamespaceWithInvalidRegionCount").build();
+ final String nsKey = "hbase.namespace.quota.maxregions";
+ final String nsValue = "-1";
+ final ProcedureExecutor procExec = getMasterProcedureExecutor();
+
+ nsd.setConfiguration(nsKey, nsValue);
+
+ long procId = procExec.submitProcedure(
+ new CreateNamespaceProcedure(procExec.getEnvironment(), nsd),
+ nonceGroup,
+ nonce);
+ // Wait the completion
+ ProcedureTestingUtility.waitProcedure(procExec, procId);
+ ProcedureResult result = procExec.getResult(procId);
+ assertTrue(result.isFailed());
+ LOG.debug("Create namespace failed with exception: " + result.getException());
+ assertTrue(result.getException().getCause() instanceof ConstraintException);
+ }
+
+ @Test(timeout=60000)
+ public void testCreateNamespaceWithInvalidTableCount() throws Exception {
+ final NamespaceDescriptor nsd =
+ NamespaceDescriptor.create("testCreateNamespaceWithInvalidTableCount").build();
+ final String nsKey = "hbase.namespace.quota.maxtables";
+ final String nsValue = "-1";
+ final ProcedureExecutor procExec = getMasterProcedureExecutor();
+
+ nsd.setConfiguration(nsKey, nsValue);
+
+ long procId = procExec.submitProcedure(
+ new CreateNamespaceProcedure(procExec.getEnvironment(), nsd),
+ nonceGroup,
+ nonce);
+ // Wait the completion
+ ProcedureTestingUtility.waitProcedure(procExec, procId);
+ ProcedureResult result = procExec.getResult(procId);
+ assertTrue(result.isFailed());
+ LOG.debug("Create namespace failed with exception: " + result.getException());
+ assertTrue(result.getException().getCause() instanceof ConstraintException);
+ }
+
+ @Test(timeout=60000)
+ public void testCreateSameNamespaceTwiceWithSameNonce() throws Exception {
+ final NamespaceDescriptor nsd =
+ NamespaceDescriptor.create("testCreateSameNamespaceTwiceWithSameNonce").build();
+ final ProcedureExecutor procExec = getMasterProcedureExecutor();
+
+ long procId1 = procExec.submitProcedure(
+ new CreateNamespaceProcedure(procExec.getEnvironment(), nsd),
+ nonceGroup,
+ nonce);
+ long procId2 = procExec.submitProcedure(
+ new CreateNamespaceProcedure(procExec.getEnvironment(), nsd),
+ nonceGroup,
+ nonce);
+ // Wait the completion
+ ProcedureTestingUtility.waitProcedure(procExec, procId1);
+ ProcedureTestingUtility.assertProcNotFailed(procExec, procId1);
+
+ validateNamespaceCreated(nsd);
+
+ // Wait for the completion and expect no failure, because it is the same procedure (same nonce)
+ ProcedureTestingUtility.waitProcedure(procExec, procId2);
+ ProcedureTestingUtility.assertProcNotFailed(procExec, procId2);
+ assertTrue(procId1 == procId2);
+ }
+
+ @Test(timeout = 60000)
+ public void testRecoveryAndDoubleExecution() throws Exception {
+ final NamespaceDescriptor nsd =
+ NamespaceDescriptor.create("testRecoveryAndDoubleExecution").build();
+ final ProcedureExecutor procExec = getMasterProcedureExecutor();
+
+ ProcedureTestingUtility.waitNoProcedureRunning(procExec);
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
+
+ // Start the CreateNamespace procedure && kill the executor
+ long procId = procExec.submitProcedure(
+ new CreateNamespaceProcedure(procExec.getEnvironment(), nsd),
+ nonceGroup,
+ nonce);
+
+ // Restart the executor and execute the step twice
+ int numberOfSteps = CreateNamespaceState.values().length;
+ MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(
+ procExec,
+ procId,
+ numberOfSteps,
+ CreateNamespaceState.values());
+
+ // Validate the creation of namespace
+ ProcedureTestingUtility.assertProcNotFailed(procExec, procId);
+ validateNamespaceCreated(nsd);
+ }
+
+ @Test(timeout = 60000)
+ public void testRollbackAndDoubleExecution() throws Exception {
+ final NamespaceDescriptor nsd =
+ NamespaceDescriptor.create("testRollbackAndDoubleExecution").build();
+ final ProcedureExecutor procExec = getMasterProcedureExecutor();
+
+ ProcedureTestingUtility.waitNoProcedureRunning(procExec);
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
+
+ // Start the CreateNamespace procedure && kill the executor
+ long procId = procExec.submitProcedure(
+ new CreateNamespaceProcedure(procExec.getEnvironment(), nsd),
+ nonceGroup,
+ nonce);
+
+ int numberOfSteps = CreateNamespaceState.values().length - 2; // failing in the middle of proc
+ MasterProcedureTestingUtility.testRollbackAndDoubleExecution(
+ procExec,
+ procId,
+ numberOfSteps,
+ CreateNamespaceState.values());
+
+ // Validate the non-existence of namespace
+ try {
+ NamespaceDescriptor nsDescriptor = UTIL.getHBaseAdmin().getNamespaceDescriptor(nsd.getName());
+ assertNull(nsDescriptor);
+ } catch (NamespaceNotFoundException nsnfe) {
+ // Expected
+ LOG.info("The namespace " + nsd.getName() + " is not created.");
+ }
+ }
+
+ private ProcedureExecutor<MasterProcedureEnv> getMasterProcedureExecutor() {
+ return UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor();
+ }
+
+ private void validateNamespaceCreated(NamespaceDescriptor nsd) throws IOException {
+ NamespaceDescriptor createdNsDescriptor =
+ UTIL.getHBaseAdmin().getNamespaceDescriptor(nsd.getName());
+ assertNotNull(createdNsDescriptor);
+ }
+}
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteNamespaceProcedure.java hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteNamespaceProcedure.java
new file mode 100644
index 0000000..dd22de7
--- /dev/null
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteNamespaceProcedure.java
@@ -0,0 +1,282 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.procedure;
+
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.NamespaceNotFoundException;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.constraint.ConstraintException;
+import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
+import org.apache.hadoop.hbase.procedure2.ProcedureResult;
+import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteNamespaceState;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category({MasterTests.class, MediumTests.class})
+public class TestDeleteNamespaceProcedure {
+ private static final Log LOG = LogFactory.getLog(TestDeleteNamespaceProcedure.class);
+
+ protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+
+ private static long nonceGroup = HConstants.NO_NONCE;
+ private static long nonce = HConstants.NO_NONCE;
+
+ private static void setupConf(Configuration conf) {
+ conf.setInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, 1);
+ }
+
+ @BeforeClass
+ public static void setupCluster() throws Exception {
+ setupConf(UTIL.getConfiguration());
+ UTIL.startMiniCluster(1);
+ }
+
+ @AfterClass
+ public static void cleanupTest() throws Exception {
+ try {
+ UTIL.shutdownMiniCluster();
+ } catch (Exception e) {
+ LOG.warn("failure shutting down cluster", e);
+ }
+ }
+
+ @Before
+ public void setup() throws Exception {
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(getMasterProcedureExecutor(), false);
+ nonceGroup =
+ MasterProcedureTestingUtility.generateNonceGroup(UTIL.getHBaseCluster().getMaster());
+ nonce = MasterProcedureTestingUtility.generateNonce(UTIL.getHBaseCluster().getMaster());
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(getMasterProcedureExecutor(), false);
+ for (HTableDescriptor htd: UTIL.getHBaseAdmin().listTables()) {
+ LOG.info("Tear down, remove table=" + htd.getTableName());
+ UTIL.deleteTable(htd.getTableName());
+ }
+ }
+
+ @Test(timeout = 60000)
+ public void testDeleteNamespace() throws Exception {
+ final String namespaceName = "testDeleteNamespace";
+ final ProcedureExecutor procExec = getMasterProcedureExecutor();
+
+ createNamespaceForTesting(namespaceName);
+
+ long procId = procExec.submitProcedure(
+ new DeleteNamespaceProcedure(procExec.getEnvironment(), namespaceName),
+ nonceGroup,
+ nonce);
+ // Wait the completion
+ ProcedureTestingUtility.waitProcedure(procExec, procId);
+ ProcedureTestingUtility.assertProcNotFailed(procExec, procId);
+
+ validateNamespaceNotExist(namespaceName);
+ }
+
+ @Test(timeout=60000)
+ public void testDeleteNonExistNamespace() throws Exception {
+ final String namespaceName = "testDeleteNonExistNamespace";
+ final ProcedureExecutor procExec = getMasterProcedureExecutor();
+
+ validateNamespaceNotExist(namespaceName);
+
+ long procId = procExec.submitProcedure(
+ new DeleteNamespaceProcedure(procExec.getEnvironment(), namespaceName),
+ nonceGroup,
+ nonce);
+ // Wait the completion
+ ProcedureTestingUtility.waitProcedure(procExec, procId);
+ // Expect fail with NamespaceNotFoundException
+ ProcedureResult result = procExec.getResult(procId);
+ assertTrue(result.isFailed());
+ LOG.debug("Delete namespace failed with exception: " + result.getException());
+ assertTrue(result.getException().getCause() instanceof NamespaceNotFoundException);
+ }
+
+ @Test(timeout=60000)
+ public void testDeleteSystemNamespace() throws Exception {
+ final String namespaceName = NamespaceDescriptor.SYSTEM_NAMESPACE.getName();
+ final ProcedureExecutor procExec = getMasterProcedureExecutor();
+
+ long procId = procExec.submitProcedure(
+ new DeleteNamespaceProcedure(procExec.getEnvironment(), namespaceName),
+ nonceGroup,
+ nonce);
+ // Wait the completion
+ ProcedureTestingUtility.waitProcedure(procExec, procId);
+ ProcedureResult result = procExec.getResult(procId);
+ assertTrue(result.isFailed());
+ LOG.debug("Delete namespace failed with exception: " + result.getException());
+ assertTrue(result.getException().getCause() instanceof ConstraintException);
+ }
+
+ @Test(timeout=60000)
+ public void testDeleteNonEmptyNamespace() throws Exception {
+ final String namespaceName = "testDeleteNonExistNamespace";
+ final TableName tableName = TableName.valueOf("testDeleteNonExistNamespace:t1");
+ final ProcedureExecutor procExec = getMasterProcedureExecutor();
+ // create namespace
+ createNamespaceForTesting(namespaceName);
+ // create the table under the new namespace
+ MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f1");
+
+ long procId = procExec.submitProcedure(
+ new DeleteNamespaceProcedure(procExec.getEnvironment(), namespaceName),
+ nonceGroup,
+ nonce);
+ // Wait the completion
+ ProcedureTestingUtility.waitProcedure(procExec, procId);
+ ProcedureResult result = procExec.getResult(procId);
+ assertTrue(result.isFailed());
+ LOG.debug("Delete namespace failed with exception: " + result.getException());
+ assertTrue(result.getException().getCause() instanceof ConstraintException);
+ }
+
+ @Test(timeout=60000)
+ public void testDeleteSameNamespaceTwiceWithSameNonce() throws Exception {
+ final String namespaceName = "testDeleteSameNamespaceTwiceWithSameNonce";
+ final ProcedureExecutor procExec = getMasterProcedureExecutor();
+
+ createNamespaceForTesting(namespaceName);
+
+ long procId1 = procExec.submitProcedure(
+ new DeleteNamespaceProcedure(procExec.getEnvironment(), namespaceName),
+ nonceGroup,
+ nonce);
+ long procId2 = procExec.submitProcedure(
+ new DeleteNamespaceProcedure(procExec.getEnvironment(), namespaceName),
+ nonceGroup,
+ nonce);
+ // Wait the completion
+ ProcedureTestingUtility.waitProcedure(procExec, procId1);
+ ProcedureTestingUtility.assertProcNotFailed(procExec, procId1);
+
+ validateNamespaceNotExist(namespaceName);
+
+ // Wait for the completion and expect no failure, because it is the same procedure (same nonce)
+ ProcedureTestingUtility.waitProcedure(procExec, procId2);
+ ProcedureTestingUtility.assertProcNotFailed(procExec, procId2);
+ assertTrue(procId1 == procId2);
+ }
+
+ @Test(timeout = 60000)
+ public void testRecoveryAndDoubleExecution() throws Exception {
+ final String namespaceName = "testRecoveryAndDoubleExecution";
+ final ProcedureExecutor procExec = getMasterProcedureExecutor();
+
+ createNamespaceForTesting(namespaceName);
+
+ ProcedureTestingUtility.waitNoProcedureRunning(procExec);
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
+
+ // Start the DeleteNamespace procedure && kill the executor
+ long procId = procExec.submitProcedure(
+ new DeleteNamespaceProcedure(procExec.getEnvironment(), namespaceName),
+ nonceGroup,
+ nonce);
+
+ // Restart the executor and execute the step twice
+ int numberOfSteps = DeleteNamespaceState.values().length;
+ MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(
+ procExec,
+ procId,
+ numberOfSteps,
+ DeleteNamespaceState.values());
+
+ // Validate the deletion of namespace
+ ProcedureTestingUtility.assertProcNotFailed(procExec, procId);
+ validateNamespaceNotExist(namespaceName);
+ }
+
+ @Test(timeout = 60000)
+ public void testRollbackAndDoubleExecution() throws Exception {
+ final String namespaceName = "testRollbackAndDoubleExecution";
+ final ProcedureExecutor procExec = getMasterProcedureExecutor();
+
+ createNamespaceForTesting(namespaceName);
+
+ ProcedureTestingUtility.waitNoProcedureRunning(procExec);
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
+
+ // Start the DeleteNamespace procedure && kill the executor
+ long procId = procExec.submitProcedure(
+ new DeleteNamespaceProcedure(procExec.getEnvironment(), namespaceName),
+ nonceGroup,
+ nonce);
+
+ int numberOfSteps = DeleteNamespaceState.values().length - 2; // failing in the middle of proc
+ MasterProcedureTestingUtility.testRollbackAndDoubleExecution(
+ procExec,
+ procId,
+ numberOfSteps,
+ DeleteNamespaceState.values());
+
+ // Validate the namespace still exists
+ NamespaceDescriptor createdNsDescriptor =
+ UTIL.getHBaseAdmin().getNamespaceDescriptor(namespaceName);
+ assertNotNull(createdNsDescriptor);
+ }
+
+ private ProcedureExecutor<MasterProcedureEnv> getMasterProcedureExecutor() {
+ return UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor();
+ }
+
+ private void createNamespaceForTesting(final String namespaceName) throws Exception {
+ final NamespaceDescriptor nsd = NamespaceDescriptor.create(namespaceName).build();
+ final ProcedureExecutor procExec = getMasterProcedureExecutor();
+
+ long procId = procExec.submitProcedure(
+ new CreateNamespaceProcedure(procExec.getEnvironment(), nsd),
+ nonceGroup + 1,
+ nonce + 1);
+ // Wait the completion
+ ProcedureTestingUtility.waitProcedure(procExec, procId);
+ ProcedureTestingUtility.assertProcNotFailed(procExec, procId);
+ }
+
+ public static void validateNamespaceNotExist(final String nsName) throws IOException {
+ try {
+ NamespaceDescriptor nsDescriptor = UTIL.getHBaseAdmin().getNamespaceDescriptor(nsName);
+ assertNull(nsDescriptor);
+ } catch (NamespaceNotFoundException nsnfe) {
+ // Expected
+ }
+ }
+}
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyNamespaceProcedure.java hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyNamespaceProcedure.java
new file mode 100644
index 0000000..e946043
--- /dev/null
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyNamespaceProcedure.java
@@ -0,0 +1,295 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.procedure;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.NamespaceNotFoundException;
+import org.apache.hadoop.hbase.constraint.ConstraintException;
+import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
+import org.apache.hadoop.hbase.procedure2.ProcedureResult;
+import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyNamespaceState;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category({MasterTests.class, MediumTests.class})
+public class TestModifyNamespaceProcedure {
+ private static final Log LOG = LogFactory.getLog(TestModifyNamespaceProcedure.class);
+
+ protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+
+ private static long nonceGroup = HConstants.NO_NONCE;
+ private static long nonce = HConstants.NO_NONCE;
+
+ private static void setupConf(Configuration conf) {
+ conf.setInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, 1);
+ }
+
+ @BeforeClass
+ public static void setupCluster() throws Exception {
+ setupConf(UTIL.getConfiguration());
+ UTIL.startMiniCluster(1);
+ }
+
+ @AfterClass
+ public static void cleanupTest() throws Exception {
+ try {
+ UTIL.shutdownMiniCluster();
+ } catch (Exception e) {
+ LOG.warn("failure shutting down cluster", e);
+ }
+ }
+
+ @Before
+ public void setup() throws Exception {
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(getMasterProcedureExecutor(), false);
+ nonceGroup =
+ MasterProcedureTestingUtility.generateNonceGroup(UTIL.getHBaseCluster().getMaster());
+ nonce = MasterProcedureTestingUtility.generateNonce(UTIL.getHBaseCluster().getMaster());
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(getMasterProcedureExecutor(), false);
+ for (HTableDescriptor htd: UTIL.getHBaseAdmin().listTables()) {
+ LOG.info("Tear down, remove table=" + htd.getTableName());
+ UTIL.deleteTable(htd.getTableName());
+ }
+ }
+
+
+ @Test(timeout = 60000)
+ public void testModifyNamespace() throws Exception {
+ final NamespaceDescriptor nsd = NamespaceDescriptor.create("testModifyNamespace").build();
+ final String nsKey1 = "hbase.namespace.quota.maxregions";
+ final String nsValue1before = "1111";
+ final String nsValue1after = "9999";
+ final String nsKey2 = "hbase.namespace.quota.maxtables";
+ final String nsValue2 = "10";
+ final ProcedureExecutor procExec = getMasterProcedureExecutor();
+
+ nsd.setConfiguration(nsKey1, nsValue1before);
+ createNamespaceForTesting(nsd);
+
+ // Before modify
+ NamespaceDescriptor currentNsDescriptor =
+ UTIL.getHBaseAdmin().getNamespaceDescriptor(nsd.getName());
+ assertEquals(nsValue1before, currentNsDescriptor.getConfigurationValue(nsKey1));
+ assertNull(currentNsDescriptor.getConfigurationValue(nsKey2));
+
+ // Update
+ nsd.setConfiguration(nsKey1, nsValue1after);
+ nsd.setConfiguration(nsKey2, nsValue2);
+
+ long procId1 = procExec.submitProcedure(
+ new ModifyNamespaceProcedure(procExec.getEnvironment(), nsd),
+ nonceGroup,
+ nonce);
+ // Wait the completion
+ ProcedureTestingUtility.waitProcedure(procExec, procId1);
+ ProcedureTestingUtility.assertProcNotFailed(procExec, procId1);
+
+ // Verify the namespace is updated.
+ currentNsDescriptor =
+ UTIL.getHBaseAdmin().getNamespaceDescriptor(nsd.getName());
+ assertEquals(nsValue1after, currentNsDescriptor.getConfigurationValue(nsKey1));
+ assertEquals(nsValue2, currentNsDescriptor.getConfigurationValue(nsKey2));
+ }
+
+ @Test(timeout=60000)
+ public void testModifyNonExistNamespace() throws Exception {
+ final String namespaceName = "testModifyNonExistNamespace";
+ final ProcedureExecutor procExec = getMasterProcedureExecutor();
+
+ try {
+ NamespaceDescriptor nsDescriptor = UTIL.getHBaseAdmin().getNamespaceDescriptor(namespaceName);
+ assertNull(nsDescriptor);
+ } catch (NamespaceNotFoundException nsnfe) {
+ // Expected
+ LOG.debug("The namespace " + namespaceName + " does not exist. This is expected.");
+ }
+
+ final NamespaceDescriptor nsd = NamespaceDescriptor.create(namespaceName).build();
+
+ long procId = procExec.submitProcedure(
+ new ModifyNamespaceProcedure(procExec.getEnvironment(), nsd),
+ nonceGroup,
+ nonce);
+ // Wait the completion
+ ProcedureTestingUtility.waitProcedure(procExec, procId);
+
+ // Expect fail with NamespaceNotFoundException
+ ProcedureResult result = procExec.getResult(procId);
+ assertTrue(result.isFailed());
+ LOG.debug("modify namespace failed with exception: " + result.getException());
+ assertTrue(result.getException().getCause() instanceof NamespaceNotFoundException);
+ }
+
+ @Test(timeout=60000)
+ public void testModifyNamespaceWithInvalidRegionCount() throws Exception {
+ final NamespaceDescriptor nsd =
+ NamespaceDescriptor.create("testModifyNamespaceWithInvalidRegionCount").build();
+ final String nsKey = "hbase.namespace.quota.maxregions";
+ final String nsValue = "-1";
+ final ProcedureExecutor procExec = getMasterProcedureExecutor();
+
+ createNamespaceForTesting(nsd);
+
+ // Modify
+ nsd.setConfiguration(nsKey, nsValue);
+
+ long procId = procExec.submitProcedure(
+ new ModifyNamespaceProcedure(procExec.getEnvironment(), nsd),
+ nonceGroup,
+ nonce);
+ // Wait the completion
+ ProcedureTestingUtility.waitProcedure(procExec, procId);
+ ProcedureResult result = procExec.getResult(procId);
+ assertTrue(result.isFailed());
+ LOG.debug("Modify namespace failed with exception: " + result.getException());
+ assertTrue(result.getException().getCause() instanceof ConstraintException);
+ }
+
+ @Test(timeout=60000)
+ public void testModifyNamespaceWithInvalidTableCount() throws Exception {
+ final NamespaceDescriptor nsd =
+ NamespaceDescriptor.create("testModifyNamespaceWithInvalidTableCount").build();
+ final String nsKey = "hbase.namespace.quota.maxtables";
+ final String nsValue = "-1";
+ final ProcedureExecutor procExec = getMasterProcedureExecutor();
+
+ createNamespaceForTesting(nsd);
+
+ // Modify
+ nsd.setConfiguration(nsKey, nsValue);
+
+ long procId = procExec.submitProcedure(
+ new ModifyNamespaceProcedure(procExec.getEnvironment(), nsd),
+ nonceGroup,
+ nonce);
+ // Wait the completion
+ ProcedureTestingUtility.waitProcedure(procExec, procId);
+ ProcedureResult result = procExec.getResult(procId);
+ assertTrue(result.isFailed());
+ LOG.debug("Modify namespace failed with exception: " + result.getException());
+ assertTrue(result.getException().getCause() instanceof ConstraintException);
+ }
+
+ @Test(timeout = 60000)
+ public void testRecoveryAndDoubleExecution() throws Exception {
+ final NamespaceDescriptor nsd =
+ NamespaceDescriptor.create("testRecoveryAndDoubleExecution").build();
+ final String nsKey = "foo";
+ final String nsValue = "bar";
+ final ProcedureExecutor procExec = getMasterProcedureExecutor();
+
+ createNamespaceForTesting(nsd);
+ ProcedureTestingUtility.waitNoProcedureRunning(procExec);
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
+
+ // Modify
+ nsd.setConfiguration(nsKey, nsValue);
+
+ // Start the Modify procedure && kill the executor
+ long procId = procExec.submitProcedure(
+ new ModifyNamespaceProcedure(procExec.getEnvironment(), nsd),
+ nonceGroup,
+ nonce);
+
+ // Restart the executor and execute the step twice
+ int numberOfSteps = ModifyNamespaceState.values().length;
+ MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(
+ procExec,
+ procId,
+ numberOfSteps,
+ ModifyNamespaceState.values());
+
+ ProcedureTestingUtility.assertProcNotFailed(procExec, procId);
+ // Validate
+ NamespaceDescriptor currentNsDescriptor =
+ UTIL.getHBaseAdmin().getNamespaceDescriptor(nsd.getName());
+ assertEquals(nsValue, currentNsDescriptor.getConfigurationValue(nsKey));
+ }
+
+ @Test(timeout = 60000)
+ public void testRollbackAndDoubleExecution() throws Exception {
+ final NamespaceDescriptor nsd =
+ NamespaceDescriptor.create("testRollbackAndDoubleExecution").build();
+ final String nsKey = "foo";
+ final String nsValue = "bar";
+ final ProcedureExecutor procExec = getMasterProcedureExecutor();
+
+ createNamespaceForTesting(nsd);
+ ProcedureTestingUtility.waitNoProcedureRunning(procExec);
+ ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
+
+ // Modify
+ nsd.setConfiguration(nsKey, nsValue);
+
+ // Start the Modify procedure && kill the executor
+ long procId = procExec.submitProcedure(
+ new ModifyNamespaceProcedure(procExec.getEnvironment(), nsd),
+ nonceGroup,
+ nonce);
+
+ // Failing in the middle of proc
+ int numberOfSteps = ModifyNamespaceState.values().length - 2;
+ MasterProcedureTestingUtility.testRollbackAndDoubleExecution(
+ procExec,
+ procId,
+ numberOfSteps,
+ ModifyNamespaceState.values());
+
+ // Validate
+ NamespaceDescriptor currentNsDescriptor =
+ UTIL.getHBaseAdmin().getNamespaceDescriptor(nsd.getName());
+ assertNull(currentNsDescriptor.getConfigurationValue(nsKey));
+ }
+
+ private ProcedureExecutor<MasterProcedureEnv> getMasterProcedureExecutor() {
+ return UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor();
+ }
+
+ private void createNamespaceForTesting(NamespaceDescriptor nsDescriptor) throws Exception {
+ final ProcedureExecutor procExec = getMasterProcedureExecutor();
+
+ long procId = procExec.submitProcedure(
+ new CreateNamespaceProcedure(procExec.getEnvironment(), nsDescriptor),
+ nonceGroup + 1,
+ nonce + 1);
+ // Wait the completion
+ ProcedureTestingUtility.waitProcedure(procExec, procId);
+ ProcedureTestingUtility.assertProcNotFailed(procExec, procId);
+ }
+}
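A closing note on the recovery and rollback tests above: the number of replay steps is derived directly from the generated protobuf state enums, so the counts stay correct if states are added later. A minimal hedged sketch of that relationship follows; the printout is illustrative only and not part of the patch.

    // Illustrative only: step counts used by the recovery/rollback tests come straight
    // from the generated state enums, e.g. ModifyNamespaceState has three states:
    // MODIFY_NAMESPACE_PREPARE, MODIFY_NAMESPACE_UPDATE_NS_TABLE, MODIFY_NAMESPACE_UPDATE_ZK.
    int fullRun = ModifyNamespaceState.values().length;        // replay every step for recovery
    int partialRun = ModifyNamespaceState.values().length - 2; // stop two states short, then roll back
    System.out.println("recovery steps=" + fullRun + ", rollback triggered after=" + partialRun);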