diff --git a/hbase-protocol/pom.xml b/hbase-protocol/pom.xml
index 0d33332..fb5e0ab 100644
--- a/hbase-protocol/pom.xml
+++ b/hbase-protocol/pom.xml
@@ -175,6 +175,7 @@
LoadBalancer.proto
MapReduce.proto
Master.proto
+ MasterProcedure.proto
MultiRowMutation.proto
Procedure.proto
Quota.proto
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java
new file mode 100644
index 0000000..a960040
--- /dev/null
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java
@@ -0,0 +1,2386 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: MasterProcedure.proto
+
+package org.apache.hadoop.hbase.protobuf.generated;
+
+public final class MasterProcedureProtos {
+ private MasterProcedureProtos() {}
+ public static void registerAllExtensions(
+ com.google.protobuf.ExtensionRegistry registry) {
+ }
+ public interface CreateTableStateOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required .UserInformation user_info = 1;
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ boolean hasUserInfo();
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo();
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder();
+
+ // required .TableSchema table_schema = 2;
+ /**
+ * required .TableSchema table_schema = 2;
+ */
+ boolean hasTableSchema();
+ /**
+ * required .TableSchema table_schema = 2;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getTableSchema();
+ /**
+ * required .TableSchema table_schema = 2;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getTableSchemaOrBuilder();
+
+ // repeated .RegionInfo region_info = 3;
+ /**
+ * repeated .RegionInfo region_info = 3;
+ */
+ java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo>
+ getRegionInfoList();
+ /**
+ * repeated .RegionInfo region_info = 3;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo(int index);
+ /**
+ * repeated .RegionInfo region_info = 3;
+ */
+ int getRegionInfoCount();
+ /**
+ * repeated .RegionInfo region_info = 3;
+ */
+ java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>
+ getRegionInfoOrBuilderList();
+ /**
+ * repeated .RegionInfo region_info = 3;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder(
+ int index);
+ }
+ /**
+ * Protobuf type {@code CreateTableState}
+ */
+ public static final class CreateTableState extends
+ com.google.protobuf.GeneratedMessage
+ implements CreateTableStateOrBuilder {
+ // Use CreateTableState.newBuilder() to construct.
+ private CreateTableState(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private CreateTableState(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final CreateTableState defaultInstance;
+ public static CreateTableState getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public CreateTableState getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private CreateTableState(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ subBuilder = userInfo_.toBuilder();
+ }
+ userInfo_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(userInfo_);
+ userInfo_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000001;
+ break;
+ }
+ case 18: {
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ subBuilder = tableSchema_.toBuilder();
+ }
+ tableSchema_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(tableSchema_);
+ tableSchema_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000002;
+ break;
+ }
+ case 26: {
+ if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
+ regionInfo_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo>();
+ mutable_bitField0_ |= 0x00000004;
+ }
+ regionInfo_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.PARSER, extensionRegistry));
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
+ regionInfo_ = java.util.Collections.unmodifiableList(regionInfo_);
+ }
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_CreateTableState_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_CreateTableState_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableState.class, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableState.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<CreateTableState> PARSER =
+ new com.google.protobuf.AbstractParser<CreateTableState>() {
+ public CreateTableState parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new CreateTableState(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<CreateTableState> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // required .UserInformation user_info = 1;
+ public static final int USER_INFO_FIELD_NUMBER = 1;
+ private org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation userInfo_;
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ public boolean hasUserInfo() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo() {
+ return userInfo_;
+ }
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder() {
+ return userInfo_;
+ }
+
+ // required .TableSchema table_schema = 2;
+ public static final int TABLE_SCHEMA_FIELD_NUMBER = 2;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema tableSchema_;
+ /**
+ * required .TableSchema table_schema = 2;
+ */
+ public boolean hasTableSchema() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * required .TableSchema table_schema = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getTableSchema() {
+ return tableSchema_;
+ }
+ /**
+ * required .TableSchema table_schema = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getTableSchemaOrBuilder() {
+ return tableSchema_;
+ }
+
+ // repeated .RegionInfo region_info = 3;
+ public static final int REGION_INFO_FIELD_NUMBER = 3;
+ private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo> regionInfo_;
+ /**
+ * repeated .RegionInfo region_info = 3;
+ */
+ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo> getRegionInfoList() {
+ return regionInfo_;
+ }
+ /**
+ * repeated .RegionInfo region_info = 3;
+ */
+ public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>
+ getRegionInfoOrBuilderList() {
+ return regionInfo_;
+ }
+ /**
+ * repeated .RegionInfo region_info = 3;
+ */
+ public int getRegionInfoCount() {
+ return regionInfo_.size();
+ }
+ /**
+ * repeated .RegionInfo region_info = 3;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo(int index) {
+ return regionInfo_.get(index);
+ }
+ /**
+ * repeated .RegionInfo region_info = 3;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder(
+ int index) {
+ return regionInfo_.get(index);
+ }
+
+ private void initFields() {
+ userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance();
+ tableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance();
+ regionInfo_ = java.util.Collections.emptyList();
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasUserInfo()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasTableSchema()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!getUserInfo().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!getTableSchema().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ for (int i = 0; i < getRegionInfoCount(); i++) {
+ if (!getRegionInfo(i).isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeMessage(1, userInfo_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeMessage(2, tableSchema_);
+ }
+ for (int i = 0; i < regionInfo_.size(); i++) {
+ output.writeMessage(3, regionInfo_.get(i));
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(1, userInfo_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(2, tableSchema_);
+ }
+ for (int i = 0; i < regionInfo_.size(); i++) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(3, regionInfo_.get(i));
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableState)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableState other = (org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableState) obj;
+
+ boolean result = true;
+ result = result && (hasUserInfo() == other.hasUserInfo());
+ if (hasUserInfo()) {
+ result = result && getUserInfo()
+ .equals(other.getUserInfo());
+ }
+ result = result && (hasTableSchema() == other.hasTableSchema());
+ if (hasTableSchema()) {
+ result = result && getTableSchema()
+ .equals(other.getTableSchema());
+ }
+ result = result && getRegionInfoList()
+ .equals(other.getRegionInfoList());
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasUserInfo()) {
+ hash = (37 * hash) + USER_INFO_FIELD_NUMBER;
+ hash = (53 * hash) + getUserInfo().hashCode();
+ }
+ if (hasTableSchema()) {
+ hash = (37 * hash) + TABLE_SCHEMA_FIELD_NUMBER;
+ hash = (53 * hash) + getTableSchema().hashCode();
+ }
+ if (getRegionInfoCount() > 0) {
+ hash = (37 * hash) + REGION_INFO_FIELD_NUMBER;
+ hash = (53 * hash) + getRegionInfoList().hashCode();
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableState parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableState parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableState parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableState parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableState parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableState parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableState parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableState parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableState parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableState parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableState prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code CreateTableState}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_CreateTableState_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_CreateTableState_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableState.class, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableState.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableState.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getUserInfoFieldBuilder();
+ getTableSchemaFieldBuilder();
+ getRegionInfoFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ if (userInfoBuilder_ == null) {
+ userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance();
+ } else {
+ userInfoBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
+ if (tableSchemaBuilder_ == null) {
+ tableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance();
+ } else {
+ tableSchemaBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000002);
+ if (regionInfoBuilder_ == null) {
+ regionInfo_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000004);
+ } else {
+ regionInfoBuilder_.clear();
+ }
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_CreateTableState_descriptor;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableState getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableState.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableState build() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableState result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableState buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableState result = new org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableState(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ if (userInfoBuilder_ == null) {
+ result.userInfo_ = userInfo_;
+ } else {
+ result.userInfo_ = userInfoBuilder_.build();
+ }
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ if (tableSchemaBuilder_ == null) {
+ result.tableSchema_ = tableSchema_;
+ } else {
+ result.tableSchema_ = tableSchemaBuilder_.build();
+ }
+ if (regionInfoBuilder_ == null) {
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ regionInfo_ = java.util.Collections.unmodifiableList(regionInfo_);
+ bitField0_ = (bitField0_ & ~0x00000004);
+ }
+ result.regionInfo_ = regionInfo_;
+ } else {
+ result.regionInfo_ = regionInfoBuilder_.build();
+ }
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableState) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableState)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableState other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableState.getDefaultInstance()) return this;
+ if (other.hasUserInfo()) {
+ mergeUserInfo(other.getUserInfo());
+ }
+ if (other.hasTableSchema()) {
+ mergeTableSchema(other.getTableSchema());
+ }
+ if (regionInfoBuilder_ == null) {
+ if (!other.regionInfo_.isEmpty()) {
+ if (regionInfo_.isEmpty()) {
+ regionInfo_ = other.regionInfo_;
+ bitField0_ = (bitField0_ & ~0x00000004);
+ } else {
+ ensureRegionInfoIsMutable();
+ regionInfo_.addAll(other.regionInfo_);
+ }
+ onChanged();
+ }
+ } else {
+ if (!other.regionInfo_.isEmpty()) {
+ if (regionInfoBuilder_.isEmpty()) {
+ regionInfoBuilder_.dispose();
+ regionInfoBuilder_ = null;
+ regionInfo_ = other.regionInfo_;
+ bitField0_ = (bitField0_ & ~0x00000004);
+ regionInfoBuilder_ =
+ com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
+ getRegionInfoFieldBuilder() : null;
+ } else {
+ regionInfoBuilder_.addAllMessages(other.regionInfo_);
+ }
+ }
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasUserInfo()) {
+
+ return false;
+ }
+ if (!hasTableSchema()) {
+
+ return false;
+ }
+ if (!getUserInfo().isInitialized()) {
+
+ return false;
+ }
+ if (!getTableSchema().isInitialized()) {
+
+ return false;
+ }
+ for (int i = 0; i < getRegionInfoCount(); i++) {
+ if (!getRegionInfo(i).isInitialized()) {
+
+ return false;
+ }
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableState parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableState) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // required .UserInformation user_info = 1;
+ private org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder> userInfoBuilder_;
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ public boolean hasUserInfo() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo() {
+ if (userInfoBuilder_ == null) {
+ return userInfo_;
+ } else {
+ return userInfoBuilder_.getMessage();
+ }
+ }
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ public Builder setUserInfo(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation value) {
+ if (userInfoBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ userInfo_ = value;
+ onChanged();
+ } else {
+ userInfoBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ public Builder setUserInfo(
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder builderForValue) {
+ if (userInfoBuilder_ == null) {
+ userInfo_ = builderForValue.build();
+ onChanged();
+ } else {
+ userInfoBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ public Builder mergeUserInfo(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation value) {
+ if (userInfoBuilder_ == null) {
+ if (((bitField0_ & 0x00000001) == 0x00000001) &&
+ userInfo_ != org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance()) {
+ userInfo_ =
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.newBuilder(userInfo_).mergeFrom(value).buildPartial();
+ } else {
+ userInfo_ = value;
+ }
+ onChanged();
+ } else {
+ userInfoBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ public Builder clearUserInfo() {
+ if (userInfoBuilder_ == null) {
+ userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance();
+ onChanged();
+ } else {
+ userInfoBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
+ }
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder getUserInfoBuilder() {
+ bitField0_ |= 0x00000001;
+ onChanged();
+ return getUserInfoFieldBuilder().getBuilder();
+ }
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder() {
+ if (userInfoBuilder_ != null) {
+ return userInfoBuilder_.getMessageOrBuilder();
+ } else {
+ return userInfo_;
+ }
+ }
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder>
+ getUserInfoFieldBuilder() {
+ if (userInfoBuilder_ == null) {
+ userInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder>(
+ userInfo_,
+ getParentForChildren(),
+ isClean());
+ userInfo_ = null;
+ }
+ return userInfoBuilder_;
+ }
+
+ // required .TableSchema table_schema = 2;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema tableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder> tableSchemaBuilder_;
+ /**
+ * required .TableSchema table_schema = 2;
+ */
+ public boolean hasTableSchema() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * required .TableSchema table_schema = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getTableSchema() {
+ if (tableSchemaBuilder_ == null) {
+ return tableSchema_;
+ } else {
+ return tableSchemaBuilder_.getMessage();
+ }
+ }
+ /**
+ * required .TableSchema table_schema = 2;
+ */
+ public Builder setTableSchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) {
+ if (tableSchemaBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ tableSchema_ = value;
+ onChanged();
+ } else {
+ tableSchemaBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * required .TableSchema table_schema = 2;
+ */
+ public Builder setTableSchema(
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder builderForValue) {
+ if (tableSchemaBuilder_ == null) {
+ tableSchema_ = builderForValue.build();
+ onChanged();
+ } else {
+ tableSchemaBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * required .TableSchema table_schema = 2;
+ */
+ public Builder mergeTableSchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) {
+ if (tableSchemaBuilder_ == null) {
+ if (((bitField0_ & 0x00000002) == 0x00000002) &&
+ tableSchema_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance()) {
+ tableSchema_ =
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.newBuilder(tableSchema_).mergeFrom(value).buildPartial();
+ } else {
+ tableSchema_ = value;
+ }
+ onChanged();
+ } else {
+ tableSchemaBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * required .TableSchema table_schema = 2;
+ */
+ public Builder clearTableSchema() {
+ if (tableSchemaBuilder_ == null) {
+ tableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance();
+ onChanged();
+ } else {
+ tableSchemaBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000002);
+ return this;
+ }
+ /**
+ * required .TableSchema table_schema = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder getTableSchemaBuilder() {
+ bitField0_ |= 0x00000002;
+ onChanged();
+ return getTableSchemaFieldBuilder().getBuilder();
+ }
+ /**
+ * required .TableSchema table_schema = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getTableSchemaOrBuilder() {
+ if (tableSchemaBuilder_ != null) {
+ return tableSchemaBuilder_.getMessageOrBuilder();
+ } else {
+ return tableSchema_;
+ }
+ }
+ /**
+ * required .TableSchema table_schema = 2;
+ */
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder>
+ getTableSchemaFieldBuilder() {
+ if (tableSchemaBuilder_ == null) {
+ tableSchemaBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder>(
+ tableSchema_,
+ getParentForChildren(),
+ isClean());
+ tableSchema_ = null;
+ }
+ return tableSchemaBuilder_;
+ }
+
+ // repeated .RegionInfo region_info = 3;
+ private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo> regionInfo_ =
+ java.util.Collections.emptyList();
+ private void ensureRegionInfoIsMutable() {
+ if (!((bitField0_ & 0x00000004) == 0x00000004)) {
+ regionInfo_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo>(regionInfo_);
+ bitField0_ |= 0x00000004;
+ }
+ }
+
+ private com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> regionInfoBuilder_;
+
+ /**
+ * repeated .RegionInfo region_info = 3;
+ */
+ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo> getRegionInfoList() {
+ if (regionInfoBuilder_ == null) {
+ return java.util.Collections.unmodifiableList(regionInfo_);
+ } else {
+ return regionInfoBuilder_.getMessageList();
+ }
+ }
+ /**
+ * repeated .RegionInfo region_info = 3;
+ */
+ public int getRegionInfoCount() {
+ if (regionInfoBuilder_ == null) {
+ return regionInfo_.size();
+ } else {
+ return regionInfoBuilder_.getCount();
+ }
+ }
+ /**
+ * repeated .RegionInfo region_info = 3;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo(int index) {
+ if (regionInfoBuilder_ == null) {
+ return regionInfo_.get(index);
+ } else {
+ return regionInfoBuilder_.getMessage(index);
+ }
+ }
+ /**
+ * repeated .RegionInfo region_info = 3;
+ */
+ public Builder setRegionInfo(
+ int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) {
+ if (regionInfoBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureRegionInfoIsMutable();
+ regionInfo_.set(index, value);
+ onChanged();
+ } else {
+ regionInfoBuilder_.setMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * repeated .RegionInfo region_info = 3;
+ */
+ public Builder setRegionInfo(
+ int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) {
+ if (regionInfoBuilder_ == null) {
+ ensureRegionInfoIsMutable();
+ regionInfo_.set(index, builderForValue.build());
+ onChanged();
+ } else {
+ regionInfoBuilder_.setMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .RegionInfo region_info = 3;
+ */
+ public Builder addRegionInfo(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) {
+ if (regionInfoBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureRegionInfoIsMutable();
+ regionInfo_.add(value);
+ onChanged();
+ } else {
+ regionInfoBuilder_.addMessage(value);
+ }
+ return this;
+ }
+ /**
+ * repeated .RegionInfo region_info = 3;
+ */
+ public Builder addRegionInfo(
+ int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) {
+ if (regionInfoBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureRegionInfoIsMutable();
+ regionInfo_.add(index, value);
+ onChanged();
+ } else {
+ regionInfoBuilder_.addMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * repeated .RegionInfo region_info = 3;
+ */
+ public Builder addRegionInfo(
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) {
+ if (regionInfoBuilder_ == null) {
+ ensureRegionInfoIsMutable();
+ regionInfo_.add(builderForValue.build());
+ onChanged();
+ } else {
+ regionInfoBuilder_.addMessage(builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .RegionInfo region_info = 3;
+ */
+ public Builder addRegionInfo(
+ int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) {
+ if (regionInfoBuilder_ == null) {
+ ensureRegionInfoIsMutable();
+ regionInfo_.add(index, builderForValue.build());
+ onChanged();
+ } else {
+ regionInfoBuilder_.addMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .RegionInfo region_info = 3;
+ */
+ public Builder addAllRegionInfo(
+ java.lang.Iterable<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo> values) {
+ if (regionInfoBuilder_ == null) {
+ ensureRegionInfoIsMutable();
+ super.addAll(values, regionInfo_);
+ onChanged();
+ } else {
+ regionInfoBuilder_.addAllMessages(values);
+ }
+ return this;
+ }
+ /**
+ * repeated .RegionInfo region_info = 3;
+ */
+ public Builder clearRegionInfo() {
+ if (regionInfoBuilder_ == null) {
+ regionInfo_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000004);
+ onChanged();
+ } else {
+ regionInfoBuilder_.clear();
+ }
+ return this;
+ }
+ /**
+ * repeated .RegionInfo region_info = 3;
+ */
+ public Builder removeRegionInfo(int index) {
+ if (regionInfoBuilder_ == null) {
+ ensureRegionInfoIsMutable();
+ regionInfo_.remove(index);
+ onChanged();
+ } else {
+ regionInfoBuilder_.remove(index);
+ }
+ return this;
+ }
+ /**
+ * repeated .RegionInfo region_info = 3;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder getRegionInfoBuilder(
+ int index) {
+ return getRegionInfoFieldBuilder().getBuilder(index);
+ }
+ /**
+ * repeated .RegionInfo region_info = 3;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder(
+ int index) {
+ if (regionInfoBuilder_ == null) {
+ return regionInfo_.get(index); } else {
+ return regionInfoBuilder_.getMessageOrBuilder(index);
+ }
+ }
+ /**
+ * repeated .RegionInfo region_info = 3;
+ */
+ public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>
+ getRegionInfoOrBuilderList() {
+ if (regionInfoBuilder_ != null) {
+ return regionInfoBuilder_.getMessageOrBuilderList();
+ } else {
+ return java.util.Collections.unmodifiableList(regionInfo_);
+ }
+ }
+ /**
+ * repeated .RegionInfo region_info = 3;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder addRegionInfoBuilder() {
+ return getRegionInfoFieldBuilder().addBuilder(
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance());
+ }
+ /**
+ * repeated .RegionInfo region_info = 3;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder addRegionInfoBuilder(
+ int index) {
+ return getRegionInfoFieldBuilder().addBuilder(
+ index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance());
+ }
+ /**
+ * repeated .RegionInfo region_info = 3;
+ */
+ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder>
+ getRegionInfoBuilderList() {
+ return getRegionInfoFieldBuilder().getBuilderList();
+ }
+ private com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>
+ getRegionInfoFieldBuilder() {
+ if (regionInfoBuilder_ == null) {
+ regionInfoBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>(
+ regionInfo_,
+ ((bitField0_ & 0x00000004) == 0x00000004),
+ getParentForChildren(),
+ isClean());
+ regionInfo_ = null;
+ }
+ return regionInfoBuilder_;
+ }
+
+ // @@protoc_insertion_point(builder_scope:CreateTableState)
+ }
+
+ static {
+ defaultInstance = new CreateTableState(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:CreateTableState)
+ }
+
+ public interface DeleteTableStateOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required .UserInformation user_info = 1;
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ boolean hasUserInfo();
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo();
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder();
+
+ // required .TableName table_name = 2;
+ /**
+ * required .TableName table_name = 2;
+ */
+ boolean hasTableName();
+ /**
+ * required .TableName table_name = 2;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName();
+ /**
+ * required .TableName table_name = 2;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder();
+
+ // repeated .RegionInfo region_info = 3;
+ /**
+ * repeated .RegionInfo region_info = 3;
+ */
+ java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo>
+ getRegionInfoList();
+ /**
+ * repeated .RegionInfo region_info = 3;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo(int index);
+ /**
+ * repeated .RegionInfo region_info = 3;
+ */
+ int getRegionInfoCount();
+ /**
+ * repeated .RegionInfo region_info = 3;
+ */
+ java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>
+ getRegionInfoOrBuilderList();
+ /**
+ * repeated .RegionInfo region_info = 3;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder(
+ int index);
+ }
+ /**
+ * Protobuf type {@code DeleteTableState}
+ */
+ public static final class DeleteTableState extends
+ com.google.protobuf.GeneratedMessage
+ implements DeleteTableStateOrBuilder {
+ // Use DeleteTableState.newBuilder() to construct.
+ private DeleteTableState(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private DeleteTableState(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final DeleteTableState defaultInstance;
+ public static DeleteTableState getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public DeleteTableState getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private DeleteTableState(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ subBuilder = userInfo_.toBuilder();
+ }
+ userInfo_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(userInfo_);
+ userInfo_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000001;
+ break;
+ }
+ case 18: {
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ subBuilder = tableName_.toBuilder();
+ }
+ tableName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(tableName_);
+ tableName_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000002;
+ break;
+ }
+ case 26: {
+ if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
+ regionInfo_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo>();
+ mutable_bitField0_ |= 0x00000004;
+ }
+ regionInfo_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.PARSER, extensionRegistry));
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
+ regionInfo_ = java.util.Collections.unmodifiableList(regionInfo_);
+ }
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_DeleteTableState_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_DeleteTableState_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableState.class, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableState.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<DeleteTableState> PARSER =
+ new com.google.protobuf.AbstractParser<DeleteTableState>() {
+ public DeleteTableState parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new DeleteTableState(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<DeleteTableState> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // required .UserInformation user_info = 1;
+ public static final int USER_INFO_FIELD_NUMBER = 1;
+ private org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation userInfo_;
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ public boolean hasUserInfo() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo() {
+ return userInfo_;
+ }
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder() {
+ return userInfo_;
+ }
+
+ // required .TableName table_name = 2;
+ public static final int TABLE_NAME_FIELD_NUMBER = 2;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_;
+ /**
+ * required .TableName table_name = 2;
+ */
+ public boolean hasTableName() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * required .TableName table_name = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() {
+ return tableName_;
+ }
+ /**
+ * required .TableName table_name = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() {
+ return tableName_;
+ }
+
+ // repeated .RegionInfo region_info = 3;
+ public static final int REGION_INFO_FIELD_NUMBER = 3;
+ private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo> regionInfo_;
+ /**
+ * repeated .RegionInfo region_info = 3;
+ */
+ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo> getRegionInfoList() {
+ return regionInfo_;
+ }
+ /**
+ * repeated .RegionInfo region_info = 3;
+ */
+ public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>
+ getRegionInfoOrBuilderList() {
+ return regionInfo_;
+ }
+ /**
+ * repeated .RegionInfo region_info = 3;
+ */
+ public int getRegionInfoCount() {
+ return regionInfo_.size();
+ }
+ /**
+ * repeated .RegionInfo region_info = 3;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo(int index) {
+ return regionInfo_.get(index);
+ }
+ /**
+ * repeated .RegionInfo region_info = 3;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder(
+ int index) {
+ return regionInfo_.get(index);
+ }
+
+ private void initFields() {
+ userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance();
+ tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+ regionInfo_ = java.util.Collections.emptyList();
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasUserInfo()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasTableName()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!getUserInfo().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!getTableName().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ for (int i = 0; i < getRegionInfoCount(); i++) {
+ if (!getRegionInfo(i).isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeMessage(1, userInfo_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeMessage(2, tableName_);
+ }
+ for (int i = 0; i < regionInfo_.size(); i++) {
+ output.writeMessage(3, regionInfo_.get(i));
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(1, userInfo_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(2, tableName_);
+ }
+ for (int i = 0; i < regionInfo_.size(); i++) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(3, regionInfo_.get(i));
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableState)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableState other = (org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableState) obj;
+
+ boolean result = true;
+ result = result && (hasUserInfo() == other.hasUserInfo());
+ if (hasUserInfo()) {
+ result = result && getUserInfo()
+ .equals(other.getUserInfo());
+ }
+ result = result && (hasTableName() == other.hasTableName());
+ if (hasTableName()) {
+ result = result && getTableName()
+ .equals(other.getTableName());
+ }
+ result = result && getRegionInfoList()
+ .equals(other.getRegionInfoList());
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasUserInfo()) {
+ hash = (37 * hash) + USER_INFO_FIELD_NUMBER;
+ hash = (53 * hash) + getUserInfo().hashCode();
+ }
+ if (hasTableName()) {
+ hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER;
+ hash = (53 * hash) + getTableName().hashCode();
+ }
+ if (getRegionInfoCount() > 0) {
+ hash = (37 * hash) + REGION_INFO_FIELD_NUMBER;
+ hash = (53 * hash) + getRegionInfoList().hashCode();
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableState parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableState parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableState parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableState parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableState parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableState parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableState parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableState parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableState parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableState parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableState prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code DeleteTableState}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_DeleteTableState_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_DeleteTableState_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableState.class, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableState.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableState.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getUserInfoFieldBuilder();
+ getTableNameFieldBuilder();
+ getRegionInfoFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ if (userInfoBuilder_ == null) {
+ userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance();
+ } else {
+ userInfoBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
+ if (tableNameBuilder_ == null) {
+ tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+ } else {
+ tableNameBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000002);
+ if (regionInfoBuilder_ == null) {
+ regionInfo_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000004);
+ } else {
+ regionInfoBuilder_.clear();
+ }
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_DeleteTableState_descriptor;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableState getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableState.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableState build() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableState result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableState buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableState result = new org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableState(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ if (userInfoBuilder_ == null) {
+ result.userInfo_ = userInfo_;
+ } else {
+ result.userInfo_ = userInfoBuilder_.build();
+ }
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ if (tableNameBuilder_ == null) {
+ result.tableName_ = tableName_;
+ } else {
+ result.tableName_ = tableNameBuilder_.build();
+ }
+ if (regionInfoBuilder_ == null) {
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ regionInfo_ = java.util.Collections.unmodifiableList(regionInfo_);
+ bitField0_ = (bitField0_ & ~0x00000004);
+ }
+ result.regionInfo_ = regionInfo_;
+ } else {
+ result.regionInfo_ = regionInfoBuilder_.build();
+ }
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableState) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableState)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableState other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableState.getDefaultInstance()) return this;
+ if (other.hasUserInfo()) {
+ mergeUserInfo(other.getUserInfo());
+ }
+ if (other.hasTableName()) {
+ mergeTableName(other.getTableName());
+ }
+ if (regionInfoBuilder_ == null) {
+ if (!other.regionInfo_.isEmpty()) {
+ if (regionInfo_.isEmpty()) {
+ regionInfo_ = other.regionInfo_;
+ bitField0_ = (bitField0_ & ~0x00000004);
+ } else {
+ ensureRegionInfoIsMutable();
+ regionInfo_.addAll(other.regionInfo_);
+ }
+ onChanged();
+ }
+ } else {
+ if (!other.regionInfo_.isEmpty()) {
+ if (regionInfoBuilder_.isEmpty()) {
+ regionInfoBuilder_.dispose();
+ regionInfoBuilder_ = null;
+ regionInfo_ = other.regionInfo_;
+ bitField0_ = (bitField0_ & ~0x00000004);
+ regionInfoBuilder_ =
+ com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
+ getRegionInfoFieldBuilder() : null;
+ } else {
+ regionInfoBuilder_.addAllMessages(other.regionInfo_);
+ }
+ }
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasUserInfo()) {
+
+ return false;
+ }
+ if (!hasTableName()) {
+
+ return false;
+ }
+ if (!getUserInfo().isInitialized()) {
+
+ return false;
+ }
+ if (!getTableName().isInitialized()) {
+
+ return false;
+ }
+ for (int i = 0; i < getRegionInfoCount(); i++) {
+ if (!getRegionInfo(i).isInitialized()) {
+
+ return false;
+ }
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableState parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableState) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // required .UserInformation user_info = 1;
+ private org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder> userInfoBuilder_;
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ public boolean hasUserInfo() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo() {
+ if (userInfoBuilder_ == null) {
+ return userInfo_;
+ } else {
+ return userInfoBuilder_.getMessage();
+ }
+ }
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ public Builder setUserInfo(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation value) {
+ if (userInfoBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ userInfo_ = value;
+ onChanged();
+ } else {
+ userInfoBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ public Builder setUserInfo(
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder builderForValue) {
+ if (userInfoBuilder_ == null) {
+ userInfo_ = builderForValue.build();
+ onChanged();
+ } else {
+ userInfoBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ public Builder mergeUserInfo(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation value) {
+ if (userInfoBuilder_ == null) {
+ if (((bitField0_ & 0x00000001) == 0x00000001) &&
+ userInfo_ != org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance()) {
+ userInfo_ =
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.newBuilder(userInfo_).mergeFrom(value).buildPartial();
+ } else {
+ userInfo_ = value;
+ }
+ onChanged();
+ } else {
+ userInfoBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ public Builder clearUserInfo() {
+ if (userInfoBuilder_ == null) {
+ userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance();
+ onChanged();
+ } else {
+ userInfoBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
+ }
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder getUserInfoBuilder() {
+ bitField0_ |= 0x00000001;
+ onChanged();
+ return getUserInfoFieldBuilder().getBuilder();
+ }
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder() {
+ if (userInfoBuilder_ != null) {
+ return userInfoBuilder_.getMessageOrBuilder();
+ } else {
+ return userInfo_;
+ }
+ }
+ /**
+ * required .UserInformation user_info = 1;
+ */
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder>
+ getUserInfoFieldBuilder() {
+ if (userInfoBuilder_ == null) {
+ userInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder>(
+ userInfo_,
+ getParentForChildren(),
+ isClean());
+ userInfo_ = null;
+ }
+ return userInfoBuilder_;
+ }
+
+ // required .TableName table_name = 2;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableNameBuilder_;
+ /**
+ * required .TableName table_name = 2;
+ */
+ public boolean hasTableName() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * required .TableName table_name = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() {
+ if (tableNameBuilder_ == null) {
+ return tableName_;
+ } else {
+ return tableNameBuilder_.getMessage();
+ }
+ }
+ /**
+ * required .TableName table_name = 2;
+ */
+ public Builder setTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) {
+ if (tableNameBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ tableName_ = value;
+ onChanged();
+ } else {
+ tableNameBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * required .TableName table_name = 2;
+ */
+ public Builder setTableName(
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) {
+ if (tableNameBuilder_ == null) {
+ tableName_ = builderForValue.build();
+ onChanged();
+ } else {
+ tableNameBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * required .TableName table_name = 2;
+ */
+ public Builder mergeTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) {
+ if (tableNameBuilder_ == null) {
+ if (((bitField0_ & 0x00000002) == 0x00000002) &&
+ tableName_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) {
+ tableName_ =
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial();
+ } else {
+ tableName_ = value;
+ }
+ onChanged();
+ } else {
+ tableNameBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * required .TableName table_name = 2;
+ */
+ public Builder clearTableName() {
+ if (tableNameBuilder_ == null) {
+ tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+ onChanged();
+ } else {
+ tableNameBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000002);
+ return this;
+ }
+ /**
+ * required .TableName table_name = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableNameBuilder() {
+ bitField0_ |= 0x00000002;
+ onChanged();
+ return getTableNameFieldBuilder().getBuilder();
+ }
+ /**
+ * required .TableName table_name = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() {
+ if (tableNameBuilder_ != null) {
+ return tableNameBuilder_.getMessageOrBuilder();
+ } else {
+ return tableName_;
+ }
+ }
+ /**
+ * required .TableName table_name = 2;
+ */
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>
+ getTableNameFieldBuilder() {
+ if (tableNameBuilder_ == null) {
+ tableNameBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>(
+ tableName_,
+ getParentForChildren(),
+ isClean());
+ tableName_ = null;
+ }
+ return tableNameBuilder_;
+ }
+
+ // repeated .RegionInfo region_info = 3;
+ private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo> regionInfo_ =
+ java.util.Collections.emptyList();
+ private void ensureRegionInfoIsMutable() {
+ if (!((bitField0_ & 0x00000004) == 0x00000004)) {
+ regionInfo_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo>(regionInfo_);
+ bitField0_ |= 0x00000004;
+ }
+ }
+
+ private com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> regionInfoBuilder_;
+
+ /**
+ * repeated .RegionInfo region_info = 3;
+ */
+ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo> getRegionInfoList() {
+ if (regionInfoBuilder_ == null) {
+ return java.util.Collections.unmodifiableList(regionInfo_);
+ } else {
+ return regionInfoBuilder_.getMessageList();
+ }
+ }
+ /**
+ * repeated .RegionInfo region_info = 3;
+ */
+ public int getRegionInfoCount() {
+ if (regionInfoBuilder_ == null) {
+ return regionInfo_.size();
+ } else {
+ return regionInfoBuilder_.getCount();
+ }
+ }
+ /**
+ * repeated .RegionInfo region_info = 3;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo(int index) {
+ if (regionInfoBuilder_ == null) {
+ return regionInfo_.get(index);
+ } else {
+ return regionInfoBuilder_.getMessage(index);
+ }
+ }
+ /**
+ * repeated .RegionInfo region_info = 3;
+ */
+ public Builder setRegionInfo(
+ int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) {
+ if (regionInfoBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureRegionInfoIsMutable();
+ regionInfo_.set(index, value);
+ onChanged();
+ } else {
+ regionInfoBuilder_.setMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * repeated .RegionInfo region_info = 3;
+ */
+ public Builder setRegionInfo(
+ int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) {
+ if (regionInfoBuilder_ == null) {
+ ensureRegionInfoIsMutable();
+ regionInfo_.set(index, builderForValue.build());
+ onChanged();
+ } else {
+ regionInfoBuilder_.setMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .RegionInfo region_info = 3;
+ */
+ public Builder addRegionInfo(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) {
+ if (regionInfoBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureRegionInfoIsMutable();
+ regionInfo_.add(value);
+ onChanged();
+ } else {
+ regionInfoBuilder_.addMessage(value);
+ }
+ return this;
+ }
+ /**
+ * repeated .RegionInfo region_info = 3;
+ */
+ public Builder addRegionInfo(
+ int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) {
+ if (regionInfoBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureRegionInfoIsMutable();
+ regionInfo_.add(index, value);
+ onChanged();
+ } else {
+ regionInfoBuilder_.addMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * repeated .RegionInfo region_info = 3;
+ */
+ public Builder addRegionInfo(
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) {
+ if (regionInfoBuilder_ == null) {
+ ensureRegionInfoIsMutable();
+ regionInfo_.add(builderForValue.build());
+ onChanged();
+ } else {
+ regionInfoBuilder_.addMessage(builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .RegionInfo region_info = 3;
+ */
+ public Builder addRegionInfo(
+ int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) {
+ if (regionInfoBuilder_ == null) {
+ ensureRegionInfoIsMutable();
+ regionInfo_.add(index, builderForValue.build());
+ onChanged();
+ } else {
+ regionInfoBuilder_.addMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .RegionInfo region_info = 3;
+ */
+ public Builder addAllRegionInfo(
+ java.lang.Iterable<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo> values) {
+ if (regionInfoBuilder_ == null) {
+ ensureRegionInfoIsMutable();
+ super.addAll(values, regionInfo_);
+ onChanged();
+ } else {
+ regionInfoBuilder_.addAllMessages(values);
+ }
+ return this;
+ }
+ /**
+ * repeated .RegionInfo region_info = 3;
+ */
+ public Builder clearRegionInfo() {
+ if (regionInfoBuilder_ == null) {
+ regionInfo_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000004);
+ onChanged();
+ } else {
+ regionInfoBuilder_.clear();
+ }
+ return this;
+ }
+ /**
+ * repeated .RegionInfo region_info = 3;
+ */
+ public Builder removeRegionInfo(int index) {
+ if (regionInfoBuilder_ == null) {
+ ensureRegionInfoIsMutable();
+ regionInfo_.remove(index);
+ onChanged();
+ } else {
+ regionInfoBuilder_.remove(index);
+ }
+ return this;
+ }
+ /**
+ * repeated .RegionInfo region_info = 3;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder getRegionInfoBuilder(
+ int index) {
+ return getRegionInfoFieldBuilder().getBuilder(index);
+ }
+ /**
+ * repeated .RegionInfo region_info = 3;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder(
+ int index) {
+ if (regionInfoBuilder_ == null) {
+ return regionInfo_.get(index); } else {
+ return regionInfoBuilder_.getMessageOrBuilder(index);
+ }
+ }
+ /**
+ * repeated .RegionInfo region_info = 3;
+ */
+ public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>
+ getRegionInfoOrBuilderList() {
+ if (regionInfoBuilder_ != null) {
+ return regionInfoBuilder_.getMessageOrBuilderList();
+ } else {
+ return java.util.Collections.unmodifiableList(regionInfo_);
+ }
+ }
+ /**
+ * repeated .RegionInfo region_info = 3;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder addRegionInfoBuilder() {
+ return getRegionInfoFieldBuilder().addBuilder(
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance());
+ }
+ /**
+ * repeated .RegionInfo region_info = 3;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder addRegionInfoBuilder(
+ int index) {
+ return getRegionInfoFieldBuilder().addBuilder(
+ index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance());
+ }
+ /**
+ * repeated .RegionInfo region_info = 3;
+ */
+ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder>
+ getRegionInfoBuilderList() {
+ return getRegionInfoFieldBuilder().getBuilderList();
+ }
+ private com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>
+ getRegionInfoFieldBuilder() {
+ if (regionInfoBuilder_ == null) {
+ regionInfoBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>(
+ regionInfo_,
+ ((bitField0_ & 0x00000004) == 0x00000004),
+ getParentForChildren(),
+ isClean());
+ regionInfo_ = null;
+ }
+ return regionInfoBuilder_;
+ }
+
+ // @@protoc_insertion_point(builder_scope:DeleteTableState)
+ }
+
+ static {
+ defaultInstance = new DeleteTableState(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:DeleteTableState)
+ }
+
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_CreateTableState_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_CreateTableState_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_DeleteTableState_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_DeleteTableState_fieldAccessorTable;
+
+ public static com.google.protobuf.Descriptors.FileDescriptor
+ getDescriptor() {
+ return descriptor;
+ }
+ private static com.google.protobuf.Descriptors.FileDescriptor
+ descriptor;
+ static {
+ java.lang.String[] descriptorData = {
+ "\n\025MasterProcedure.proto\032\013HBase.proto\032\tRP" +
+ "C.proto\"}\n\020CreateTableState\022#\n\tuser_info" +
+ "\030\001 \002(\0132\020.UserInformation\022\"\n\014table_schema" +
+ "\030\002 \002(\0132\014.TableSchema\022 \n\013region_info\030\003 \003(" +
+ "\0132\013.RegionInfo\"y\n\020DeleteTableState\022#\n\tus" +
+ "er_info\030\001 \002(\0132\020.UserInformation\022\036\n\ntable" +
+ "_name\030\002 \002(\0132\n.TableName\022 \n\013region_info\030\003" +
+ " \003(\0132\013.RegionInfoBK\n*org.apache.hadoop.h" +
+ "base.protobuf.generatedB\025MasterProcedure" +
+ "ProtosH\001\210\001\001\240\001\001"
+ };
+ com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
+ new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
+ public com.google.protobuf.ExtensionRegistry assignDescriptors(
+ com.google.protobuf.Descriptors.FileDescriptor root) {
+ descriptor = root;
+ internal_static_CreateTableState_descriptor =
+ getDescriptor().getMessageTypes().get(0);
+ internal_static_CreateTableState_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_CreateTableState_descriptor,
+ new java.lang.String[] { "UserInfo", "TableSchema", "RegionInfo", });
+ internal_static_DeleteTableState_descriptor =
+ getDescriptor().getMessageTypes().get(1);
+ internal_static_DeleteTableState_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_DeleteTableState_descriptor,
+ new java.lang.String[] { "UserInfo", "TableName", "RegionInfo", });
+ return null;
+ }
+ };
+ com.google.protobuf.Descriptors.FileDescriptor
+ .internalBuildGeneratedFileFrom(descriptorData,
+ new com.google.protobuf.Descriptors.FileDescriptor[] {
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.getDescriptor(),
+ org.apache.hadoop.hbase.protobuf.generated.RPCProtos.getDescriptor(),
+ }, assigner);
+ }
+
+ // @@protoc_insertion_point(outer_class_scope)
+}
diff --git a/hbase-protocol/src/main/protobuf/MasterProcedure.proto b/hbase-protocol/src/main/protobuf/MasterProcedure.proto
new file mode 100644
index 0000000..eb40467
--- /dev/null
+++ b/hbase-protocol/src/main/protobuf/MasterProcedure.proto
@@ -0,0 +1,37 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+option java_package = "org.apache.hadoop.hbase.protobuf.generated";
+option java_outer_classname = "MasterProcedureProtos";
+option java_generic_services = true;
+option java_generate_equals_and_hash = true;
+option optimize_for = SPEED;
+
+import "HBase.proto";
+import "RPC.proto";
+
+message CreateTableState {
+ required UserInformation user_info = 1;
+ required TableSchema table_schema = 2;
+ repeated RegionInfo region_info = 3;
+}
+
+message DeleteTableState {
+ required UserInformation user_info = 1;
+ required TableName table_name = 2;
+ repeated RegionInfo region_info = 3;
+}
\ No newline at end of file
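
A side note on the two messages above (commentary, not part of the patch): they are meant to be written and read back with protobuf's delimited stream calls, which is what the new procedures do in serializeStateData()/deserializeStateData() further down. A minimal round-trip sketch, assuming an in-memory stream and a made-up effective user purely for illustration:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos;
    import org.apache.hadoop.hbase.protobuf.generated.RPCProtos;

    public class CreateTableStateRoundTrip {
      // Builds a CreateTableState, writes it length-delimited, and parses it back.
      static MasterProcedureProtos.CreateTableState roundTrip(final HTableDescriptor htd)
          throws IOException {
        MasterProcedureProtos.CreateTableState state =
            MasterProcedureProtos.CreateTableState.newBuilder()
                .setUserInfo(RPCProtos.UserInformation.newBuilder()
                    .setEffectiveUser("hbase")   // made-up user, for illustration only
                    .build())
                .setTableSchema(htd.convert())   // same conversion used by the procedure below
                .build();
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        state.writeDelimitedTo(out);
        return MasterProcedureProtos.CreateTableState.parseDelimitedFrom(
            new ByteArrayInputStream(out.toByteArray()));
      }
    }
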
diff --git a/hbase-server/pom.xml b/hbase-server/pom.xml
index 6df5293..3eb701d 100644
--- a/hbase-server/pom.xml
+++ b/hbase-server/pom.xml
@@ -304,6 +304,10 @@
<groupId>org.apache.hbase</groupId>
+ <artifactId>hbase-procedure</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hbase</groupId>
<artifactId>hbase-client</artifactId>
@@ -324,6 +328,12 @@
<scope>test</scope>
+ <groupId>org.apache.hbase</groupId>
+ <artifactId>hbase-procedure</artifactId>
+ <type>test-jar</type>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
<groupId>commons-httpclient</groupId>
<artifactId>commons-httpclient</artifactId>
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 5762946..95771eb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -91,8 +91,6 @@ import org.apache.hadoop.hbase.master.balancer.ClusterStatusChore;
import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory;
import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
import org.apache.hadoop.hbase.master.cleaner.LogCleaner;
-import org.apache.hadoop.hbase.master.handler.CreateTableHandler;
-import org.apache.hadoop.hbase.master.handler.DeleteTableHandler;
import org.apache.hadoop.hbase.master.handler.DisableTableHandler;
import org.apache.hadoop.hbase.master.handler.DispatchMergingRegionHandler;
import org.apache.hadoop.hbase.master.handler.EnableTableHandler;
@@ -101,11 +99,17 @@ import org.apache.hadoop.hbase.master.handler.TableAddFamilyHandler;
import org.apache.hadoop.hbase.master.handler.TableDeleteFamilyHandler;
import org.apache.hadoop.hbase.master.handler.TableModifyFamilyHandler;
import org.apache.hadoop.hbase.master.handler.TruncateTableHandler;
+import org.apache.hadoop.hbase.master.procedure.CreateTableProcedure;
+import org.apache.hadoop.hbase.master.procedure.DeleteTableProcedure;
+import org.apache.hadoop.hbase.master.procedure.MasterProcedureConstants;
+import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
import org.apache.hadoop.hbase.monitoring.MemoryBoundedLogMessageBuffer;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.monitoring.TaskMonitor;
import org.apache.hadoop.hbase.procedure.MasterProcedureManagerHost;
+import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
+import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore;
import org.apache.hadoop.hbase.procedure.flush.MasterFlushTableProcedureManager;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
@@ -124,6 +128,7 @@ import org.apache.hadoop.hbase.util.EncryptionTest;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.HFileArchiveUtil;
import org.apache.hadoop.hbase.util.HasThread;
+import org.apache.hadoop.hbase.util.ModifyRegionUtils;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.util.VersionInfo;
@@ -290,6 +295,9 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
private MasterQuotaManager quotaManager;
+ private ProcedureExecutor<MasterProcedureEnv> procedureExecutor;
+ private WALProcedureStore procedureStore;
+
// handle table states
private TableStateManager tableStateManager;
@@ -1001,6 +1009,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
// Any time changing this maxThreads to > 1, pls see the comment at
// AccessController#postCreateTableHandler
this.service.startExecutorService(ExecutorType.MASTER_TABLE_OPERATIONS, 1);
+ startProcedureExecutor();
// Start log cleaner thread
int cleanerInterval = conf.getInt("hbase.master.cleaner.interval", 60 * 1000);
@@ -1033,6 +1042,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
}
super.stopServiceThreads();
stopChores();
+ stopProcedureExecutor();
// Wait for all the remaining region servers to report in IFF we were
// running a cluster shutdown AND we were NOT aborting.
if (!isAborted() && this.serverManager != null &&
@@ -1053,6 +1063,35 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
if (this.mpmHost != null) this.mpmHost.stop("server shutting down.");
}
+ private void startProcedureExecutor() throws IOException {
+ final MasterProcedureEnv procEnv = new MasterProcedureEnv(this);
+ final Path logDir = new Path(fileSystemManager.getRootDir(),
+ MasterProcedureConstants.MASTER_PROCEDURE_LOGDIR);
+
+ procedureStore = new WALProcedureStore(conf, fileSystemManager.getFileSystem(), logDir,
+ new MasterProcedureEnv.WALStoreLeaseRecovery(this));
+ procedureExecutor = new ProcedureExecutor<MasterProcedureEnv>(conf, procEnv, procedureStore,
+ procEnv.getProcedureQueue());
+
+ final int numThreads = conf.getInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS,
+ Math.max(Runtime.getRuntime().availableProcessors(),
+ MasterProcedureConstants.DEFAULT_MIN_MASTER_PROCEDURE_THREADS));
+ procedureStore.start(numThreads);
+ procedureExecutor.start(numThreads);
+ }
+
+ private void stopProcedureExecutor() {
+ if (procedureExecutor != null) {
+ procedureExecutor.stop();
+ procedureExecutor = null;
+ }
+
+ if (procedureStore != null) {
+ procedureStore.stop();
+ procedureStore = null;
+ }
+ }
+
private void stopChores() {
if (this.balancerChore != null) {
this.balancerChore.cancel(true);
@@ -1289,7 +1328,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
String namespace = hTableDescriptor.getTableName().getNamespaceAsString();
ensureNamespaceExists(namespace);
- HRegionInfo[] newRegions = getHRegionInfos(hTableDescriptor, splitKeys);
+ HRegionInfo[] newRegions = ModifyRegionUtils.createHRegionInfos(hTableDescriptor, splitKeys);
checkInitialized();
sanityCheckTableDescriptor(hTableDescriptor);
this.quotaManager.checkNamespaceTableAndRegionQuota(hTableDescriptor.getTableName(),
@@ -1298,13 +1337,18 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
cpHost.preCreateTable(hTableDescriptor, newRegions);
}
LOG.info(getClientIdAuditPrefix() + " create " + hTableDescriptor);
- this.service.submit(new CreateTableHandler(this,
- this.fileSystemManager, hTableDescriptor, conf,
- newRegions, this).prepare());
+
+ long procId = this.procedureExecutor.submitProcedure(
+ new CreateTableProcedure(procedureExecutor.getEnvironment(),
+ hTableDescriptor, newRegions));
+
if (cpHost != null) {
cpHost.postCreateTable(hTableDescriptor, newRegions);
}
+ // TODO: change the interface to return the procId,
+ // and add it to the response protobuf.
+ //return procId;
}
/**
@@ -1511,29 +1555,6 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
RegionCoprocessorHost.testTableCoprocessorAttrs(conf, htd);
}
- private HRegionInfo[] getHRegionInfos(HTableDescriptor hTableDescriptor,
- byte[][] splitKeys) {
- long regionId = System.currentTimeMillis();
- HRegionInfo[] hRegionInfos = null;
- if (splitKeys == null || splitKeys.length == 0) {
- hRegionInfos = new HRegionInfo[]{new HRegionInfo(hTableDescriptor.getTableName(), null, null,
- false, regionId)};
- } else {
- int numRegions = splitKeys.length + 1;
- hRegionInfos = new HRegionInfo[numRegions];
- byte[] startKey = null;
- byte[] endKey = null;
- for (int i = 0; i < numRegions; i++) {
- endKey = (i == splitKeys.length) ? null : splitKeys[i];
- hRegionInfos[i] =
- new HRegionInfo(hTableDescriptor.getTableName(), startKey, endKey,
- false, regionId);
- startKey = endKey;
- }
- }
- return hRegionInfos;
- }
-
private static boolean isCatalogTable(final TableName tableName) {
return tableName.equals(TableName.META_TABLE_NAME);
}
@@ -1545,10 +1566,17 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
cpHost.preDeleteTable(tableName);
}
LOG.info(getClientIdAuditPrefix() + " delete " + tableName);
- this.service.submit(new DeleteTableHandler(tableName, this, this).prepare());
+
+ long procId = this.procedureExecutor.submitProcedure(
+ new DeleteTableProcedure(procedureExecutor.getEnvironment(), tableName));
+
if (cpHost != null) {
cpHost.postDeleteTable(tableName);
}
+
+ // TODO: change the interface to return the procId,
+ // and add it to the response protobuf.
+ //return procId;
}
@Override
@@ -1847,6 +1875,11 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
}
@Override
+ public ProcedureExecutor<MasterProcedureEnv> getMasterProcedureExecutor() {
+ return procedureExecutor;
+ }
+
+ @Override
public ServerName getServerName() {
return this.serverName;
}
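
One usage note (commentary, not part of the patch): submitProcedure() returns a procId immediately and the table is created in the background; the TODOs above are about surfacing that id to the client. Assuming the procedure2 executor exposes isFinished(long), a caller inside HMaster could wait for completion roughly like this sketch; the method name and the polling loop are illustrative only:

    // Sketch only: relies on the procedureExecutor field added to HMaster above.
    private long createTableAndWait(final HTableDescriptor htd, final HRegionInfo[] newRegions)
        throws IOException, InterruptedException {
      long procId = procedureExecutor.submitProcedure(
          new CreateTableProcedure(procedureExecutor.getEnvironment(), htd, newRegions));
      while (!procedureExecutor.isFinished(procId)) {
        Thread.sleep(100);   // a real caller would bound this wait and inspect the result
      }
      return procId;
    }
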
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
index 63f3119..7352fe8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
@@ -31,6 +31,8 @@ import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotDisabledException;
import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
import org.apache.hadoop.hbase.executor.ExecutorService;
import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
@@ -82,6 +84,11 @@ public interface MasterServices extends Server {
MasterQuotaManager getMasterQuotaManager();
/**
+ * @return Master's instance of {@link ProcedureExecutor}
+ */
+ ProcedureExecutor<MasterProcedureEnv> getMasterProcedureExecutor();
+
+ /**
* Check table is modifiable; i.e. exists and is offline.
* @param tableName Name of table to check.
* @throws TableNotDisabledException
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java
index 323ccee..1a49f65 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java
@@ -49,7 +49,7 @@ import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableState;
import org.apache.hadoop.hbase.constraint.ConstraintException;
-import org.apache.hadoop.hbase.master.handler.CreateTableHandler;
+import org.apache.hadoop.hbase.master.procedure.CreateTableProcedure;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.util.Bytes;
@@ -236,18 +236,15 @@ public class TableNamespaceManager {
}
private void createNamespaceTable(MasterServices masterServices) throws IOException {
- HRegionInfo newRegions[] = new HRegionInfo[]{
+ HRegionInfo[] newRegions = new HRegionInfo[]{
new HRegionInfo(HTableDescriptor.NAMESPACE_TABLEDESC.getTableName(), null, null)};
- //we need to create the table this way to bypass
- //checkInitialized
- masterServices.getExecutorService()
- .submit(new CreateTableHandler(masterServices,
- masterServices.getMasterFileSystem(),
- HTableDescriptor.NAMESPACE_TABLEDESC,
- masterServices.getConfiguration(),
- newRegions,
- masterServices).prepare());
+ // we need to create the table this way to bypass checkInitialized
+ masterServices.getMasterProcedureExecutor()
+ .submitProcedure(new CreateTableProcedure(
+ masterServices.getMasterProcedureExecutor().getEnvironment(),
+ HTableDescriptor.NAMESPACE_TABLEDESC,
+ newRegions));
}
/**
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
new file mode 100644
index 0000000..8b7d1ef
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
@@ -0,0 +1,441 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.procedure;
+
+import java.io.InputStream;
+import java.io.InterruptedIOException;
+import java.io.OutputStream;
+import java.io.IOException;
+import java.security.PrivilegedExceptionAction;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableDescriptor;
+import org.apache.hadoop.hbase.TableExistsException;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.RegionReplicaUtil;
+import org.apache.hadoop.hbase.client.TableState;
+import org.apache.hadoop.hbase.master.AssignmentManager;
+import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
+import org.apache.hadoop.hbase.master.MasterFileSystem;
+import org.apache.hadoop.hbase.master.ServerManager;
+import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos;
+import org.apache.hadoop.hbase.util.FSTableDescriptors;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.ModifyRegionUtils;
+import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil;
+import org.apache.hadoop.security.UserGroupInformation;
+
+import com.google.common.collect.Lists;
+
+@InterfaceAudience.Private
+public class CreateTableProcedure
+ extends StateMachineProcedure<MasterProcedureEnv, CreateTableState>
+ implements TableProcedureInterface {
+ private static final Log LOG = LogFactory.getLog(CreateTableProcedure.class);
+
+ enum CreateTableState {
+ PRE_CREATE,
+ CREATE_FS_LAYOUT,
+ ADD_TABLE_TO_META,
+ ASSIGN_REGIONS,
+ UPDATE_TABLE_DESC_CACHE,
+ POST_CREATE,
+ }
+
+ private AtomicBoolean aborted = new AtomicBoolean(false);
+ private HTableDescriptor hTableDescriptor;
+ private List<HRegionInfo> newRegions;
+ private UserGroupInformation user;
+
+ public CreateTableProcedure() {}
+
+ public CreateTableProcedure(final MasterProcedureEnv env,
+ final HTableDescriptor hTableDescriptor, final HRegionInfo[] newRegions)
+ throws IOException {
+ // quick check that the table does not already exist, done at submit time
+ // NOTE: this is only for compatibility with old clients that expect synchronous behaviour here
+ if (!markTableAsCreating(env, hTableDescriptor.getTableName())) {
+ throw new TableExistsException(hTableDescriptor.getTableName());
+ }
+
+ this.hTableDescriptor = hTableDescriptor;
+ this.newRegions = newRegions != null ? Lists.newArrayList(newRegions) : null;
+ this.user = env.getRequestUser().getUGI();
+ }
+
+ protected Flow executeFromState(final MasterProcedureEnv env, CreateTableState state) {
+ if (state == null) {
+ state = CreateTableState.PRE_CREATE;
+ setNextState(state);
+ }
+ if (LOG.isTraceEnabled()) {
+ LOG.trace(this + " execute state=" + state);
+ }
+ try {
+ switch (state) {
+ case PRE_CREATE:
+ preCreate(env);
+ setNextState(CreateTableState.CREATE_FS_LAYOUT);
+ break;
+ case CREATE_FS_LAYOUT:
+ createFsLayout(env);
+ setNextState(CreateTableState.ADD_TABLE_TO_META);
+ break;
+ case ADD_TABLE_TO_META:
+ addTableToMeta(env);
+ setNextState(CreateTableState.ASSIGN_REGIONS);
+ break;
+ case ASSIGN_REGIONS:
+ assignRegions(env);
+ setNextState(CreateTableState.UPDATE_TABLE_DESC_CACHE);
+ break;
+ case UPDATE_TABLE_DESC_CACHE:
+ updateTableDescCache(env);
+ setNextState(CreateTableState.POST_CREATE);
+ break;
+ case POST_CREATE:
+ postCreate(env);
+ return Flow.NO_MORE_STATE;
+ default:
+ throw new UnsupportedOperationException("unhandled state=" + state);
+ }
+ } catch (Exception e) {
+ LOG.error("Error trying to create table=" + getTableName() + " state=" + state, e);
+ setFailure("master-create-table", e);
+ }
+ return Flow.HAS_MORE_STATE;
+ }
+
+ protected void rollbackState(final MasterProcedureEnv env, final CreateTableState state) {
+ if (LOG.isTraceEnabled()) {
+ LOG.trace(this + " rollback state=" + state);
+ }
+ try {
+ switch (state) {
+ case POST_CREATE:
+ break;
+ case UPDATE_TABLE_DESC_CACHE:
+ DeleteTableProcedure.deleteTableDescriptorCache(env, getTableName());
+ break;
+ case ASSIGN_REGIONS:
+ DeleteTableProcedure.deleteAssignmentState(env, getTableName());
+ break;
+ case ADD_TABLE_TO_META:
+ DeleteTableProcedure.deleteFromMeta(env, getTableName(), newRegions);
+ break;
+ case CREATE_FS_LAYOUT:
+ DeleteTableProcedure.deleteFromFs(env, getTableName(), newRegions, false);
+ break;
+ case PRE_CREATE:
+ DeleteTableProcedure.deleteTableStates(env, getTableName());
+ // TODO-MAYBE: call the deleteTable coprocessor event?
+ break;
+ default:
+ throw new UnsupportedOperationException("unhandled state=" + state);
+ }
+ } catch (Exception e) {
+ // This will be retried. Unless there is a bug in the code,
+ // this should be just a "temporary error" (e.g. network down)
+ LOG.warn("Failed rollback attempt step=" + state + " table=" + getTableName(), e);
+ }
+ }
+
+ protected CreateTableState getState(final int stateId) {
+ return CreateTableState.values()[stateId];
+ }
+
+ private void setNextState(final CreateTableState state) {
+ if (aborted.get()) {
+ setAbortFailure("create-table", "abort requested");
+ } else {
+ setNextState(state.ordinal());
+ }
+ }
+
+ @Override
+ public TableName getTableName() {
+ return hTableDescriptor.getTableName();
+ }
+
+ @Override
+ public TableOperationType getTableOperationType() {
+ return TableOperationType.CREATE;
+ }
+
+ @Override
+ public void abort(final MasterProcedureEnv env) {
+ aborted.set(true);
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder();
+ sb.append("CreateTableProcedure(table=");
+ sb.append(getTableName());
+ sb.append(") procId=");
+ sb.append(getProcId());
+ sb.append(" user=");
+ sb.append(user);
+ return sb.toString();
+ }
+
+ @Override
+ public void serializeStateData(final OutputStream stream) throws IOException {
+ // TODO: Convert to protobuf MasterProcedure.proto
+ super.serializeStateData(stream);
+
+ MasterProcedureProtos.CreateTableState.Builder state =
+ MasterProcedureProtos.CreateTableState.newBuilder()
+ .setUserInfo(MasterProcedureUtil.toProtoUserInfo(this.user))
+ .setTableSchema(hTableDescriptor.convert());
+ if (newRegions != null) {
+ for (HRegionInfo hri: newRegions) {
+ state.addRegionInfo(HRegionInfo.convert(hri));
+ }
+ }
+ state.build().writeDelimitedTo(stream);
+ }
+
+ @Override
+ public void deserializeStateData(final InputStream stream) throws IOException {
+ super.deserializeStateData(stream);
+
+ MasterProcedureProtos.CreateTableState state =
+ MasterProcedureProtos.CreateTableState.parseDelimitedFrom(stream);
+ user = MasterProcedureUtil.toUserInfo(state.getUserInfo());
+ hTableDescriptor = HTableDescriptor.convert(state.getTableSchema());
+ if (state.getRegionInfoCount() == 0) {
+ newRegions = null;
+ } else {
+ newRegions = new ArrayList<HRegionInfo>(state.getRegionInfoCount());
+ for (HBaseProtos.RegionInfo hri: state.getRegionInfoList()) {
+ newRegions.add(HRegionInfo.convert(hri));
+ }
+ }
+ }
+
+ @Override
+ protected boolean acquireLock(final MasterProcedureEnv env) {
+ return env.getProcedureQueue().tryAcquireTableWrite(getTableName());
+ }
+
+ @Override
+ protected void releaseLock(final MasterProcedureEnv env) {
+ env.getProcedureQueue().releaseTableWrite(getTableName());
+ }
+
+ private boolean markTableAsCreating(final MasterProcedureEnv env, final TableName tableName)
+ throws IOException {
+ // TODO
+ if (!env.getProcedureQueue().markTableAsCreating(tableName)) {
+ // NOTE: this is only for compatibility with the old client
+ return false;
+ }
+ return !MetaTableAccessor.tableExists(env.getMasterServices().getConnection(), tableName);
+ }
+
+ private void preCreate(final MasterProcedureEnv env) throws IOException {
+ final TableName tableName = getTableName();
+ if (MetaTableAccessor.tableExists(env.getMasterServices().getConnection(), tableName)) {
+ throw new TableExistsException(tableName);
+ }
+
+ final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
+ if (cpHost != null) {
+ final HRegionInfo[] regions = newRegions == null ? null :
+ newRegions.toArray(new HRegionInfo[newRegions.size()]);
+ cpHost.preCreateTableHandler(hTableDescriptor, regions);
+ }
+ }
+
+ private void postCreate(final MasterProcedureEnv env)
+ throws IOException, InterruptedException {
+ final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
+ if (cpHost != null) {
+ final HRegionInfo[] regions = (newRegions == null) ? null :
+ newRegions.toArray(new HRegionInfo[newRegions.size()]);
+ user.doAs(new PrivilegedExceptionAction<Void>() {
+ @Override
+ public Void run() throws Exception {
+ cpHost.postCreateTableHandler(hTableDescriptor, regions);
+ return null;
+ }
+ });
+ }
+
+ env.getProcedureQueue().markTableAsCreated(getTableName());
+ }
+
+ private void createFsLayout(final MasterProcedureEnv env) throws IOException {
+ final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
+ final Path tempdir = mfs.getTempDir();
+
+ // 1. Create Table Descriptor
+ // using a copy of descriptor, table will be created enabling first
+ TableDescriptor underConstruction = new TableDescriptor(this.hTableDescriptor);
+ final Path tempTableDir = FSUtils.getTableDir(tempdir, getTableName());
+ ((FSTableDescriptors)(env.getMasterServices().getTableDescriptors()))
+ .createTableDescriptorForTableDirectory(
+ tempTableDir, underConstruction, false);
+
+ // 2. Create Regions
+ newRegions = handleCreateHdfsRegions(env, tempdir, getTableName());
+
+ // 3. Move Table temp directory to the hbase root location
+ final Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), getTableName());
+ FileSystem fs = mfs.getFileSystem();
+ if (!fs.delete(tableDir, true) && fs.exists(tableDir)) {
+ throw new IOException("Couldn't delete " + tableDir);
+ }
+ if (!fs.rename(tempTableDir, tableDir)) {
+ throw new IOException("Unable to move table from temp=" + tempTableDir +
+ " to hbase root=" + tableDir);
+ }
+ }
+
+ /**
+ * Creates the on-disk structure for the table and returns the region info.
+ * @param tableRootDir directory where the table is being created
+ * @param tableName name of the table under construction
+ * @return the list of regions created
+ */
+ protected List<HRegionInfo> handleCreateHdfsRegions(final MasterProcedureEnv env,
+ final Path tableRootDir, final TableName tableName) throws IOException {
+ HRegionInfo[] regions = newRegions != null ?
+ newRegions.toArray(new HRegionInfo[newRegions.size()]) : null;
+ return ModifyRegionUtils.createRegions(env.getMasterConfiguration(),
+ tableRootDir, hTableDescriptor, regions, null);
+ }
+
+ private void addTableToMeta(final MasterProcedureEnv env) throws IOException {
+ if (newRegions != null && newRegions.size() > 0) {
+ waitMetaRegions(env);
+
+ // Add regions to META
+ addRegionsToMeta(env, newRegions);
+ // Add replicas if needed
+ newRegions = addReplicas(env, hTableDescriptor, newRegions);
+
+ // Setup replication for region replicas if needed
+ if (hTableDescriptor.getRegionReplication() > 1) {
+ ServerRegionReplicaUtil.setupRegionReplicaReplication(env.getMasterConfiguration());
+ }
+ }
+ }
+
+ private void assignRegions(final MasterProcedureEnv env) throws IOException {
+ waitRegionServers(env);
+
+ // Trigger immediate assignment of the regions in round-robin fashion
+ final AssignmentManager assignmentManager = env.getMasterServices().getAssignmentManager();
+ ModifyRegionUtils.assignRegions(assignmentManager, newRegions);
+
+ // Enable table
+ assignmentManager.getTableStateManager()
+ .setTableState(getTableName(), TableState.State.ENABLED);
+ }
+
+ protected void waitMetaRegions(final MasterProcedureEnv env) throws IOException {
+ int timeout = env.getMasterConfiguration().getInt("hbase.client.catalog.timeout", 10000);
+ try {
+ if (env.getMasterServices().getMetaTableLocator().waitMetaRegionLocation(
+ env.getMasterServices().getZooKeeper(), timeout) == null) {
+ throw new NotAllMetaRegionsOnlineException();
+ }
+ } catch (InterruptedException e) {
+ throw (InterruptedIOException)new InterruptedIOException().initCause(e);
+ }
+ }
+
+ protected void waitRegionServers(final MasterProcedureEnv env) throws IOException {
+ ServerManager sm = env.getMasterServices().getServerManager();
+ long waitTime = env.getMasterServices().getConfiguration()
+ .getLong("hbase.master.wait.on.region", 5 * 60 * 1000);
+ long waitingTimeForEvents = env.getMasterServices().getConfiguration()
+ .getInt("hbase.master.event.waiting.time", 1000);
+ long done = System.currentTimeMillis() + waitTime;
+ while (System.currentTimeMillis() < done) {
+ List<ServerName> servers = sm.createDestinationServersList();
+ if (servers != null && !servers.isEmpty()) {
+ break;
+ }
+
+ // TODO: Use ProcedureYieldException instead of spinning
+ LOG.warn("Found no destination server to assign region(s)");
+ try {
+ Thread.sleep(waitingTimeForEvents);
+ } catch (InterruptedException e) {
+ LOG.warn("Interrupted while sleeping");
+ throw (InterruptedIOException)new InterruptedIOException().initCause(e);
+ }
+ }
+ }
+
+ /**
+ * Create any replicas for the regions (the default replicas that were
+ * already created are passed to the method)
+ * @param hTableDescriptor descriptor to use
+ * @param regions default replicas
+ * @return the combined list of default and non-default replicas
+ */
+ protected List<HRegionInfo> addReplicas(final MasterProcedureEnv env,
+ final HTableDescriptor hTableDescriptor,
+ final List<HRegionInfo> regions) {
+ int numRegionReplicas = hTableDescriptor.getRegionReplication() - 1;
+ if (numRegionReplicas <= 0) {
+ return regions;
+ }
+ List<HRegionInfo> hRegionInfos =
+ new ArrayList<HRegionInfo>((numRegionReplicas+1)*regions.size());
+ for (int i = 0; i < regions.size(); i++) {
+ for (int j = 1; j <= numRegionReplicas; j++) {
+ hRegionInfos.add(RegionReplicaUtil.getRegionInfoForReplica(regions.get(i), j));
+ }
+ }
+ hRegionInfos.addAll(regions);
+ return hRegionInfos;
+ }
+
+ /**
+ * Add the specified set of regions to the hbase:meta table.
+ */
+ protected void addRegionsToMeta(final MasterProcedureEnv env,
+ final List<HRegionInfo> regionInfos) throws IOException {
+ MetaTableAccessor.addRegionsToMeta(env.getMasterServices().getConnection(),
+ regionInfos, hTableDescriptor.getRegionReplication());
+ }
+
+ private void updateTableDescCache(final MasterProcedureEnv env) throws IOException {
+ env.getMasterServices().getTableDescriptors().get(getTableName());
+ }
+}
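
Before the delete-side twin below, one observation (commentary, not part of the patch): handleCreateHdfsRegions() and addReplicas() are protected so that a specialized create flow can swap in its own region-materialization step while reusing the rest of the state machine. A purely hypothetical subclass, as a sketch only:

    import java.io.IOException;
    import java.util.List;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.master.procedure.CreateTableProcedure;
    import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;

    // Hypothetical example class, not in the patch.
    public class AuditingCreateTableProcedure extends CreateTableProcedure {
      public AuditingCreateTableProcedure() {}   // needed for deserialization

      public AuditingCreateTableProcedure(final MasterProcedureEnv env,
          final HTableDescriptor htd, final HRegionInfo[] regions) throws IOException {
        super(env, htd, regions);
      }

      @Override
      protected List<HRegionInfo> handleCreateHdfsRegions(final MasterProcedureEnv env,
          final Path tableRootDir, final TableName tableName) throws IOException {
        // Reuse the default on-disk creation, then just report what was laid out.
        List<HRegionInfo> created = super.handleCreateHdfsRegions(env, tableRootDir, tableName);
        for (HRegionInfo hri : created) {
          System.out.println("created region " + hri.getRegionNameAsString());
        }
        return created;
      }
    }
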
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java
new file mode 100644
index 0000000..a1598c4
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java
@@ -0,0 +1,435 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.procedure;
+
+import java.io.InputStream;
+import java.io.InterruptedIOException;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.CoordinatedStateException;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableNotDisabledException;
+import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.backup.HFileArchiver;
+import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.master.AssignmentManager;
+import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
+import org.apache.hadoop.hbase.master.MasterFileSystem;
+import org.apache.hadoop.hbase.master.RegionStates;
+import org.apache.hadoop.hbase.master.RegionState.State;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
+import org.apache.hadoop.security.UserGroupInformation;
+
+@InterfaceAudience.Private
+public class DeleteTableProcedure
+ extends StateMachineProcedure<MasterProcedureEnv, DeleteTableState>
+ implements TableProcedureInterface {
+ private static final Log LOG = LogFactory.getLog(DeleteTableProcedure.class);
+
+ enum DeleteTableState {
+ PRE_DELETE,
+ DELETE_TABLE_DESC_CACHE,
+ UNASSIGN_REGIONS,
+ DELETE_TABLE_FROM_META,
+ DELETE_FS_LAYOUT,
+ POST_DELETE,
+ }
+
+ private List<HRegionInfo> regions;
+ private UserGroupInformation user;
+ private TableName tableName;
+
+ public DeleteTableProcedure() {}
+
+ public DeleteTableProcedure(final MasterProcedureEnv env, final TableName tableName)
+ throws IOException {
+ // quick check that the table exists and is modifiable, done at submit time
+ // NOTE: this is only for compatibility with old clients that expect synchronous behaviour here
+ env.getMasterServices().checkTableModifiable(tableName);
+ env.getProcedureQueue().markTableAsDeleting(tableName);
+
+ this.tableName = tableName;
+ this.user = env.getRequestUser().getUGI();
+ }
+
+ protected Flow executeFromState(final MasterProcedureEnv env, DeleteTableState state) {
+ if (state == null) {
+ LOG.debug("Starting deletion of table: " + tableName);
+ state = DeleteTableState.PRE_DELETE;
+ setNextState(state);
+ }
+ if (LOG.isTraceEnabled()) {
+ LOG.trace(this + " execute state=" + state);
+ }
+ try {
+ switch (state) {
+ case PRE_DELETE:
+ if (!preDelete(env)) {
+ assert isFailed() : "the delete should have an exception here";
+ return Flow.NO_MORE_STATE;
+ }
+
+ // TODO: Move out... in the acquireLock()
+ LOG.debug("waiting for '" + getTableName() + "' regions in transition");
+ regions = getRegionsFromMeta(env, getTableName());
+ waitRegionInTransition(env, regions);
+
+ setNextState(DeleteTableState.DELETE_FS_LAYOUT);
+ break;
+ case DELETE_FS_LAYOUT:
+ LOG.debug("delete '" + getTableName() + "' from filesystem");
+ DeleteTableProcedure.deleteFromFs(env, getTableName(), regions, true);
+ setNextState(DeleteTableState.DELETE_TABLE_DESC_CACHE);
+ break;
+ case DELETE_TABLE_DESC_CACHE:
+ LOG.debug("delete '" + getTableName() + "' descriptor");
+ DeleteTableProcedure.deleteTableDescriptorCache(env, getTableName());
+ setNextState(DeleteTableState.UNASSIGN_REGIONS);
+ break;
+ case UNASSIGN_REGIONS:
+ LOG.debug("delete '" + getTableName() + "' assignment state");
+ DeleteTableProcedure.deleteAssignmentState(env, getTableName());
+ setNextState(DeleteTableState.DELETE_TABLE_FROM_META);
+ break;
+ case DELETE_TABLE_FROM_META:
+ LOG.debug("delete '" + getTableName() + "' regions from META");
+ DeleteTableProcedure.deleteFromMeta(env, getTableName(), regions);
+ setNextState(DeleteTableState.POST_DELETE);
+ break;
+ case POST_DELETE:
+ postDelete(env);
+ LOG.debug("delete '" + getTableName() + "' completed");
+ return Flow.NO_MORE_STATE;
+ default:
+ throw new UnsupportedOperationException("unhandled state=" + state);
+ }
+ } catch (Exception e) {
+ LOG.warn("Error trying to delete table=" + getTableName() + " state=" + state, e);
+ }
+ return Flow.HAS_MORE_STATE;
+ }
+
+ protected void rollbackState(final MasterProcedureEnv env, final DeleteTableState state) {
+ if (state == DeleteTableState.PRE_DELETE) {
+ // nothing to rollback, pre-delete is just table-state checks.
+ // We can fail if the table does not exist or is not disabled.
+ return;
+ }
+
+ // The delete has no rollback: once past PRE_DELETE, execution only moves forward and will eventually succeed.
+ throw new UnsupportedOperationException();
+ }
+
+ protected DeleteTableState getState(final int stateId) {
+ return DeleteTableState.values()[stateId];
+ }
+
+ private void setNextState(final DeleteTableState state) {
+ setNextState(state.ordinal());
+ }
+
+ @Override
+ public TableName getTableName() {
+ return tableName;
+ }
+
+ @Override
+ public TableOperationType getTableOperationType() {
+ return TableOperationType.DELETE;
+ }
+
+ @Override
+ public void abort(final MasterProcedureEnv env) {
+ // TODO: We may be able to abort if the procedure is not started yet.
+ }
+
+ @Override
+ protected boolean acquireLock(final MasterProcedureEnv env) {
+ return env.getProcedureQueue().tryAcquireTableWrite(getTableName());
+ }
+
+ @Override
+ protected void releaseLock(final MasterProcedureEnv env) {
+ env.getProcedureQueue().releaseTableWrite(getTableName());
+ }
+
+ @Override
+ protected void completionCleanup(final MasterProcedureEnv env) {
+ // Remove the run-queue of the table
+ if (!env.getProcedureQueue().markTableAsDeleted(tableName)) {
+ // There are requests in the queue (e.g. a new create)
+ return;
+ }
+
+ // Remove the table lock
+ try {
+ env.getMasterServices().getTableLockManager().tableDeleted(getTableName());
+ } catch (IOException e) {
+ LOG.warn("Received exception from TableLockManager.tableDeleted:", e); //not critical
+ }
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder();
+ sb.append("DeleteTableProcedure(table=");
+ sb.append(getTableName());
+ sb.append(") procId=");
+ sb.append(getProcId());
+ sb.append(" user=");
+ sb.append(user);
+ return sb.toString();
+ }
+
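+ /**
+ * Wait for the regions of the table to be offline (no longer in transition),
+ * polling the AssignmentManager for up to "hbase.master.wait.on.region" ms per region.
+ */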
+ protected void waitRegionInTransition(final MasterProcedureEnv env,
+ final List<HRegionInfo> regions) throws IOException, CoordinatedStateException {
+ AssignmentManager am = env.getMasterServices().getAssignmentManager();
+ RegionStates states = am.getRegionStates();
+ long waitTime = env.getMasterServices().getConfiguration()
+ .getLong("hbase.master.wait.on.region", 5 * 60 * 1000);
+ long waitingTimeForEvents = env.getMasterServices().getConfiguration()
+ .getInt("hbase.master.event.waiting.time", 1000);
+ for (HRegionInfo region : regions) {
+ long done = System.currentTimeMillis() + waitTime;
+ while (System.currentTimeMillis() < done) {
+ if (states.isRegionInState(region, State.FAILED_OPEN)) {
+ am.regionOffline(region);
+ }
+ if (!states.isRegionInTransition(region)) break;
+ try {
+ Thread.sleep(waitingTimeForEvents);
+ } catch (InterruptedException e) {
+ LOG.warn("Interrupted while sleeping");
+ throw (InterruptedIOException)new InterruptedIOException().initCause(e);
+ }
+ LOG.debug("Waiting on region to clear regions in transition; "
+ + am.getRegionStates().getRegionTransitionState(region));
+ }
+ if (states.isRegionInTransition(region)) {
+ throw new IOException("Waited hbase.master.wait.on.region (" +
+ waitTime + "ms) for region to leave region " +
+ region.getRegionNameAsString() + " in transitions");
+ }
+ }
+ }
+
+ @Override
+ public void serializeStateData(final OutputStream stream) throws IOException {
+ super.serializeStateData(stream);
+
+ MasterProcedureProtos.DeleteTableState.Builder state =
+ MasterProcedureProtos.DeleteTableState.newBuilder()
+ .setUserInfo(MasterProcedureUtil.toProtoUserInfo(this.user))
+ .setTableName(ProtobufUtil.toProtoTableName(tableName));
+ if (regions != null) {
+ for (HRegionInfo hri: regions) {
+ state.addRegionInfo(HRegionInfo.convert(hri));
+ }
+ }
+ state.build().writeDelimitedTo(stream);
+ }
+
+ @Override
+ public void deserializeStateData(final InputStream stream) throws IOException {
+ super.deserializeStateData(stream);
+
+ MasterProcedureProtos.DeleteTableState state =
+ MasterProcedureProtos.DeleteTableState.parseDelimitedFrom(stream);
+ user = MasterProcedureUtil.toUserInfo(state.getUserInfo());
+ tableName = ProtobufUtil.toTableName(state.getTableName());
+ if (state.getRegionInfoCount() == 0) {
+ regions = null;
+ } else {
+ regions = new ArrayList<HRegionInfo>(state.getRegionInfoCount());
+ for (HBaseProtos.RegionInfo hri: state.getRegionInfoList()) {
+ regions.add(HRegionInfo.convert(hri));
+ }
+ }
+ }
+
+ private boolean preDelete(final MasterProcedureEnv env) throws IOException {
+ try {
+ env.getMasterServices().checkTableModifiable(tableName);
+ } catch (TableNotFoundException|TableNotDisabledException e) {
+ // report the actual cause (table missing or not disabled) back to the client
+ setFailure("master-delete-table", e);
+ return false;
+ }
+
+ final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
+ if (cpHost != null) {
+ cpHost.preDeleteTableHandler(tableName);
+ }
+ return true;
+ }
+
+ private void postDelete(final MasterProcedureEnv env)
+ throws IOException, InterruptedException {
+ deleteTableStates(env, tableName);
+
+ final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
+ if (cpHost != null) {
+ cpHost.postDeleteTableHandler(tableName);
+ }
+ }
+
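+ /**
+ * Remove the table from the filesystem: the table directory is moved under the
+ * HBase temp directory, its regions are archived (when requested), and the
+ * temp copy is deleted.
+ */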
+ protected static void deleteFromFs(final MasterProcedureEnv env,
+ final TableName tableName, final List<HRegionInfo> regions,
+ final boolean archive) throws IOException {
+ final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
+ final FileSystem fs = mfs.getFileSystem();
+ final Path tempdir = mfs.getTempDir();
+
+ final Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), tableName);
+ final Path tempTableDir = FSUtils.getTableDir(tempdir, tableName);
+
+ if (fs.exists(tableDir)) {
+ // Ensure temp exists
+ if (!fs.exists(tempdir) && !fs.mkdirs(tempdir)) {
+ throw new IOException("HBase temp directory '" + tempdir + "' creation failure.");
+ }
+
+ // Ensure parent exists
+ if (!fs.exists(tempTableDir.getParent()) && !fs.mkdirs(tempTableDir.getParent())) {
+ throw new IOException("HBase temp directory '" + tempdir + "' creation failure.");
+ }
+
+ // Move the table in /hbase/.tmp
+ if (!fs.rename(tableDir, tempTableDir)) {
+ if (fs.exists(tempTableDir)) {
+ // TODO
+ // what's in this dir? something old? probably something manual from the user...
+ // let's get rid of this stuff...
+ FileStatus[] files = fs.listStatus(tempTableDir);
+ if (files != null && files.length > 0) {
+ for (int i = 0; i < files.length; ++i) {
+ if (!files[i].isDir()) continue;
+ HFileArchiver.archiveRegion(fs, mfs.getRootDir(), tempTableDir, files[i].getPath());
+ }
+ }
+ fs.delete(tempTableDir, true);
+ }
+ throw new IOException("Unable to move '" + tableDir + "' to temp '" + tempTableDir + "'");
+ }
+ }
+
+ // Archive regions from FS (temp directory)
+ if (archive) {
+ for (HRegionInfo hri : regions) {
+ LOG.debug("Archiving region " + hri.getRegionNameAsString() + " from FS");
+ HFileArchiver.archiveRegion(fs, mfs.getRootDir(),
+ tempTableDir, HRegion.getRegionDir(tempTableDir, hri.getEncodedName()));
+ }
+ LOG.debug("Table '" + tableName + "' archived!");
+ }
+
+ // Delete table directory from FS (temp directory)
+ if (!fs.delete(tempTableDir, true) && fs.exists(tempTableDir)) {
+ throw new IOException("Couldn't delete " + tempTableDir);
+ }
+ }
+
+ protected static List<HRegionInfo> getRegionsFromMeta(final MasterProcedureEnv env,
+ final TableName tableName) throws IOException {
+ if (TableName.META_TABLE_NAME.equals(tableName)) {
+ return new MetaTableLocator().getMetaRegions(env.getMasterServices().getZooKeeper());
+ } else {
+ return MetaTableAccessor.getTableRegions(env.getMasterServices().getConnection(), tableName);
+ }
+ }
+
+ /**
+ * There may be items for this table still up in hbase:meta in the case where the
+ * info:regioninfo column was empty because of some write error. Remove ALL rows from hbase:meta
+ * that have to do with this table. See HBASE-12980.
+ * @throws IOException
+ */
+ private static void cleanAnyRemainingRows(final MasterProcedureEnv env,
+ final TableName tableName) throws IOException {
+ ClusterConnection connection = env.getMasterServices().getConnection();
+ Scan tableScan = MetaTableAccessor.getScanForTableName(connection, tableName);
+ try (Table metaTable =
+ connection.getTable(TableName.META_TABLE_NAME)) {
+ List<Delete> deletes = new ArrayList<Delete>();
+ try (ResultScanner resScanner = metaTable.getScanner(tableScan)) {
+ for (Result result : resScanner) {
+ deletes.add(new Delete(result.getRow()));
+ }
+ }
+ if (!deletes.isEmpty()) {
+ LOG.warn("Deleting some vestigal " + deletes.size() + " rows of " + tableName +
+ " from " + TableName.META_TABLE_NAME);
+ metaTable.delete(deletes);
+ }
+ }
+ }
+
+ protected static void deleteFromMeta(final MasterProcedureEnv env,
+ final TableName tableName, List<HRegionInfo> regions) throws IOException {
+ MetaTableAccessor.deleteRegions(env.getMasterServices().getConnection(), regions);
+
+ // Clean any remaining rows for this table.
+ cleanAnyRemainingRows(env, tableName);
+ }
+
+ protected static void deleteAssignmentState(final MasterProcedureEnv env,
+ final TableName tableName) throws IOException {
+ AssignmentManager am = env.getMasterServices().getAssignmentManager();
+
+ // Clean up regions of the table in RegionStates.
+ LOG.debug("Removing '" + tableName + "' from region states.");
+ am.getRegionStates().tableDeleted(tableName);
+
+ // If the table has an entry in the table state manager, mark it as deleted.
+ LOG.debug("Marking '" + tableName + "' as deleted.");
+ am.getTableStateManager().setDeletedTable(tableName);
+ }
+
+ protected static void deleteTableDescriptorCache(final MasterProcedureEnv env,
+ final TableName tableName) throws IOException {
+ LOG.debug("Removing '" + tableName + "' descriptor.");
+ env.getMasterServices().getTableDescriptors().remove(tableName);
+ }
+
+ protected static void deleteTableStates(final MasterProcedureEnv env, final TableName tableName)
+ throws IOException {
+ env.getMasterServices().getMasterQuotaManager().removeTableFromNamespaceQuota(tableName);
+ }
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureConstants.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureConstants.java
new file mode 100644
index 0000000..a76808e
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureConstants.java
@@ -0,0 +1,31 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.procedure;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
+@InterfaceAudience.Private
+public final class MasterProcedureConstants {
+ private MasterProcedureConstants() {}
+
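+ /** Directory name used by the procedure store for its WALs. */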
+ public static final String MASTER_PROCEDURE_LOGDIR = "MasterProcLogs";
+
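+ /** Configuration key for the number of threads used by the master procedure executor. */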
+ public static final String MASTER_PROCEDURE_THREADS = "hbase.master.procedure.threads";
+ public static final int DEFAULT_MIN_MASTER_PROCEDURE_THREADS = 4;
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureEnv.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureEnv.java
new file mode 100644
index 0000000..55c6b88
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureEnv.java
@@ -0,0 +1,102 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.procedure;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.ipc.RequestContext;
+import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
+import org.apache.hadoop.hbase.master.MasterServices;
+import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore;
+import org.apache.hadoop.hbase.security.User;
+import org.apache.hadoop.hbase.security.UserProvider;
+import org.apache.hadoop.hbase.util.CancelableProgressable;
+import org.apache.hadoop.hbase.util.FSUtils;
+
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class MasterProcedureEnv {
+ private static final Log LOG = LogFactory.getLog(MasterProcedureEnv.class);
+
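+ /**
+ * Lease recovery hook for the WALProcedureStore: recovers the lease on a procedure
+ * WAL file, retrying for as long as this master is still the active master.
+ */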
+ @InterfaceAudience.Private
+ public static class WALStoreLeaseRecovery implements WALProcedureStore.LeaseRecovery {
+ private final HMaster master;
+
+ public WALStoreLeaseRecovery(final HMaster master) {
+ this.master = master;
+ }
+
+ @Override
+ public void recoverFileLease(final FileSystem fs, final Path path) throws IOException {
+ final Configuration conf = master.getConfiguration();
+ final FSUtils fsUtils = FSUtils.getInstance(fs, conf);
+ fsUtils.recoverFileLease(fs, path, conf, new CancelableProgressable() {
+ @Override
+ public boolean progress() {
+ LOG.debug("Recover Procedure Store log lease: " + path);
+ return master.isActiveMaster();
+ }
+ });
+ }
+ }
+
+ private final MasterProcedureQueue procQueue;
+ private final MasterServices master;
+
+ public MasterProcedureEnv(final MasterServices master) {
+ this.master = master;
+ this.procQueue = new MasterProcedureQueue(master.getTableLockManager());
+ }
+
+ public User getRequestUser() throws IOException {
+ if (RequestContext.isInRequestContext()) {
+ return RequestContext.getRequestUser();
+ } else {
+ return UserProvider.instantiate(getMasterConfiguration()).getCurrent();
+ }
+ }
+
+ public MasterServices getMasterServices() {
+ return master;
+ }
+
+ public Configuration getMasterConfiguration() {
+ return master.getConfiguration();
+ }
+
+ public MasterCoprocessorHost getMasterCoprocessorHost() {
+ return master.getMasterCoprocessorHost();
+ }
+
+ public MasterProcedureQueue getProcedureQueue() {
+ return procQueue;
+ }
+
+ public boolean isRunning() {
+ return master.getMasterProcedureExecutor().isRunning();
+ }
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureQueue.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureQueue.java
new file mode 100644
index 0000000..913fe42
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureQueue.java
@@ -0,0 +1,315 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.procedure;
+
+import java.io.IOException;
+import java.util.ArrayDeque;
+import java.util.Deque;
+import java.util.concurrent.locks.Condition;
+import java.util.concurrent.locks.ReentrantLock;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.procedure2.Procedure;
+import org.apache.hadoop.hbase.procedure2.ProcedureFairRunQueues;
+import org.apache.hadoop.hbase.procedure2.ProcedureRunnableSet;
+import org.apache.hadoop.hbase.master.TableLockManager;
+import org.apache.hadoop.hbase.master.TableLockManager.TableLock;
+
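+/**
+ * ProcedureRunnableSet for the master procedures: runnable procedures are grouped
+ * into per-table run queues scheduled through a fair-queue, and guarded by
+ * table read/write locks backed by the TableLockManager.
+ */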
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class MasterProcedureQueue implements ProcedureRunnableSet {
+ private static final Log LOG = LogFactory.getLog(MasterProcedureQueue.class);
+
+ private final ProcedureFairRunQueues<TableName, TableRunQueue> fairq;
+ private final ReentrantLock lock = new ReentrantLock();
+ private final Condition waitCond = lock.newCondition();
+ private final TableLockManager lockManager;
+
+ private int queueSize;
+
+ public MasterProcedureQueue(final TableLockManager lockManager) {
+ this.fairq = new ProcedureFairRunQueues<TableName, TableRunQueue>(1);
+ this.lockManager = lockManager;
+ }
+
+ @Override
+ public void addFront(final Procedure proc) {
+ lock.lock();
+ try {
+ getRunQueue(proc).addFront(proc);
+ queueSize++;
+ waitCond.signal();
+ } finally {
+ lock.unlock();
+ }
+ }
+
+ @Override
+ public void addBack(final Procedure proc) {
+ lock.lock();
+ try {
+ getRunQueue(proc).addBack(proc);
+ queueSize++;
+ waitCond.signal();
+ } finally {
+ lock.unlock();
+ }
+ }
+
+ @Override
+ public void yield(final Procedure proc) {
+ addFront(proc);
+ }
+
+ @Override
+ public Long poll() {
+ lock.lock();
+ try {
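+ // Wait for a procedure to be added; if the queue is still empty after the
+ // wakeup, give up and return null (the caller may poll again).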
+ if (queueSize == 0) {
+ waitCond.await();
+ if (queueSize == 0) {
+ return null;
+ }
+ }
+
+ TableRunQueue queue = fairq.poll();
+ if (queue != null && queue.isAvailable()) {
+ queueSize--;
+ return queue.poll();
+ }
+ } catch (InterruptedException e) {
+ return null;
+ } finally {
+ lock.unlock();
+ }
+ return null;
+ }
+
+ @Override
+ public void signalAll() {
+ lock.lock();
+ try {
+ waitCond.signalAll();
+ } finally {
+ lock.unlock();
+ }
+ }
+
+ @Override
+ public void clear() {
+ lock.lock();
+ try {
+ fairq.clear();
+ } finally {
+ lock.unlock();
+ }
+ }
+
+ @Override
+ public int size() {
+ lock.lock();
+ try {
+ return queueSize;
+ } finally {
+ lock.unlock();
+ }
+ }
+
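+ /**
+ * Mark the table as being created; returns false if the table already exists
+ * (its run queue is already in the CREATING or AVAILABLE state).
+ */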
+ protected boolean markTableAsCreating(final TableName table) {
+ TableRunQueue queue = fairq.add(table, new TableRunQueue());
+ switch (queue.getState()) {
+ case CREATING:
+ case AVAILABLE:
+ return false;
+ default:
+ break;
+ }
+ queue.setState(TableRunQueue.State.CREATING);
+ return true;
+ }
+
+ protected void markTableAsCreated(final TableName table) {
+ TableRunQueue queue = fairq.get(table);
+ queue.setState(TableRunQueue.State.AVAILABLE);
+ }
+
+ protected boolean markTableAsDeleting(final TableName table) {
+ TableRunQueue queue = fairq.get(table);
+ if (queue != null) {
+ queue.setState(TableRunQueue.State.DELETING);
+ }
+ return true;
+ }
+
+ protected boolean markTableAsDeleted(final TableName table) {
+ // TODO
+ return true;
+ }
+
+ private TableRunQueue getRunQueue(final Procedure proc) {
+ if (proc instanceof TableProcedureInterface) {
+ // TODO: Improve run-queue push with TableProcedureInterface.getType()
+ // we can take smart decisions based on the type of the operation (e.g. create/delete)
+ return getRunQueue(((TableProcedureInterface)proc).getTableName());
+ }
+ return null;
+ }
+
+ private TableRunQueue getRunQueue(final TableName table) {
+ final TableRunQueue queue = fairq.get(table);
+ return queue != null ? queue : fairq.add(table, new TableRunQueue());
+ }
+
+ public boolean tryAcquireTableRead(final TableName table) {
+ return getRunQueue(table).tryRead(lockManager, table, "");
+ }
+
+ public void releaseTableRead(final TableName table) {
+ getRunQueue(table).releaseRead(lockManager, table);
+ }
+
+ public boolean tryAcquireTableWrite(final TableName table) {
+ return getRunQueue(table).tryWrite(lockManager, table, "");
+ }
+
+ public void releaseTableWrite(final TableName table) {
+ getRunQueue(table).releaseWrite(lockManager, table);
+ }
+
+ private static class TableRunQueue implements ProcedureFairRunQueues.FairObject {
+ enum State { UNKNOWN, CREATING, AVAILABLE, DELETING }
+
+ private final Deque<Long> runnables = new ArrayDeque<Long>();
+
+ private State state = State.UNKNOWN;
+ private TableLock tableLock = null;
+ private boolean xlock = false;
+ private int rlock = 0;
+
+ public void setState(State state) {
+ this.state = state;
+ }
+
+ public State getState() {
+ return this.state;
+ }
+
+ public void addFront(final Procedure proc) {
+ runnables.addFirst(proc.getProcId());
+ }
+
+ public void addBack(final Procedure proc) {
+ runnables.addLast(proc.getProcId());
+ }
+
+ public Long poll() {
+ return runnables.poll();
+ }
+
+ public boolean isAvailable() {
+ synchronized (this) {
+ return !xlock && !runnables.isEmpty();
+ }
+ }
+
+ public boolean isEmpty() {
+ return runnables.isEmpty();
+ }
+
+ public boolean tryRead(final TableLockManager lockManager,
+ final TableName tableName, final String purpose) {
+ synchronized (this) {
+ if (xlock) {
+ return false;
+ }
+
+ // Take zk-read-lock
+ tableLock = lockManager.readLock(tableName, purpose);
+ try {
+ tableLock.acquire();
+ } catch (IOException e) {
+ LOG.error("failed acquire read lock on " + tableName, e);
+ tableLock = null;
+ return false;
+ }
+
+ rlock++;
+ }
+ return true;
+ }
+
+ public void releaseRead(final TableLockManager lockManager,
+ final TableName tableName) {
+ synchronized (this) {
+ releaseTableLock(lockManager);
+ rlock--;
+ }
+ }
+
+ public boolean tryWrite(final TableLockManager lockManager,
+ final TableName tableName, final String purpose) {
+ synchronized (this) {
+ if (xlock) {
+ return false;
+ }
+
+ // Take zk-write-lock
+ tableLock = lockManager.writeLock(tableName, purpose);
+ try {
+ tableLock.acquire();
+ } catch (IOException e) {
+ LOG.error("failed acquire write lock on " + tableName, e);
+ tableLock = null;
+ return false;
+ }
+ xlock = true;
+ }
+ return true;
+ }
+
+ public void releaseWrite(final TableLockManager lockManager,
+ final TableName tableName) {
+ synchronized (this) {
+ releaseTableLock(lockManager);
+ xlock = false;
+ }
+ }
+
+ private void releaseTableLock(final TableLockManager lockManager) {
+ for (int i = 0; i < 3; ++i) {
+ try {
+ tableLock.release();
+ tableLock = null;
+ break;
+ } catch (IOException e) {
+ LOG.warn("Could not release the table write-lock", e);
+ }
+ }
+ }
+
+ @Override
+ public int getPriority() {
+ return 1;
+ }
+ }
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureUtil.java
new file mode 100644
index 0000000..d7c0b92
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureUtil.java
@@ -0,0 +1,56 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.procedure;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation;
+import org.apache.hadoop.security.UserGroupInformation;
+
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public final class MasterProcedureUtil {
+ private static final Log LOG = LogFactory.getLog(MasterProcedureUtil.class);
+
+ private MasterProcedureUtil() {}
+
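+ /** Convert a UserGroupInformation into the protobuf UserInformation serialized in the procedure state. */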
+ public static UserInformation toProtoUserInfo(UserGroupInformation ugi) {
+ UserInformation.Builder userInfoPB = UserInformation.newBuilder();
+ userInfoPB.setEffectiveUser(ugi.getUserName());
+ if (ugi.getRealUser() != null) {
+ userInfoPB.setRealUser(ugi.getRealUser().getUserName());
+ }
+ return userInfoPB.build();
+ }
+
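+ /** Rebuild the UserGroupInformation (effective user, optionally proxied through a real user) from the protobuf UserInformation. */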
+ public static UserGroupInformation toUserInfo(UserInformation userInfoProto) {
+ if (userInfoProto.hasEffectiveUser()) {
+ String effectiveUser = userInfoProto.getEffectiveUser();
+ if (userInfoProto.hasRealUser()) {
+ String realUser = userInfoProto.getRealUser();
+ UserGroupInformation realUserUgi = UserGroupInformation.createRemoteUser(realUser);
+ return UserGroupInformation.createProxyUser(effectiveUser, realUserUgi);
+ }
+ return UserGroupInformation.createRemoteUser(effectiveUser);
+ }
+ return null;
+ }
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.java
new file mode 100644
index 0000000..af2e2ef
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.java
@@ -0,0 +1,46 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.procedure;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.TableName;
+
+/**
+ * Procedures that operate on a specific Table (e.g. create, delete, snapshot, ...)
+ * must implement this interface to allow the framework to handle the locking/concurrency issues.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public interface TableProcedureInterface {
+ public enum TableOperationType { CREATE, DELETE, EDIT };
+
+ /**
+ * @return the name of the table the procedure is operating on
+ */
+ TableName getTableName();
+
+ /**
+ * Given the operation type we can make decisions about pending operations.
+ * e.g. if we get a delete while some other table operation is pending (e.g. add column),
+ * we can abort those operations.
+ * @return the operation type that the procedure is executing.
+ */
+ TableOperationType getTableOperationType();
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
index 9893fc8..5fe5f8c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
@@ -31,7 +31,7 @@ import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.master.MasterServices;
-import org.apache.hadoop.hbase.master.handler.CreateTableHandler;
+import org.apache.hadoop.hbase.master.procedure.CreateTableProcedure;
import org.apache.hadoop.hbase.namespace.NamespaceAuditor;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest;
@@ -444,14 +444,11 @@ public class MasterQuotaManager implements RegionStateListener {
new HRegionInfo(QuotaUtil.QUOTA_TABLE_NAME)
};
- masterServices.getExecutorService()
- .submit(new CreateTableHandler(masterServices,
- masterServices.getMasterFileSystem(),
- QuotaUtil.QUOTA_TABLE_DESC,
- masterServices.getConfiguration(),
- newRegions,
- masterServices)
- .prepare());
+ masterServices.getMasterProcedureExecutor()
+ .submitProcedure(new CreateTableProcedure(
+ masterServices.getMasterProcedureExecutor().getEnvironment(),
+ QuotaUtil.QUOTA_TABLE_DESC,
+ newRegions));
}
private static class NamedLock {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java
index 95d8a17..347cad5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java
@@ -67,6 +67,30 @@ public abstract class ModifyRegionUtils {
void editRegion(final HRegionInfo region) throws IOException;
}
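+ /**
+ * Create the HRegionInfo set for a new table: a single region when no split keys
+ * are provided, otherwise splitKeys.length + 1 regions covering the whole key space.
+ */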
+ public static HRegionInfo[] createHRegionInfos(HTableDescriptor hTableDescriptor,
+ byte[][] splitKeys) {
+ long regionId = System.currentTimeMillis();
+ HRegionInfo[] hRegionInfos = null;
+ if (splitKeys == null || splitKeys.length == 0) {
+ hRegionInfos = new HRegionInfo[]{
+ new HRegionInfo(hTableDescriptor.getTableName(), null, null, false, regionId)
+ };
+ } else {
+ int numRegions = splitKeys.length + 1;
+ hRegionInfos = new HRegionInfo[numRegions];
+ byte[] startKey = null;
+ byte[] endKey = null;
+ for (int i = 0; i < numRegions; i++) {
+ endKey = (i == splitKeys.length) ? null : splitKeys[i];
+ hRegionInfos[i] =
+ new HRegionInfo(hTableDescriptor.getTableName(), startKey, endKey,
+ false, regionId);
+ startKey = endKey;
+ }
+ }
+ return hRegionInfos;
+ }
+
/**
* Create new set of regions on the specified file-system.
* NOTE: that you should add the regions to hbase:meta after this operation.
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
index 8ed49ff..2c13f39 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
@@ -63,6 +63,8 @@ import org.apache.hadoop.hbase.coordination.SplitLogManagerCoordination.SplitLog
import org.apache.hadoop.hbase.executor.ExecutorService;
import org.apache.hadoop.hbase.io.Reference;
import org.apache.hadoop.hbase.master.CatalogJanitor.SplitParentFirstComparator;
+import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
@@ -261,6 +263,11 @@ public class TestCatalogJanitor {
}
@Override
+ public ProcedureExecutor<MasterProcedureEnv> getMasterProcedureExecutor() {
+ return null;
+ }
+
+ @Override
public ServerManager getServerManager() {
return null;
}
@@ -912,7 +919,7 @@ public class TestCatalogJanitor {
MasterServices services = new MockMasterServices(server);
// create the janitor
-
+
CatalogJanitor janitor = new CatalogJanitor(server, services);
// Create regions.
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
new file mode 100644
index 0000000..64734cf
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
@@ -0,0 +1,109 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.procedure;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
+import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
+import org.apache.hadoop.hbase.util.ModifyRegionUtils;
+import org.apache.hadoop.hbase.util.FSUtils;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+public class MasterProcedureTestingUtility {
+ private static final Log LOG = LogFactory.getLog(MasterProcedureTestingUtility.class);
+
+ private MasterProcedureTestingUtility() {
+ }
+
+ public static HTableDescriptor createHTD(final TableName tableName, final String... family) {
+ HTableDescriptor htd = new HTableDescriptor(tableName);
+ for (int i = 0; i < family.length; ++i) {
+ htd.addFamily(new HColumnDescriptor(family[i]));
+ }
+ return htd;
+ }
+
+ public static HRegionInfo[] createTable(final ProcedureExecutor<MasterProcedureEnv> procExec,
+ final TableName tableName, final byte[][] splitKeys, String... family) throws IOException {
+ HTableDescriptor htd = createHTD(tableName, family);
+ HRegionInfo[] regions = ModifyRegionUtils.createHRegionInfos(htd, splitKeys);
+ long procId = ProcedureTestingUtility.submitAndWait(procExec,
+ new CreateTableProcedure(procExec.getEnvironment(), htd, regions));
+ ProcedureTestingUtility.assertProcNotFailed(procExec.getResult(procId));
+ return regions;
+ }
+
+ public static void validateTableCreation(final HMaster master, final TableName tableName,
+ final HRegionInfo[] regions, String... family) throws IOException {
+ final FileSystem fs = master.getMasterFileSystem().getFileSystem();
+ final Path tableDir = FSUtils.getTableDir(master.getMasterFileSystem().getRootDir(), tableName);
+ assertTrue(fs.exists(tableDir));
+ for (int i = 0; i < regions.length; ++i) {
+ Path regionDir = new Path(tableDir, regions[i].getEncodedName());
+ assertTrue(fs.exists(regionDir));
+ for (int j = 0; j < family.length; ++j) {
+ final Path familyDir = new Path(regionDir, family[j]);
+ assertTrue(fs.exists(familyDir));
+ }
+ }
+ // TODO: check the filesystem and meta
+ }
+
+ public static void validateTableDeletion(final HMaster master, final TableName tableName,
+ final HRegionInfo[] regions, String... family) throws IOException {
+ final FileSystem fs = master.getMasterFileSystem().getFileSystem();
+ final Path tableDir = FSUtils.getTableDir(master.getMasterFileSystem().getRootDir(), tableName);
+ assertFalse(fs.exists(tableDir));
+ // TODO: check the filesystem and meta
+ }
+
+ public static class InjectAbortOnLoadListener
+ implements ProcedureExecutor.ProcedureExecutorListener {
+ private final ProcedureExecutor<MasterProcedureEnv> procExec;
+
+ public InjectAbortOnLoadListener(final ProcedureExecutor<MasterProcedureEnv> procExec) {
+ this.procExec = procExec;
+ }
+
+ @Override
+ public void procedureLoaded(long procId) {
+ procExec.abort(procId);
+ }
+
+ @Override
+ public void procedureAdded(long procId) { /* no-op */ }
+
+ @Override
+ public void procedureFinished(long procId) { /* no-op */ }
+ }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java
new file mode 100644
index 0000000..bcbd738
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java
@@ -0,0 +1,230 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.procedure;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableExistsException;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
+import org.apache.hadoop.hbase.procedure2.ProcedureResult;
+import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.ModifyRegionUtils;
+
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+@Category({MasterTests.class, MediumTests.class})
+public class TestCreateTableProcedure {
+ private static final Log LOG = LogFactory.getLog(TestCreateTableProcedure.class);
+
+ protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+
+ private static void setupConf(Configuration conf) {
+ conf.setInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, 1);
+ }
+
+ @BeforeClass
+ public static void setupCluster() throws Exception {
+ setupConf(UTIL.getConfiguration());
+ UTIL.startMiniCluster(1);
+ }
+
+ @AfterClass
+ public static void cleanupTest() throws Exception {
+ try {
+ UTIL.shutdownMiniCluster();
+ } catch (Exception e) {
+ LOG.warn("failure shutting down cluster", e);
+ }
+ }
+
+ @Before
+ public void setup() throws Exception {
+ resetProcExecutorTestingKillFlag();
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ resetProcExecutorTestingKillFlag();
+ for (HTableDescriptor htd: UTIL.getHBaseAdmin().listTables()) {
+ LOG.info("Tear down, remove table=" + htd.getTableName());
+ UTIL.deleteTable(htd.getTableName());
+ }
+ }
+
+ private void resetProcExecutorTestingKillFlag() {
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+ ProcedureTestingUtility.setToggleKillBeforeStoreUpdate(procExec, false);
+ ProcedureTestingUtility.setKillBeforeStoreUpdate(procExec, false);
+ }
+
+ @Test(timeout=60000)
+ public void testSimpleCreate() throws Exception {
+ final TableName tableName = TableName.valueOf("testSimpleCreate");
+ final byte[][] splitKeys = null;
+ testSimpleCreate(tableName, splitKeys);
+ }
+
+ @Test(timeout=60000)
+ public void testSimpleCreateWithSplits() throws Exception {
+ final TableName tableName = TableName.valueOf("testSimpleCreateWithSplits");
+ final byte[][] splitKeys = new byte[][] {
+ Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c")
+ };
+ testSimpleCreate(tableName, splitKeys);
+ }
+
+ private void testSimpleCreate(final TableName tableName, byte[][] splitKeys) throws Exception {
+ HRegionInfo[] regions = MasterProcedureTestingUtility.createTable(
+ getMasterProcedureExecutor(), tableName, splitKeys, "f1", "f2");
+ MasterProcedureTestingUtility.validateTableCreation(
+ UTIL.getHBaseCluster().getMaster(), tableName, regions, "f1", "f2");
+ }
+
+ @Test(timeout=60000, expected=TableExistsException.class)
+ public void testCreateExisting() throws Exception {
+ final TableName tableName = TableName.valueOf("testCreateExisting");
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+ final HTableDescriptor htd = MasterProcedureTestingUtility.createHTD(tableName, "f");
+ final HRegionInfo[] regions = ModifyRegionUtils.createHRegionInfos(htd, null);
+
+ // create the table
+ long procId = ProcedureTestingUtility.submitAndWait(procExec,
+ new CreateTableProcedure(procExec.getEnvironment(), htd, regions));
+ ProcedureTestingUtility.assertProcNotFailed(procExec.getResult(procId));
+
+ // create another with the same name
+ procId = ProcedureTestingUtility.submitAndWait(procExec,
+ new CreateTableProcedure(procExec.getEnvironment(), htd, regions));
+ }
+
+ @Test(timeout=60000)
+ public void testRecoveryAndDoubleExecution() throws Exception {
+ final TableName tableName = TableName.valueOf("testRecoveryAndDoubleExecution");
+
+ // create the table
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+ ProcedureTestingUtility.setKillBeforeStoreUpdate(procExec, true);
+ ProcedureTestingUtility.setToggleKillBeforeStoreUpdate(procExec, true);
+
+ // Start the Create procedure && kill the executor
+ byte[][] splitKeys = null;
+ HTableDescriptor htd = MasterProcedureTestingUtility.createHTD(tableName, "f1", "f2");
+ HRegionInfo[] regions = ModifyRegionUtils.createHRegionInfos(htd, splitKeys);
+ long procId = procExec.submitProcedure(
+ new CreateTableProcedure(procExec.getEnvironment(), htd, regions));
+ ProcedureTestingUtility.waitProcedure(procExec, procId);
+
+ // Restart the executor and execute the step twice
+ // execute step N - kill before store update
+ // restart executor/store
+ // execute step N - save on store
+ //
+ // NOTE: the 6 (number of CreateTableState steps) is hardcoded,
+ // so you have to look at this test at least once when you add a new step.
+ for (int i = 0; i < 6; ++i) {
+ LOG.info("Restart "+ i +" exec state: " + CreateTableProcedure.CreateTableState.values()[i]);
+ ProcedureTestingUtility.assertProcNotYetCompleted(procExec, procId);
+ ProcedureTestingUtility.restart(procExec);
+ ProcedureTestingUtility.waitProcedure(procExec, procId);
+ }
+ ProcedureTestingUtility.assertProcNotFailed(procExec, procId);
+ MasterProcedureTestingUtility.validateTableCreation(
+ UTIL.getHBaseCluster().getMaster(), tableName, regions, "f1", "f2");
+ }
+
+ @Test(timeout=60000)
+ public void testRollbackAndDoubleExecution() throws Exception {
+ final TableName tableName = TableName.valueOf("testRollbackAndDoubleExecution");
+
+ // create the table
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+ ProcedureTestingUtility.setKillBeforeStoreUpdate(procExec, true);
+ ProcedureTestingUtility.setToggleKillBeforeStoreUpdate(procExec, true);
+
+ // Start the Create procedure && kill the executor
+ final byte[][] splitKeys = new byte[][] {
+ Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c")
+ };
+ HTableDescriptor htd = MasterProcedureTestingUtility.createHTD(tableName, "f1", "f2");
+ HRegionInfo[] regions = ModifyRegionUtils.createHRegionInfos(htd, splitKeys);
+ long procId = procExec.submitProcedure(
+ new CreateTableProcedure(procExec.getEnvironment(), htd, regions));
+ ProcedureTestingUtility.waitProcedure(procExec, procId);
+
+ // Restart the executor and execute the step twice
+ // execute step N - kill before store update
+ // restart executor/store
+ // execute step N - save on store
+ //
+ // NOTE: the 4 (the step up to which we execute before triggering the rollback) is hardcoded,
+ // so you have to look at this test at least once when you add a new step.
+ final int LAST_STEP = 4;
+ for (int i = 0; i < LAST_STEP; ++i) {
+ LOG.info("Restart "+ i +" exec state: " + CreateTableProcedure.CreateTableState.values()[i]);
+ ProcedureTestingUtility.assertProcNotYetCompleted(procExec, procId);
+ ProcedureTestingUtility.restart(procExec);
+ ProcedureTestingUtility.waitProcedure(procExec, procId);
+ }
+
+ // Restart the executor and rollback the step twice
+ // rollback step N - kill before store update
+ // restart executor/store
+ // rollback step N - save on store
+ MasterProcedureTestingUtility.InjectAbortOnLoadListener abortListener =
+ new MasterProcedureTestingUtility.InjectAbortOnLoadListener(procExec);
+ procExec.registerListener(abortListener);
+ try {
+ for (int i = LAST_STEP + 1; i >= 0; --i) {
+ LOG.info("Restart " + i +
+ " rollback state: "+ CreateTableProcedure.CreateTableState.values()[i]);
+ ProcedureTestingUtility.assertProcNotYetCompleted(procExec, procId);
+ ProcedureTestingUtility.restart(procExec);
+ ProcedureTestingUtility.waitProcedure(procExec, procId);
+ }
+ } finally {
+ assertTrue(procExec.unregisterListener(abortListener));
+ }
+
+ ProcedureTestingUtility.assertIsAbortException(procExec.getResult(procId));
+ MasterProcedureTestingUtility.validateTableDeletion(
+ UTIL.getHBaseCluster().getMaster(), tableName, regions, "f1", "f2");
+ }
+
+ private ProcedureExecutor<MasterProcedureEnv> getMasterProcedureExecutor() {
+ return UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor();
+ }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteTableProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteTableProcedure.java
new file mode 100644
index 0000000..bc9d9a2
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteTableProcedure.java
@@ -0,0 +1,194 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.procedure;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
+import org.apache.hadoop.hbase.procedure2.ProcedureResult;
+import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
+
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+@Category({MasterTests.class, MediumTests.class})
+public class TestDeleteTableProcedure {
+ private static final Log LOG = LogFactory.getLog(TestDeleteTableProcedure.class);
+
+ protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+
+ private static void setupConf(Configuration conf) {
+ conf.setInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, 1);
+ }
+
+ @BeforeClass
+ public static void setupCluster() throws Exception {
+ setupConf(UTIL.getConfiguration());
+ UTIL.startMiniCluster(1);
+ }
+
+ @AfterClass
+ public static void cleanupTest() throws Exception {
+ try {
+ UTIL.shutdownMiniCluster();
+ } catch (Exception e) {
+ LOG.warn("failure shutting down cluster", e);
+ }
+ }
+
+ @Before
+ public void setup() throws Exception {
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+ ProcedureTestingUtility.setToggleKillBeforeStoreUpdate(procExec, false);
+ ProcedureTestingUtility.setKillBeforeStoreUpdate(procExec, false);
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ }
+
+ @Test(timeout=60000, expected=TableNotFoundException.class)
+ public void testDeleteNotExistentTable2() throws Exception {
+ final TableName tableName = TableName.valueOf("testDeleteNotExistentTable");
+
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+ long procId = ProcedureTestingUtility.submitAndWait(procExec,
+ new DeleteTableProcedure(procExec.getEnvironment(), tableName));
+ }
+
+ @Test(timeout=60000)
+ public void testDeleteDeletedTable() throws Exception {
+ final TableName tableName = TableName.valueOf("testDeleteDeletedTable");
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+
+ HRegionInfo[] regions = MasterProcedureTestingUtility.createTable(
+ procExec, tableName, null, "f");
+ UTIL.getHBaseAdmin().disableTable(tableName);
+
+ // delete the table (that exists)
+ long procId1 = procExec.submitProcedure(
+ new DeleteTableProcedure(procExec.getEnvironment(), tableName));
+ // delete the table (that will no longer exist)
+ long procId2 = procExec.submitProcedure(
+ new DeleteTableProcedure(procExec.getEnvironment(), tableName));
+
+ // Wait the completion
+ ProcedureTestingUtility.waitProcedure(procExec, procId1);
+ ProcedureTestingUtility.waitProcedure(procExec, procId2);
+
+ // First delete should succeed
+ ProcedureTestingUtility.assertProcNotFailed(procExec, procId1);
+ MasterProcedureTestingUtility.validateTableDeletion(
+ UTIL.getHBaseCluster().getMaster(), tableName, regions, "f");
+
+ // Second delete should fail with TableNotFound
+ ProcedureResult result = procExec.getResult(procId2);
+ assertTrue(result.isFailed());
+ LOG.debug("Delete failed with exception: " + result.getException());
+ assertTrue(result.getException().getCause() instanceof TableNotFoundException);
+ }
+
+ @Test(timeout=60000)
+ public void testSimpleDelete() throws Exception {
+ final TableName tableName = TableName.valueOf("testSimpleDelete");
+ final byte[][] splitKeys = null;
+ testSimpleDelete(tableName, splitKeys);
+ }
+
+ @Test(timeout=60000)
+ public void testSimpleDeleteWithSplits() throws Exception {
+ final TableName tableName = TableName.valueOf("testSimpleDeleteWithSplits");
+ final byte[][] splitKeys = new byte[][] {
+ Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c")
+ };
+ testSimpleDelete(tableName, splitKeys);
+ }
+
+ private void testSimpleDelete(final TableName tableName, byte[][] splitKeys) throws Exception {
+ HRegionInfo[] regions = MasterProcedureTestingUtility.createTable(
+ getMasterProcedureExecutor(), tableName, splitKeys, "f1", "f2");
+ UTIL.getHBaseAdmin().disableTable(tableName);
+
+ // delete the table
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+ long procId = ProcedureTestingUtility.submitAndWait(procExec,
+ new DeleteTableProcedure(procExec.getEnvironment(), tableName));
+ ProcedureTestingUtility.assertProcNotFailed(procExec, procId);
+ MasterProcedureTestingUtility.validateTableDeletion(
+ UTIL.getHBaseCluster().getMaster(), tableName, regions, "f1", "f2");
+ }
+
+ @Test(timeout=60000)
+ public void testRecoveryAndDoubleExecution() throws Exception {
+ final TableName tableName = TableName.valueOf("testRecoveryAndDoubleExecution");
+
+ // create the table
+ byte[][] splitKeys = null;
+ HRegionInfo[] regions = MasterProcedureTestingUtility.createTable(
+ getMasterProcedureExecutor(), tableName, splitKeys, "f1", "f2");
+ UTIL.getHBaseAdmin().disableTable(tableName);
+
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+ ProcedureTestingUtility.setKillBeforeStoreUpdate(procExec, true);
+ ProcedureTestingUtility.setToggleKillBeforeStoreUpdate(procExec, true);
+
+ // Start the Delete procedure && kill the executor
+ long procId = procExec.submitProcedure(
+ new DeleteTableProcedure(procExec.getEnvironment(), tableName));
+ ProcedureTestingUtility.waitProcedure(procExec, procId);
+
+ // Restart the executor and execute the step twice
+ // execute step N - kill before store update
+ // restart executor/store
+ // execute step N - save on store
+ //
+ // NOTE: the number of DeleteTableState steps (6) is hardcoded,
+ // so this test must be revisited whenever a new step is added.
+ for (int i = 0; i < 6; ++i) {
+ LOG.info("Restart "+ i +" exec state: " + DeleteTableProcedure.DeleteTableState.values()[i]);
+ ProcedureTestingUtility.assertProcNotYetCompleted(procExec, procId);
+ ProcedureTestingUtility.restart(procExec);
+ ProcedureTestingUtility.waitProcedure(procExec, procId);
+ }
+ ProcedureTestingUtility.assertProcNotFailed(procExec, procId);
+ MasterProcedureTestingUtility.validateTableDeletion(
+ UTIL.getHBaseCluster().getMaster(), tableName, regions, "f1", "f2");
+ }
+
+ private ProcedureExecutor<MasterProcedureEnv> getMasterProcedureExecutor() {
+ return UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor();
+ }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java
new file mode 100644
index 0000000..443bbf9
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java
@@ -0,0 +1,222 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.procedure;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.MiniHBaseCluster;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
+import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.ModifyRegionUtils;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+@Category({MasterTests.class, LargeTests.class})
+public class TestMasterFailoverWithProcedures {
+ private static final Log LOG = LogFactory.getLog(TestMasterFailoverWithProcedures.class);
+
+ protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+
+ private static void setupConf(Configuration conf) {
+ }
+
+ @Before
+ public void setup() throws Exception {
+ setupConf(UTIL.getConfiguration());
+ UTIL.startMiniCluster(2, 3);
+
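+ // Make sure no kill flags are left enabled on the procedure executor between tests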
+ final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+ ProcedureTestingUtility.setToggleKillBeforeStoreUpdate(procExec, false);
+ ProcedureTestingUtility.setKillBeforeStoreUpdate(procExec, false);
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ try {
+ UTIL.shutdownMiniCluster();
+ } catch (Exception e) {
+ LOG.warn("failure shutting down cluster", e);
+ }
+ }
+
+ // ==========================================================================
+ // Test Create Table
+ // ==========================================================================
+
+ @Test(timeout=60000)
+ public void testCreateWithFailover() throws Exception {
+ // TODO: Should we try every step? (master failover takes a long time)
+ // Every step is already covered by TestCreateTableProcedure,
+ // but there only the executor/store is restarted, not the whole master.
+ // Without a master restart we may miss bugs in the procedure code,
+ // such as a missing "wait" for resources to become available (e.g. RS).
+ testCreateWithFailoverAtStep(CreateTableProcedure.CreateTableState.ASSIGN_REGIONS.ordinal());
+ }
+
+ private void testCreateWithFailoverAtStep(final int step) throws Exception {
+ final TableName tableName = TableName.valueOf("testCreateWithFailoverAtStep" + step);
+
+ // create the table
+ ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
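+ // Arrange for the executor to be killed before persisting each state, so the loop
+ // below can step the procedure forward to the requested point before the failover.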
+ ProcedureTestingUtility.setKillBeforeStoreUpdate(procExec, true);
+ ProcedureTestingUtility.setToggleKillBeforeStoreUpdate(procExec, true);
+
+ // Start the Create procedure && kill the executor
+ byte[][] splitKeys = null;
+ HTableDescriptor htd = MasterProcedureTestingUtility.createHTD(tableName, "f1", "f2");
+ HRegionInfo[] regions = ModifyRegionUtils.createHRegionInfos(htd, splitKeys);
+ long procId = procExec.submitProcedure(
+ new CreateTableProcedure(procExec.getEnvironment(), htd, regions));
+ ProcedureTestingUtility.waitProcedure(procExec, procId);
+
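+ // Walk the procedure forward, one executor/store restart per step, up to the target step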
+ for (int i = 0; i < step; ++i) {
+ LOG.info("Restart "+ i +" exec state: " + CreateTableProcedure.CreateTableState.values()[i]);
+ ProcedureTestingUtility.assertProcNotYetCompleted(procExec, procId);
+ ProcedureTestingUtility.restart(procExec);
+ ProcedureTestingUtility.waitProcedure(procExec, procId);
+ }
+ ProcedureTestingUtility.assertProcNotYetCompleted(procExec, procId);
+
+ LOG.info("Trigger master failover");
+ masterFailover();
+
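+ // The new active master reloads the procedure store and should resume the pending procedure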
+ procExec = getMasterProcedureExecutor();
+ ProcedureTestingUtility.waitProcedure(procExec, procId);
+ ProcedureTestingUtility.assertProcNotFailed(procExec, procId);
+
+ MasterProcedureTestingUtility.validateTableCreation(
+ UTIL.getHBaseCluster().getMaster(), tableName, regions, "f1", "f2");
+ }
+
+ // ==========================================================================
+ // Test Delete Table
+ // ==========================================================================
+
+ @Test(timeout=60000)
+ public void testDeleteWithFailover() throws Exception {
+ // TODO: Should we try every step? (master failover takes a long time)
+ // Every step is already covered by TestDeleteTableProcedure,
+ // but there only the executor/store is restarted, not the whole master.
+ // Without a master restart we may miss bugs in the procedure code,
+ // such as a missing "wait" for resources to become available (e.g. RS).
+ testDeleteWithFailoverAtStep(DeleteTableProcedure.DeleteTableState.UNASSIGN_REGIONS.ordinal());
+ }
+
+ private void testDeleteWithFailoverAtStep(final int step) throws Exception {
+ final TableName tableName = TableName.valueOf("testDeleteWithFailoverAtStep" + step);
+
+ // create the table
+ byte[][] splitKeys = null;
+ HRegionInfo[] regions = MasterProcedureTestingUtility.createTable(
+ getMasterProcedureExecutor(), tableName, splitKeys, "f1", "f2");
+ Path tableDir = FSUtils.getTableDir(getRootDir(), tableName);
+ MasterProcedureTestingUtility.validateTableCreation(
+ UTIL.getHBaseCluster().getMaster(), tableName, regions, "f1", "f2");
+ UTIL.getHBaseAdmin().disableTable(tableName);
+
+ ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+ ProcedureTestingUtility.setKillBeforeStoreUpdate(procExec, true);
+ ProcedureTestingUtility.setToggleKillBeforeStoreUpdate(procExec, true);
+
+ // Start the Delete procedure && kill the executor
+ long procId = procExec.submitProcedure(
+ new DeleteTableProcedure(procExec.getEnvironment(), tableName));
+ ProcedureTestingUtility.waitProcedure(procExec, procId);
+
+ for (int i = 0; i < step; ++i) {
+ LOG.info("Restart "+ i +" exec state: " + DeleteTableProcedure.DeleteTableState.values()[i]);
+ ProcedureTestingUtility.assertProcNotYetCompleted(procExec, procId);
+ ProcedureTestingUtility.restart(procExec);
+ ProcedureTestingUtility.waitProcedure(procExec, procId);
+ }
+ ProcedureTestingUtility.assertProcNotYetCompleted(procExec, procId);
+
+ LOG.info("Trigger master failover");
+ masterFailover();
+
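+ // The delete procedure should resume on the new master and complete the table removal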
+ procExec = getMasterProcedureExecutor();
+ ProcedureTestingUtility.waitProcedure(procExec, procId);
+ ProcedureTestingUtility.assertProcNotFailed(procExec, procId);
+
+ MasterProcedureTestingUtility.validateTableDeletion(
+ UTIL.getHBaseCluster().getMaster(), tableName, regions, "f1", "f2");
+ }
+
+ // ==========================================================================
+ //  Helpers
+ // ==========================================================================
+
+ private void masterFailover() throws Exception {
+ MiniHBaseCluster cluster = UTIL.getMiniHBaseCluster();
+
+ // Kill the master
+ HMaster oldMaster = cluster.getMaster();
+ cluster.killMaster(cluster.getMaster().getServerName());
+
+ // Wait for a backup master to take over as the new active master
+ HMaster newMaster = cluster.getMaster();
+ while (newMaster == null || newMaster == oldMaster) {
+ Thread.sleep(250);
+ newMaster = cluster.getMaster();
+ }
+
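+ // Wait until the new master is fully active and initialized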
+ while (!(newMaster.isActiveMaster() && newMaster.isInitialized())) {
+ Thread.sleep(250);
+ }
+ }
+
+ private MasterProcedureEnv getMasterProcedureEnv() {
+ return getMasterProcedureExecutor().getEnvironment();
+ }
+
+ private ProcedureExecutor<MasterProcedureEnv> getMasterProcedureExecutor() {
+ return UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor();
+ }
+
+ private FileSystem getFileSystem() {
+ return UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
+ }
+
+ private Path getRootDir() {
+ return UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
+ }
+
+ private Path getTempDir() {
+ return UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getTempDir();
+ }
+}