diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RSGroupAdminProtos.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RSGroupAdminProtos.java
index 3d1f4bd..ca1db1e 100644
--- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RSGroupAdminProtos.java
+++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RSGroupAdminProtos.java
@@ -10754,6 +10754,1621 @@ public final class RSGroupAdminProtos {
// @@protoc_insertion_point(class_scope:hbase.pb.GetRSGroupInfoOfServerResponse)
}
+ public interface MoveServersAndTablesRequestOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required string target_group = 1;
+ /**
+ * required string target_group = 1;
+ */
+ boolean hasTargetGroup();
+ /**
+ * required string target_group = 1;
+ */
+ java.lang.String getTargetGroup();
+ /**
+ * required string target_group = 1;
+ */
+ com.google.protobuf.ByteString
+ getTargetGroupBytes();
+
+ // repeated .hbase.pb.ServerName servers = 2;
+ /**
+ * repeated .hbase.pb.ServerName servers = 2;
+ */
+ java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName>
+ getServersList();
+ /**
+ * repeated .hbase.pb.ServerName servers = 2;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServers(int index);
+ /**
+ * repeated .hbase.pb.ServerName servers = 2;
+ */
+ int getServersCount();
+ /**
+ * repeated .hbase.pb.ServerName servers = 2;
+ */
+ java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>
+ getServersOrBuilderList();
+ /**
+ * repeated .hbase.pb.ServerName servers = 2;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServersOrBuilder(
+ int index);
+
+ // repeated .hbase.pb.TableName table_name = 3;
+ /**
+ * repeated .hbase.pb.TableName table_name = 3;
+ */
+ java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName>
+ getTableNameList();
+ /**
+ * repeated .hbase.pb.TableName table_name = 3;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName(int index);
+ /**
+ * repeated .hbase.pb.TableName table_name = 3;
+ */
+ int getTableNameCount();
+ /**
+ * repeated .hbase.pb.TableName table_name = 3;
+ */
+ java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>
+ getTableNameOrBuilderList();
+ /**
+ * repeated .hbase.pb.TableName table_name = 3;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder(
+ int index);
+ }
+ /**
+ * Protobuf type {@code hbase.pb.MoveServersAndTablesRequest}
+ */
+ public static final class MoveServersAndTablesRequest extends
+ com.google.protobuf.GeneratedMessage
+ implements MoveServersAndTablesRequestOrBuilder {
+ // Use MoveServersAndTablesRequest.newBuilder() to construct.
+ private MoveServersAndTablesRequest(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private MoveServersAndTablesRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final MoveServersAndTablesRequest defaultInstance;
+ public static MoveServersAndTablesRequest getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public MoveServersAndTablesRequest getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private MoveServersAndTablesRequest(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ bitField0_ |= 0x00000001;
+ targetGroup_ = input.readBytes();
+ break;
+ }
+ case 18: {
+ if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
+ servers_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName>();
+ mutable_bitField0_ |= 0x00000002;
+ }
+ servers_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.PARSER, extensionRegistry));
+ break;
+ }
+ case 26: {
+ if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
+ tableName_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName>();
+ mutable_bitField0_ |= 0x00000004;
+ }
+ tableName_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry));
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
+ servers_ = java.util.Collections.unmodifiableList(servers_);
+ }
+ if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
+ tableName_ = java.util.Collections.unmodifiableList(tableName_);
+ }
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_MoveServersAndTablesRequest_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_MoveServersAndTablesRequest_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<MoveServersAndTablesRequest> PARSER =
+ new com.google.protobuf.AbstractParser<MoveServersAndTablesRequest>() {
+ public MoveServersAndTablesRequest parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new MoveServersAndTablesRequest(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<MoveServersAndTablesRequest> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // required string target_group = 1;
+ public static final int TARGET_GROUP_FIELD_NUMBER = 1;
+ private java.lang.Object targetGroup_;
+ /**
+ * required string target_group = 1;
+ */
+ public boolean hasTargetGroup() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required string target_group = 1;
+ */
+ public java.lang.String getTargetGroup() {
+ java.lang.Object ref = targetGroup_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ targetGroup_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * required string target_group = 1;
+ */
+ public com.google.protobuf.ByteString
+ getTargetGroupBytes() {
+ java.lang.Object ref = targetGroup_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ targetGroup_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // repeated .hbase.pb.ServerName servers = 2;
+ public static final int SERVERS_FIELD_NUMBER = 2;
+ private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName> servers_;
+ /**
+ * repeated .hbase.pb.ServerName servers = 2;
+ */
+ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName> getServersList() {
+ return servers_;
+ }
+ /**
+ * repeated .hbase.pb.ServerName servers = 2;
+ */
+ public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>
+ getServersOrBuilderList() {
+ return servers_;
+ }
+ /**
+ * repeated .hbase.pb.ServerName servers = 2;
+ */
+ public int getServersCount() {
+ return servers_.size();
+ }
+ /**
+ * repeated .hbase.pb.ServerName servers = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServers(int index) {
+ return servers_.get(index);
+ }
+ /**
+ * repeated .hbase.pb.ServerName servers = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServersOrBuilder(
+ int index) {
+ return servers_.get(index);
+ }
+
+ // repeated .hbase.pb.TableName table_name = 3;
+ public static final int TABLE_NAME_FIELD_NUMBER = 3;
+ private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName> tableName_;
+ /**
+ * repeated .hbase.pb.TableName table_name = 3;
+ */
+ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName> getTableNameList() {
+ return tableName_;
+ }
+ /**
+ * repeated .hbase.pb.TableName table_name = 3;
+ */
+ public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>
+ getTableNameOrBuilderList() {
+ return tableName_;
+ }
+ /**
+ * repeated .hbase.pb.TableName table_name = 3;
+ */
+ public int getTableNameCount() {
+ return tableName_.size();
+ }
+ /**
+ * repeated .hbase.pb.TableName table_name = 3;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName(int index) {
+ return tableName_.get(index);
+ }
+ /**
+ * repeated .hbase.pb.TableName table_name = 3;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder(
+ int index) {
+ return tableName_.get(index);
+ }
+
+ private void initFields() {
+ targetGroup_ = "";
+ servers_ = java.util.Collections.emptyList();
+ tableName_ = java.util.Collections.emptyList();
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasTargetGroup()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ for (int i = 0; i < getServersCount(); i++) {
+ if (!getServers(i).isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ }
+ for (int i = 0; i < getTableNameCount(); i++) {
+ if (!getTableName(i).isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeBytes(1, getTargetGroupBytes());
+ }
+ for (int i = 0; i < servers_.size(); i++) {
+ output.writeMessage(2, servers_.get(i));
+ }
+ for (int i = 0; i < tableName_.size(); i++) {
+ output.writeMessage(3, tableName_.get(i));
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(1, getTargetGroupBytes());
+ }
+ for (int i = 0; i < servers_.size(); i++) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(2, servers_.get(i));
+ }
+ for (int i = 0; i < tableName_.size(); i++) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(3, tableName_.get(i));
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest other = (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest) obj;
+
+ boolean result = true;
+ result = result && (hasTargetGroup() == other.hasTargetGroup());
+ if (hasTargetGroup()) {
+ result = result && getTargetGroup()
+ .equals(other.getTargetGroup());
+ }
+ result = result && getServersList()
+ .equals(other.getServersList());
+ result = result && getTableNameList()
+ .equals(other.getTableNameList());
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasTargetGroup()) {
+ hash = (37 * hash) + TARGET_GROUP_FIELD_NUMBER;
+ hash = (53 * hash) + getTargetGroup().hashCode();
+ }
+ if (getServersCount() > 0) {
+ hash = (37 * hash) + SERVERS_FIELD_NUMBER;
+ hash = (53 * hash) + getServersList().hashCode();
+ }
+ if (getTableNameCount() > 0) {
+ hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER;
+ hash = (53 * hash) + getTableNameList().hashCode();
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code hbase.pb.MoveServersAndTablesRequest}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequestOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_MoveServersAndTablesRequest_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_MoveServersAndTablesRequest_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getServersFieldBuilder();
+ getTableNameFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ targetGroup_ = "";
+ bitField0_ = (bitField0_ & ~0x00000001);
+ if (serversBuilder_ == null) {
+ servers_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000002);
+ } else {
+ serversBuilder_.clear();
+ }
+ if (tableNameBuilder_ == null) {
+ tableName_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000004);
+ } else {
+ tableNameBuilder_.clear();
+ }
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_MoveServersAndTablesRequest_descriptor;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest build() {
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest result = new org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.targetGroup_ = targetGroup_;
+ if (serversBuilder_ == null) {
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ servers_ = java.util.Collections.unmodifiableList(servers_);
+ bitField0_ = (bitField0_ & ~0x00000002);
+ }
+ result.servers_ = servers_;
+ } else {
+ result.servers_ = serversBuilder_.build();
+ }
+ if (tableNameBuilder_ == null) {
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ tableName_ = java.util.Collections.unmodifiableList(tableName_);
+ bitField0_ = (bitField0_ & ~0x00000004);
+ }
+ result.tableName_ = tableName_;
+ } else {
+ result.tableName_ = tableNameBuilder_.build();
+ }
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest.getDefaultInstance()) return this;
+ if (other.hasTargetGroup()) {
+ bitField0_ |= 0x00000001;
+ targetGroup_ = other.targetGroup_;
+ onChanged();
+ }
+ if (serversBuilder_ == null) {
+ if (!other.servers_.isEmpty()) {
+ if (servers_.isEmpty()) {
+ servers_ = other.servers_;
+ bitField0_ = (bitField0_ & ~0x00000002);
+ } else {
+ ensureServersIsMutable();
+ servers_.addAll(other.servers_);
+ }
+ onChanged();
+ }
+ } else {
+ if (!other.servers_.isEmpty()) {
+ if (serversBuilder_.isEmpty()) {
+ serversBuilder_.dispose();
+ serversBuilder_ = null;
+ servers_ = other.servers_;
+ bitField0_ = (bitField0_ & ~0x00000002);
+ serversBuilder_ =
+ com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
+ getServersFieldBuilder() : null;
+ } else {
+ serversBuilder_.addAllMessages(other.servers_);
+ }
+ }
+ }
+ if (tableNameBuilder_ == null) {
+ if (!other.tableName_.isEmpty()) {
+ if (tableName_.isEmpty()) {
+ tableName_ = other.tableName_;
+ bitField0_ = (bitField0_ & ~0x00000004);
+ } else {
+ ensureTableNameIsMutable();
+ tableName_.addAll(other.tableName_);
+ }
+ onChanged();
+ }
+ } else {
+ if (!other.tableName_.isEmpty()) {
+ if (tableNameBuilder_.isEmpty()) {
+ tableNameBuilder_.dispose();
+ tableNameBuilder_ = null;
+ tableName_ = other.tableName_;
+ bitField0_ = (bitField0_ & ~0x00000004);
+ tableNameBuilder_ =
+ com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
+ getTableNameFieldBuilder() : null;
+ } else {
+ tableNameBuilder_.addAllMessages(other.tableName_);
+ }
+ }
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasTargetGroup()) {
+
+ return false;
+ }
+ for (int i = 0; i < getServersCount(); i++) {
+ if (!getServers(i).isInitialized()) {
+
+ return false;
+ }
+ }
+ for (int i = 0; i < getTableNameCount(); i++) {
+ if (!getTableName(i).isInitialized()) {
+
+ return false;
+ }
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // required string target_group = 1;
+ private java.lang.Object targetGroup_ = "";
+ /**
+ * required string target_group = 1;
+ */
+ public boolean hasTargetGroup() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required string target_group = 1;
+ */
+ public java.lang.String getTargetGroup() {
+ java.lang.Object ref = targetGroup_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ targetGroup_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * required string target_group = 1;
+ */
+ public com.google.protobuf.ByteString
+ getTargetGroupBytes() {
+ java.lang.Object ref = targetGroup_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ targetGroup_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * required string target_group = 1;
+ */
+ public Builder setTargetGroup(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ targetGroup_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * required string target_group = 1;
+ */
+ public Builder clearTargetGroup() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ targetGroup_ = getDefaultInstance().getTargetGroup();
+ onChanged();
+ return this;
+ }
+ /**
+ * required string target_group = 1;
+ */
+ public Builder setTargetGroupBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ targetGroup_ = value;
+ onChanged();
+ return this;
+ }
+
+ // repeated .hbase.pb.ServerName servers = 2;
+ private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName> servers_ =
+ java.util.Collections.emptyList();
+ private void ensureServersIsMutable() {
+ if (!((bitField0_ & 0x00000002) == 0x00000002)) {
+ servers_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName>(servers_);
+ bitField0_ |= 0x00000002;
+ }
+ }
+
+ private com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> serversBuilder_;
+
+ /**
+ * repeated .hbase.pb.ServerName servers = 2;
+ */
+ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName> getServersList() {
+ if (serversBuilder_ == null) {
+ return java.util.Collections.unmodifiableList(servers_);
+ } else {
+ return serversBuilder_.getMessageList();
+ }
+ }
+ /**
+ * repeated .hbase.pb.ServerName servers = 2;
+ */
+ public int getServersCount() {
+ if (serversBuilder_ == null) {
+ return servers_.size();
+ } else {
+ return serversBuilder_.getCount();
+ }
+ }
+ /**
+ * repeated .hbase.pb.ServerName servers = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServers(int index) {
+ if (serversBuilder_ == null) {
+ return servers_.get(index);
+ } else {
+ return serversBuilder_.getMessage(index);
+ }
+ }
+ /**
+ * repeated .hbase.pb.ServerName servers = 2;
+ */
+ public Builder setServers(
+ int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
+ if (serversBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureServersIsMutable();
+ servers_.set(index, value);
+ onChanged();
+ } else {
+ serversBuilder_.setMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.ServerName servers = 2;
+ */
+ public Builder setServers(
+ int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) {
+ if (serversBuilder_ == null) {
+ ensureServersIsMutable();
+ servers_.set(index, builderForValue.build());
+ onChanged();
+ } else {
+ serversBuilder_.setMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.ServerName servers = 2;
+ */
+ public Builder addServers(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
+ if (serversBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureServersIsMutable();
+ servers_.add(value);
+ onChanged();
+ } else {
+ serversBuilder_.addMessage(value);
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.ServerName servers = 2;
+ */
+ public Builder addServers(
+ int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
+ if (serversBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureServersIsMutable();
+ servers_.add(index, value);
+ onChanged();
+ } else {
+ serversBuilder_.addMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.ServerName servers = 2;
+ */
+ public Builder addServers(
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) {
+ if (serversBuilder_ == null) {
+ ensureServersIsMutable();
+ servers_.add(builderForValue.build());
+ onChanged();
+ } else {
+ serversBuilder_.addMessage(builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.ServerName servers = 2;
+ */
+ public Builder addServers(
+ int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) {
+ if (serversBuilder_ == null) {
+ ensureServersIsMutable();
+ servers_.add(index, builderForValue.build());
+ onChanged();
+ } else {
+ serversBuilder_.addMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.ServerName servers = 2;
+ */
+ public Builder addAllServers(
+ java.lang.Iterable<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName> values) {
+ if (serversBuilder_ == null) {
+ ensureServersIsMutable();
+ super.addAll(values, servers_);
+ onChanged();
+ } else {
+ serversBuilder_.addAllMessages(values);
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.ServerName servers = 2;
+ */
+ public Builder clearServers() {
+ if (serversBuilder_ == null) {
+ servers_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000002);
+ onChanged();
+ } else {
+ serversBuilder_.clear();
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.ServerName servers = 2;
+ */
+ public Builder removeServers(int index) {
+ if (serversBuilder_ == null) {
+ ensureServersIsMutable();
+ servers_.remove(index);
+ onChanged();
+ } else {
+ serversBuilder_.remove(index);
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.ServerName servers = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder getServersBuilder(
+ int index) {
+ return getServersFieldBuilder().getBuilder(index);
+ }
+ /**
+ * repeated .hbase.pb.ServerName servers = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServersOrBuilder(
+ int index) {
+ if (serversBuilder_ == null) {
+ return servers_.get(index); } else {
+ return serversBuilder_.getMessageOrBuilder(index);
+ }
+ }
+ /**
+ * repeated .hbase.pb.ServerName servers = 2;
+ */
+ public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>
+ getServersOrBuilderList() {
+ if (serversBuilder_ != null) {
+ return serversBuilder_.getMessageOrBuilderList();
+ } else {
+ return java.util.Collections.unmodifiableList(servers_);
+ }
+ }
+ /**
+ * repeated .hbase.pb.ServerName servers = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder addServersBuilder() {
+ return getServersFieldBuilder().addBuilder(
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance());
+ }
+ /**
+ * repeated .hbase.pb.ServerName servers = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder addServersBuilder(
+ int index) {
+ return getServersFieldBuilder().addBuilder(
+ index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance());
+ }
+ /**
+ * repeated .hbase.pb.ServerName servers = 2;
+ */
+ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder>
+ getServersBuilderList() {
+ return getServersFieldBuilder().getBuilderList();
+ }
+ private com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>
+ getServersFieldBuilder() {
+ if (serversBuilder_ == null) {
+ serversBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>(
+ servers_,
+ ((bitField0_ & 0x00000002) == 0x00000002),
+ getParentForChildren(),
+ isClean());
+ servers_ = null;
+ }
+ return serversBuilder_;
+ }
+
+ // repeated .hbase.pb.TableName table_name = 3;
+ private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName> tableName_ =
+ java.util.Collections.emptyList();
+ private void ensureTableNameIsMutable() {
+ if (!((bitField0_ & 0x00000004) == 0x00000004)) {
+ tableName_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName>(tableName_);
+ bitField0_ |= 0x00000004;
+ }
+ }
+
+ private com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableNameBuilder_;
+
+ /**
+ * repeated .hbase.pb.TableName table_name = 3;
+ */
+ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName> getTableNameList() {
+ if (tableNameBuilder_ == null) {
+ return java.util.Collections.unmodifiableList(tableName_);
+ } else {
+ return tableNameBuilder_.getMessageList();
+ }
+ }
+ /**
+ * repeated .hbase.pb.TableName table_name = 3;
+ */
+ public int getTableNameCount() {
+ if (tableNameBuilder_ == null) {
+ return tableName_.size();
+ } else {
+ return tableNameBuilder_.getCount();
+ }
+ }
+ /**
+ * repeated .hbase.pb.TableName table_name = 3;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName(int index) {
+ if (tableNameBuilder_ == null) {
+ return tableName_.get(index);
+ } else {
+ return tableNameBuilder_.getMessage(index);
+ }
+ }
+ /**
+ * repeated .hbase.pb.TableName table_name = 3;
+ */
+ public Builder setTableName(
+ int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) {
+ if (tableNameBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureTableNameIsMutable();
+ tableName_.set(index, value);
+ onChanged();
+ } else {
+ tableNameBuilder_.setMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.TableName table_name = 3;
+ */
+ public Builder setTableName(
+ int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) {
+ if (tableNameBuilder_ == null) {
+ ensureTableNameIsMutable();
+ tableName_.set(index, builderForValue.build());
+ onChanged();
+ } else {
+ tableNameBuilder_.setMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.TableName table_name = 3;
+ */
+ public Builder addTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) {
+ if (tableNameBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureTableNameIsMutable();
+ tableName_.add(value);
+ onChanged();
+ } else {
+ tableNameBuilder_.addMessage(value);
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.TableName table_name = 3;
+ */
+ public Builder addTableName(
+ int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) {
+ if (tableNameBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureTableNameIsMutable();
+ tableName_.add(index, value);
+ onChanged();
+ } else {
+ tableNameBuilder_.addMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.TableName table_name = 3;
+ */
+ public Builder addTableName(
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) {
+ if (tableNameBuilder_ == null) {
+ ensureTableNameIsMutable();
+ tableName_.add(builderForValue.build());
+ onChanged();
+ } else {
+ tableNameBuilder_.addMessage(builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.TableName table_name = 3;
+ */
+ public Builder addTableName(
+ int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) {
+ if (tableNameBuilder_ == null) {
+ ensureTableNameIsMutable();
+ tableName_.add(index, builderForValue.build());
+ onChanged();
+ } else {
+ tableNameBuilder_.addMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.TableName table_name = 3;
+ */
+ public Builder addAllTableName(
+ java.lang.Iterable<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName> values) {
+ if (tableNameBuilder_ == null) {
+ ensureTableNameIsMutable();
+ super.addAll(values, tableName_);
+ onChanged();
+ } else {
+ tableNameBuilder_.addAllMessages(values);
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.TableName table_name = 3;
+ */
+ public Builder clearTableName() {
+ if (tableNameBuilder_ == null) {
+ tableName_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000004);
+ onChanged();
+ } else {
+ tableNameBuilder_.clear();
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.TableName table_name = 3;
+ */
+ public Builder removeTableName(int index) {
+ if (tableNameBuilder_ == null) {
+ ensureTableNameIsMutable();
+ tableName_.remove(index);
+ onChanged();
+ } else {
+ tableNameBuilder_.remove(index);
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.TableName table_name = 3;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableNameBuilder(
+ int index) {
+ return getTableNameFieldBuilder().getBuilder(index);
+ }
+ /**
+ * repeated .hbase.pb.TableName table_name = 3;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder(
+ int index) {
+ if (tableNameBuilder_ == null) {
+ return tableName_.get(index); } else {
+ return tableNameBuilder_.getMessageOrBuilder(index);
+ }
+ }
+ /**
+ * repeated .hbase.pb.TableName table_name = 3;
+ */
+ public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>
+ getTableNameOrBuilderList() {
+ if (tableNameBuilder_ != null) {
+ return tableNameBuilder_.getMessageOrBuilderList();
+ } else {
+ return java.util.Collections.unmodifiableList(tableName_);
+ }
+ }
+ /**
+ * repeated .hbase.pb.TableName table_name = 3;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder addTableNameBuilder() {
+ return getTableNameFieldBuilder().addBuilder(
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance());
+ }
+ /**
+ * repeated .hbase.pb.TableName table_name = 3;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder addTableNameBuilder(
+ int index) {
+ return getTableNameFieldBuilder().addBuilder(
+ index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance());
+ }
+ /**
+ * repeated .hbase.pb.TableName table_name = 3;
+ */
+ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder>
+ getTableNameBuilderList() {
+ return getTableNameFieldBuilder().getBuilderList();
+ }
+ private com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>
+ getTableNameFieldBuilder() {
+ if (tableNameBuilder_ == null) {
+ tableNameBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>(
+ tableName_,
+ ((bitField0_ & 0x00000004) == 0x00000004),
+ getParentForChildren(),
+ isClean());
+ tableName_ = null;
+ }
+ return tableNameBuilder_;
+ }
+
+ // @@protoc_insertion_point(builder_scope:hbase.pb.MoveServersAndTablesRequest)
+ }
+
+ static {
+ defaultInstance = new MoveServersAndTablesRequest(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:hbase.pb.MoveServersAndTablesRequest)
+ }
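A minimal sketch of how client code might populate the new request message through the builder API generated above; the group name, host, port, namespace and table qualifier are hypothetical placeholder values, not taken from this patch.

import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos;

// Build a MoveServersAndTablesRequest with one target group, one server and one table.
RSGroupAdminProtos.MoveServersAndTablesRequest request =
    RSGroupAdminProtos.MoveServersAndTablesRequest.newBuilder()
        .setTargetGroup("my_group")                        // hypothetical group name
        .addServers(HBaseProtos.ServerName.newBuilder()
            .setHostName("rs1.example.com")                // hypothetical region server
            .setPort(16020)
            .build())
        .addTableName(HBaseProtos.TableName.newBuilder()
            .setNamespace(com.google.protobuf.ByteString.copyFromUtf8("default"))
            .setQualifier(com.google.protobuf.ByteString.copyFromUtf8("t1"))
            .build())
        .build();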
+
+ public interface MoveServersAndTablesResponseOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+ }
+ /**
+ * Protobuf type {@code hbase.pb.MoveServersAndTablesResponse}
+ */
+ public static final class MoveServersAndTablesResponse extends
+ com.google.protobuf.GeneratedMessage
+ implements MoveServersAndTablesResponseOrBuilder {
+ // Use MoveServersAndTablesResponse.newBuilder() to construct.
+ private MoveServersAndTablesResponse(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private MoveServersAndTablesResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final MoveServersAndTablesResponse defaultInstance;
+ public static MoveServersAndTablesResponse getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public MoveServersAndTablesResponse getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private MoveServersAndTablesResponse(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_MoveServersAndTablesResponse_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_MoveServersAndTablesResponse_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<MoveServersAndTablesResponse> PARSER =
+ new com.google.protobuf.AbstractParser<MoveServersAndTablesResponse>() {
+ public MoveServersAndTablesResponse parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new MoveServersAndTablesResponse(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<MoveServersAndTablesResponse> getParserForType() {
+ return PARSER;
+ }
+
+ private void initFields() {
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse other = (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse) obj;
+
+ boolean result = true;
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code hbase.pb.MoveServersAndTablesResponse}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponseOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_MoveServersAndTablesResponse_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_MoveServersAndTablesResponse_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_MoveServersAndTablesResponse_descriptor;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse build() {
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse result = new org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse(this);
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse.getDefaultInstance()) return this;
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:hbase.pb.MoveServersAndTablesResponse)
+ }
+
+ static {
+ defaultInstance = new MoveServersAndTablesResponse(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:hbase.pb.MoveServersAndTablesResponse)
+ }
+
/**
* Protobuf service {@code hbase.pb.RSGroupAdminService}
*/
@@ -10834,6 +12449,14 @@ public final class RSGroupAdminProtos {
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosResponse> done);
+ /**
+ * rpc MoveServersAndTables(.hbase.pb.MoveServersAndTablesRequest) returns (.hbase.pb.MoveServersAndTablesResponse);
+ */
+ public abstract void moveServersAndTables(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse> done);
+
}
public static com.google.protobuf.Service newReflectiveService(
@@ -10911,6 +12534,14 @@ public final class RSGroupAdminProtos {
impl.listRSGroupInfos(controller, request, done);
}
+ @java.lang.Override
+ public void moveServersAndTables(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse> done) {
+ impl.moveServersAndTables(controller, request, done);
+ }
+
};
}
@@ -10951,6 +12582,8 @@ public final class RSGroupAdminProtos {
return impl.balanceRSGroup(controller, (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupRequest)request);
case 8:
return impl.listRSGroupInfos(controller, (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest)request);
+ case 9:
+ return impl.moveServersAndTables(controller, (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest)request);
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -10983,6 +12616,8 @@ public final class RSGroupAdminProtos {
return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupRequest.getDefaultInstance();
case 8:
return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest.getDefaultInstance();
+ case 9:
+ return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -11015,6 +12650,8 @@ public final class RSGroupAdminProtos {
return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupResponse.getDefaultInstance();
case 8:
return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosResponse.getDefaultInstance();
+ case 9:
+ return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -11095,6 +12732,14 @@ public final class RSGroupAdminProtos {
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosResponse> done);
+ /**
+ * rpc MoveServersAndTables(.hbase.pb.MoveServersAndTablesRequest) returns (.hbase.pb.MoveServersAndTablesResponse);
+ */
+ public abstract void moveServersAndTables(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse> done);
+
public static final
com.google.protobuf.Descriptors.ServiceDescriptor
getDescriptor() {
@@ -11162,6 +12807,11 @@ public final class RSGroupAdminProtos {
com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
+ case 9:
+ this.moveServersAndTables(controller, (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest)request,
+ com.google.protobuf.RpcUtil.specializeCallback(
+ done));
+ return;
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -11194,6 +12844,8 @@ public final class RSGroupAdminProtos {
return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupRequest.getDefaultInstance();
case 8:
return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest.getDefaultInstance();
+ case 9:
+ return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -11226,6 +12878,8 @@ public final class RSGroupAdminProtos {
return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupResponse.getDefaultInstance();
case 8:
return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosResponse.getDefaultInstance();
+ case 9:
+ return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -11381,6 +13035,21 @@ public final class RSGroupAdminProtos {
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosResponse.class,
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosResponse.getDefaultInstance()));
}
+
+ public void moveServersAndTables(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse> done) {
+ channel.callMethod(
+ getDescriptor().getMethods().get(9),
+ controller,
+ request,
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse.getDefaultInstance(),
+ com.google.protobuf.RpcUtil.generalizeCallback(
+ done,
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse.class,
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse.getDefaultInstance()));
+ }
}
public static BlockingInterface newBlockingStub(
@@ -11433,6 +13102,11 @@ public final class RSGroupAdminProtos {
com.google.protobuf.RpcController controller,
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest request)
throws com.google.protobuf.ServiceException;
+
+ public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse moveServersAndTables(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest request)
+ throws com.google.protobuf.ServiceException;
}
private static final class BlockingStub implements BlockingInterface {
@@ -11549,6 +13223,18 @@ public final class RSGroupAdminProtos {
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosResponse.getDefaultInstance());
}
+
+ public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse moveServersAndTables(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest request)
+ throws com.google.protobuf.ServiceException {
+ return (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse) channel.callBlockingMethod(
+ getDescriptor().getMethods().get(9),
+ controller,
+ request,
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse.getDefaultInstance());
+ }
+
}
// @@protoc_insertion_point(class_scope:hbase.pb.RSGroupAdminService)
@@ -11654,6 +13340,16 @@ public final class RSGroupAdminProtos {
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hbase_pb_GetRSGroupInfoOfServerResponse_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_hbase_pb_MoveServersAndTablesRequest_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_hbase_pb_MoveServersAndTablesRequest_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_hbase_pb_MoveServersAndTablesResponse_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_hbase_pb_MoveServersAndTablesResponse_fieldAccessorTable;
public static com.google.protobuf.Descriptors.FileDescriptor
getDescriptor() {
@@ -11692,28 +13388,35 @@ public final class RSGroupAdminProtos {
"t\022$\n\006server\030\002 \002(\0132\024.hbase.pb.ServerName\"" +
"O\n\036GetRSGroupInfoOfServerResponse\022-\n\016r_s" +
"_group_info\030\001 \001(\0132\025.hbase.pb.RSGroupInfo" +
- "2\241\006\n\023RSGroupAdminService\022S\n\016GetRSGroupIn",
- "fo\022\037.hbase.pb.GetRSGroupInfoRequest\032 .hb" +
- "ase.pb.GetRSGroupInfoResponse\022h\n\025GetRSGr" +
- "oupInfoOfTable\022&.hbase.pb.GetRSGroupInfo" +
- "OfTableRequest\032\'.hbase.pb.GetRSGroupInfo" +
- "OfTableResponse\022k\n\026GetRSGroupInfoOfServe" +
- "r\022\'.hbase.pb.GetRSGroupInfoOfServerReque" +
- "st\032(.hbase.pb.GetRSGroupInfoOfServerResp" +
- "onse\022J\n\013MoveServers\022\034.hbase.pb.MoveServe" +
- "rsRequest\032\035.hbase.pb.MoveServersResponse" +
- "\022G\n\nMoveTables\022\033.hbase.pb.MoveTablesRequ",
- "est\032\034.hbase.pb.MoveTablesResponse\022G\n\nAdd" +
- "RSGroup\022\033.hbase.pb.AddRSGroupRequest\032\034.h" +
- "base.pb.AddRSGroupResponse\022P\n\rRemoveRSGr" +
- "oup\022\036.hbase.pb.RemoveRSGroupRequest\032\037.hb" +
- "ase.pb.RemoveRSGroupResponse\022S\n\016BalanceR" +
- "SGroup\022\037.hbase.pb.BalanceRSGroupRequest\032" +
- " .hbase.pb.BalanceRSGroupResponse\022Y\n\020Lis" +
- "tRSGroupInfos\022!.hbase.pb.ListRSGroupInfo" +
- "sRequest\032\".hbase.pb.ListRSGroupInfosResp" +
- "onseBH\n*org.apache.hadoop.hbase.protobuf",
- ".generatedB\022RSGroupAdminProtosH\001\210\001\001\240\001\001"
+ "\"\203\001\n\033MoveServersAndTablesRequest\022\024\n\014targ",
+ "et_group\030\001 \002(\t\022%\n\007servers\030\002 \003(\0132\024.hbase." +
+ "pb.ServerName\022\'\n\ntable_name\030\003 \003(\0132\023.hbas" +
+ "e.pb.TableName\"\036\n\034MoveServersAndTablesRe" +
+ "sponse2\210\007\n\023RSGroupAdminService\022S\n\016GetRSG" +
+ "roupInfo\022\037.hbase.pb.GetRSGroupInfoReques" +
+ "t\032 .hbase.pb.GetRSGroupInfoResponse\022h\n\025G" +
+ "etRSGroupInfoOfTable\022&.hbase.pb.GetRSGro" +
+ "upInfoOfTableRequest\032\'.hbase.pb.GetRSGro" +
+ "upInfoOfTableResponse\022k\n\026GetRSGroupInfoO" +
+ "fServer\022\'.hbase.pb.GetRSGroupInfoOfServe",
+ "rRequest\032(.hbase.pb.GetRSGroupInfoOfServ" +
+ "erResponse\022J\n\013MoveServers\022\034.hbase.pb.Mov" +
+ "eServersRequest\032\035.hbase.pb.MoveServersRe" +
+ "sponse\022G\n\nMoveTables\022\033.hbase.pb.MoveTabl" +
+ "esRequest\032\034.hbase.pb.MoveTablesResponse\022" +
+ "G\n\nAddRSGroup\022\033.hbase.pb.AddRSGroupReque" +
+ "st\032\034.hbase.pb.AddRSGroupResponse\022P\n\rRemo" +
+ "veRSGroup\022\036.hbase.pb.RemoveRSGroupReques" +
+ "t\032\037.hbase.pb.RemoveRSGroupResponse\022S\n\016Ba" +
+ "lanceRSGroup\022\037.hbase.pb.BalanceRSGroupRe",
+ "quest\032 .hbase.pb.BalanceRSGroupResponse\022" +
+ "Y\n\020ListRSGroupInfos\022!.hbase.pb.ListRSGro" +
+ "upInfosRequest\032\".hbase.pb.ListRSGroupInf" +
+ "osResponse\022e\n\024MoveServersAndTables\022%.hba" +
+ "se.pb.MoveServersAndTablesRequest\032&.hbas" +
+ "e.pb.MoveServersAndTablesResponseBH\n*org" +
+ ".apache.hadoop.hbase.protobuf.generatedB" +
+ "\022RSGroupAdminProtosH\001\210\001\001\240\001\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -11840,6 +13543,18 @@ public final class RSGroupAdminProtos {
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_GetRSGroupInfoOfServerResponse_descriptor,
new java.lang.String[] { "RSGroupInfo", });
+ internal_static_hbase_pb_MoveServersAndTablesRequest_descriptor =
+ getDescriptor().getMessageTypes().get(20);
+ internal_static_hbase_pb_MoveServersAndTablesRequest_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_hbase_pb_MoveServersAndTablesRequest_descriptor,
+ new java.lang.String[] { "TargetGroup", "Servers", "TableName", });
+ internal_static_hbase_pb_MoveServersAndTablesResponse_descriptor =
+ getDescriptor().getMessageTypes().get(21);
+ internal_static_hbase_pb_MoveServersAndTablesResponse_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_hbase_pb_MoveServersAndTablesResponse_descriptor,
+ new java.lang.String[] { });
return null;
}
};
diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdmin.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdmin.java
index 20fdaa2..c7e6f85 100644
--- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdmin.java
+++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdmin.java
@@ -78,4 +78,14 @@ public interface RSGroupAdmin {
* @param hostPort HostPort to get RSGroupInfo for
*/
RSGroupInfo getRSGroupOfServer(Address hostPort) throws IOException;
+
+ /**
+ * Move given set of servers and tables to the specified target RegionServer group.
+ * @param servers set of servers to move
+ * @param tables set of tables to move
+ * @param targetGroup the target group name
+ * @throws IOException
+ */
+ void moveServersAndTables(Set<Address> servers, Set<TableName> tables,
+ String targetGroup) throws IOException;
}
\ No newline at end of file
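
A minimal caller-side sketch of the new RSGroupAdmin#moveServersAndTables API, for review only (not part of the patch). It assumes an RSGroupAdmin handle has already been obtained (for example through the shell wrapper added later in this patch); the host:port, table and group names are illustrative placeholders.

import java.io.IOException;
import java.util.HashSet;
import java.util.Set;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.net.Address;
import org.apache.hadoop.hbase.rsgroup.RSGroupAdmin;

public class MoveServersAndTablesExample {
  // Move one RegionServer and one table into "my_group" in a single call, so the
  // table's regions are not shuffled through an intermediate group first.
  static void moveTogether(RSGroupAdmin rsGroupAdmin) throws IOException {
    Set<Address> servers = new HashSet<>();
    servers.add(Address.fromString("rs1.example.com:16020")); // placeholder host:port
    Set<TableName> tables = new HashSet<>();
    tables.add(TableName.valueOf("t1"));                      // placeholder table
    rsGroupAdmin.moveServersAndTables(servers, tables, "my_group");
  }
}
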
diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminClient.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminClient.java
index dfec736..9a45e6e 100644
--- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminClient.java
+++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminClient.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupI
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoRequest;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoResponse;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest;
+import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersRequest;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesRequest;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RSGroupAdminService;
@@ -183,4 +184,25 @@ class RSGroupAdminClient implements RSGroupAdmin {
throw ProtobufUtil.handleRemoteException(e);
}
}
-}
+
+ @Override
+ public void moveServersAndTables(Set<Address> servers, Set<TableName> tables, String targetGroup)
+ throws IOException {
+ MoveServersAndTablesRequest.Builder builder =
+ MoveServersAndTablesRequest.newBuilder().setTargetGroup(targetGroup);
+ for (Address el : servers) {
+ builder.addServers(HBaseProtos.ServerName.newBuilder()
+ .setHostName(el.getHostname())
+ .setPort(el.getPort())
+ .build());
+ }
+ for (TableName tableName : tables) {
+ builder.addTableName(ProtobufUtil.toProtoTableName(tableName));
+ }
+ try {
+ stub.moveServersAndTables(null, builder.build());
+ } catch (ServiceException e) {
+ throw ProtobufUtil.handleRemoteException(e);
+ }
+ }
+}
\ No newline at end of file
diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
index b917716..83389e4 100644
--- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
+++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
@@ -58,6 +58,8 @@ import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupI
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoResponse;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosResponse;
+import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest;
+import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersRequest;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersResponse;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesRequest;
@@ -243,6 +245,26 @@ public class RSGroupAdminEndpoint implements MasterObserver, CoprocessorService
}
done.run(builder.build());
}
+
+ @Override
+ public void moveServersAndTables(RpcController controller,
+ MoveServersAndTablesRequest request, RpcCallback<MoveServersAndTablesResponse> done) {
+ MoveServersAndTablesResponse.Builder builder = MoveServersAndTablesResponse.newBuilder();
+ try {
+ Set<Address> hostPorts = Sets.newHashSet();
+ for (HBaseProtos.ServerName el : request.getServersList()) {
+ hostPorts.add(Address.fromParts(el.getHostName(), el.getPort()));
+ }
+ Set<TableName> tables = new HashSet<>(request.getTableNameList().size());
+ for (HBaseProtos.TableName tableName : request.getTableNameList()) {
+ tables.add(ProtobufUtil.toTableName(tableName));
+ }
+ groupAdminServer.moveServersAndTables(hostPorts, tables, request.getTargetGroup());
+ } catch (IOException e) {
+ CoprocessorRpcUtils.setControllerException(controller, e);
+ }
+ done.run(builder.build());
+ }
}
/////////////////////////////////////////////////////////////////////////////
diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
index 811cf71..3c0cccf 100644
--- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
+++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
@@ -138,6 +138,139 @@ public class RSGroupAdminServer implements RSGroupAdmin {
else regions.addFirst(hri);
}
+ /**
+ * Check servers and tables.
+ * Fail on nulls or if the servers and tables do not all belong to the same source group.
+ * @param servers servers to move
+ * @param tables tables to move
+ * @param targetGroupName target group name
+ * @throws IOException
+ */
+ private void checkServersAndTables(Set<Address> servers, Set<TableName> tables,
+ String targetGroupName) throws IOException {
+ // Presume first server's source group. Later ensure all servers are from this group.
+ Address firstServer = servers.iterator().next();
+ RSGroupInfo tmpSrcGrp = rsGroupInfoManager.getRSGroupOfServer(firstServer);
+ if (tmpSrcGrp == null) {
+ // Be careful. This exception message is tested for in TestRSGroupsBase...
+ throw new ConstraintException("Source RSGroup for server " + firstServer
+ + " does not exist.");
+ }
+ RSGroupInfo srcGrp = new RSGroupInfo(tmpSrcGrp);
+ if (srcGrp.getName().equals(targetGroupName)) {
+ throw new ConstraintException( "Target RSGroup " + targetGroupName +
+ " is same as source " + srcGrp.getName() + " RSGroup.");
+ }
+ // Only move online servers
+ checkOnlineServersOnly(servers);
+
+ // Ensure all servers are of same rsgroup.
+ for (Address server: servers) {
+ String tmpGroup = rsGroupInfoManager.getRSGroupOfServer(server).getName();
+ if (!tmpGroup.equals(srcGrp.getName())) {
+ throw new ConstraintException("Move server request should only come from one source " +
+ "RSGroup. Expecting only " + srcGrp.getName() + " but contains " + tmpGroup);
+ }
+ }
+
+ // Ensure all tables and servers are of same rsgroup.
+ for (TableName table : tables) {
+ String tmpGroup = rsGroupInfoManager.getRSGroupOfTable(table);
+ if (!tmpGroup.equals(srcGrp.getName())) {
+ throw new ConstraintException("Move table request should only come from one source " +
+ "RSGroup. Expecting only " + srcGrp.getName() + " but contains " + tmpGroup);
+ }
+ }
+
+ if (srcGrp.getServers().size() <= servers.size()
+ && srcGrp.getTables().size() > tables.size() ) {
+ throw new ConstraintException("Cannot leave a RSGroup " + srcGrp.getName() +
+ " that contains tables without servers to host them.");
+ }
+ }
+
+ /**
+ * @param servers the servers that will move to new group
+ * @param targetGroupName the target group name
+ * @param tables regions of these tables that are already assigned to the servers will not be unassigned
+ * @throws IOException
+ */
+ private void unassignRegionFromServers(Set<Address> servers, String targetGroupName,
+ Set<TableName> tables) throws IOException {
+ boolean foundRegionsToUnassign;
+ RSGroupInfo targetGrp = getRSGroupInfo(targetGroupName);
+ Set<Address> allServers = new HashSet<>(servers);
+ do {
+ foundRegionsToUnassign = false;
+ for (Iterator<Address> iter = allServers.iterator(); iter.hasNext();) {
+ Address rs = iter.next();
+ // Get regions that are associated with this server and filter regions by tables.
+ List<HRegionInfo> regions = new ArrayList<>();
+ for (HRegionInfo region : getRegions(rs)) {
+ if (!tables.contains(region.getTable())) {
+ regions.add(region);
+ }
+ }
+
+ LOG.info("Unassigning " + regions.size() +
+ " region(s) from " + rs + " for server move to " + targetGroupName);
+ if (!regions.isEmpty()) {
+ for (HRegionInfo region: regions) {
+ // Regions might get assigned from tables of target group so we need to filter
+ if (!targetGrp.containsTable(region.getTable())) {
+ this.master.getAssignmentManager().unassign(region);
+ if (master.getAssignmentManager().getRegionStates().
+ getRegionState(region).isFailedOpen()) {
+ continue;
+ }
+ foundRegionsToUnassign = true;
+ }
+ }
+ }
+ if (!foundRegionsToUnassign) {
+ iter.remove();
+ }
+ }
+ try {
+ rsGroupInfoManager.wait(1000);
+ } catch (InterruptedException e) {
+ LOG.warn("Sleep interrupted", e);
+ Thread.currentThread().interrupt();
+ }
+ } while (foundRegionsToUnassign);
+ }
+
+ /**
+ * @param tables the tables that will move to the new group
+ * @param targetGroupName the target group name
+ * @param servers regions of the tables that are already assigned to these servers will not be unassigned
+ * @throws IOException
+ */
+ private void unassignRegionFromTables(Set<TableName> tables, String targetGroupName,
+ Set<Address> servers) throws IOException {
+ for (TableName table: tables) {
+ LOG.info("Unassigning region(s) from " + table + " for table move to " + targetGroupName);
+ LockManager.MasterLock lock = master.getLockManager().createMasterLock(table,
+ LockProcedure.LockType.EXCLUSIVE, this.getClass().getName() + ": RSGroup: table move");
+ try {
+ try {
+ lock.acquire();
+ } catch (InterruptedException e) {
+ throw new IOException("Interrupted when waiting for table lock", e);
+ }
+ for (HRegionInfo region :
+ master.getAssignmentManager().getRegionStates().getRegionsOfTable(table)) {
+ ServerName sn = master.getAssignmentManager().getRegionStates().getRegionServerOfRegion(region);
+ if (!servers.contains(sn.getAddress())) {
+ master.getAssignmentManager().unassign(region);
+ }
+ }
+ } finally {
+ lock.release();
+ }
+ }
+ }
+
@edu.umd.cs.findbugs.annotations.SuppressWarnings(
value="RCN_REDUNDANT_NULLCHECK_WOULD_HAVE_BEEN_A_NPE",
justification="Ignoring complaint because don't know what it is complaining about")
@@ -420,6 +553,45 @@ public class RSGroupAdminServer implements RSGroupAdmin {
return rsGroupInfoManager.getRSGroupOfServer(hostPort);
}
+ @Override
+ public void moveServersAndTables(Set<Address> servers, Set<TableName> tables, String targetGroup)
+ throws IOException {
+ if (servers == null || servers.isEmpty()) {
+ throw new ConstraintException("The list of servers to move cannot be null or empty.");
+ }
+ if (tables == null || tables.isEmpty()) {
+ throw new ConstraintException("The list of tables to move cannot be null or empty.");
+ }
+
+ //check target group
+ getAndCheckRSGroupInfo(targetGroup);
+
+ // Hold a lock on the manager instance while moving servers and tables to prevent
+ // another writer changing our state while we are working.
+ synchronized (rsGroupInfoManager) {
+ if (master.getMasterCoprocessorHost() != null) {
+ master.getMasterCoprocessorHost().preMoveServersAndTables(servers, tables, targetGroup);
+ }
+ //check servers and tables status
+ checkServersAndTables(servers, tables, targetGroup);
+
+ //Move servers and tables to a new group.
+ String srcGroup = getRSGroupOfServer(servers.iterator().next()).getName();
+ rsGroupInfoManager.moveServersAndTables(servers, tables, srcGroup, targetGroup);
+
+ // Unassign regions on these servers that do not belong to these tables.
+ unassignRegionFromServers(servers, targetGroup, tables);
+ // Unassign regions of these tables that are not already on these servers.
+ unassignRegionFromTables(tables, targetGroup, servers);
+
+ if (master.getMasterCoprocessorHost() != null) {
+ master.getMasterCoprocessorHost().postMoveServersAndTables(servers, tables, targetGroup);
+ }
+ }
+ LOG.info("Move servers and tables done. Severs :"
+ + servers + " , Tables : " + tables + " => " + targetGroup);
+ }
+
private Map rsGroupGetRegionsInTransition(String groupName)
throws IOException {
Map rit = Maps.newTreeMap();
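
To make the rules in checkServersAndTables above easier to review, here is a small self-contained model of the same constraints written with plain Java collections instead of HBase types; it is a conceptual sketch only, group membership is represented as simple maps, the online-server check is omitted, and all names are illustrative.

import java.util.Map;
import java.util.Set;

public class MoveConstraintSketch {
  // Every server and table being moved must come from one source group, the source
  // must differ from the target, and the source may not be left holding tables
  // with no servers to host them.
  static void check(Map<String, Set<String>> groupServers,  // group -> servers
                    Map<String, Set<String>> groupTables,   // group -> tables
                    Set<String> servers, Set<String> tables, String target) {
    String src = groupOf(groupServers, servers.iterator().next());
    if (src.equals(target)) {
      throw new IllegalArgumentException("Target RSGroup " + target + " is same as source " + src);
    }
    for (String s : servers) {
      if (!src.equals(groupOf(groupServers, s))) {
        throw new IllegalArgumentException("Servers span more than one source RSGroup");
      }
    }
    for (String t : tables) {
      if (!src.equals(groupOf(groupTables, t))) {
        throw new IllegalArgumentException("Tables span more than one source RSGroup");
      }
    }
    // Would the move drain the source of servers while tables stay behind? Reject.
    if (groupServers.get(src).size() <= servers.size()
        && groupTables.get(src).size() > tables.size()) {
      throw new IllegalArgumentException("Cannot leave RSGroup " + src
          + " with tables but no servers to host them");
    }
  }

  private static String groupOf(Map<String, Set<String>> membership, String member) {
    for (Map.Entry<String, Set<String>> e : membership.entrySet()) {
      if (e.getValue().contains(member)) {
        return e.getKey();
      }
    }
    throw new IllegalArgumentException("No source RSGroup for " + member);
  }
}
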
diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java
index 88ea04b..ab5c09f 100644
--- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java
+++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java
@@ -104,4 +104,14 @@ public interface RSGroupInfoManager {
* @return whether the manager is in online mode
*/
boolean isOnline();
+
+ /**
+ * Move servers and tables to a new group.
+ * @param servers set of servers to move, all of which must belong to the same source group
+ * @param tables set of tables to move
+ * @param srcGroup groupName being moved from
+ * @param dstGroup groupName being moved to
+ */
+ void moveServersAndTables(Set<Address> servers, Set<TableName> tables,
+ String srcGroup, String dstGroup) throws IOException;
}
diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
index 6d157cc..9f77c77 100644
--- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
+++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
@@ -276,6 +276,30 @@ class RSGroupInfoManagerImpl implements RSGroupInfoManager {
return rsGroupStartupWorker.isOnline();
}
+ @Override
+ public void moveServersAndTables(Set<Address> servers, Set<TableName> tables,
+ String srcGroup, String dstGroup) throws IOException {
+ // Look up the source and destination group info.
+ RSGroupInfo srcGroupInfo = getRSGroupInfo(srcGroup);
+ RSGroupInfo dstGroupInfo = getRSGroupInfo(dstGroup);
+
+ // Move servers.
+ for (Address el : servers) {
+ srcGroupInfo.removeServer(el);
+ dstGroupInfo.addServer(el);
+ }
+ // Move tables.
+ for (TableName tableName : tables) {
+ srcGroupInfo.removeTable(tableName);
+ dstGroupInfo.addTable(tableName);
+ }
+
+ // Persist the changed group info.
+ Map<String, RSGroupInfo> newGroupMap = Maps.newHashMap(rsGroupMap);
+ newGroupMap.put(srcGroupInfo.getName(), srcGroupInfo);
+ newGroupMap.put(dstGroupInfo.getName(), dstGroupInfo);
+ flushConfig(newGroupMap);
+ }
List retrieveGroupListFromGroupTable() throws IOException {
List rsGroupInfoList = Lists.newArrayList();
diff --git a/hbase-rsgroup/src/main/protobuf/RSGroupAdmin.proto b/hbase-rsgroup/src/main/protobuf/RSGroupAdmin.proto
index fda9b09..0213402 100644
--- a/hbase-rsgroup/src/main/protobuf/RSGroupAdmin.proto
+++ b/hbase-rsgroup/src/main/protobuf/RSGroupAdmin.proto
@@ -106,6 +106,15 @@ message GetRSGroupInfoOfServerResponse {
optional RSGroupInfo r_s_group_info = 1;
}
+message MoveServersAndTablesRequest {
+ required string target_group = 1;
+ repeated ServerName servers = 2;
+ repeated TableName table_name = 3;
+}
+
+message MoveServersAndTablesResponse {
+}
+
service RSGroupAdminService {
rpc GetRSGroupInfo(GetRSGroupInfoRequest)
returns (GetRSGroupInfoResponse);
@@ -133,4 +142,7 @@ service RSGroupAdminService {
rpc ListRSGroupInfos(ListRSGroupInfosRequest)
returns (ListRSGroupInfosResponse);
+
+ rpc MoveServersAndTables(MoveServersAndTablesRequest)
+ returns (MoveServersAndTablesResponse);
}
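
As a sketch of how the new message is populated on the wire (mirroring the RSGroupAdminClient change earlier in this patch), the generated builder can be driven directly; the group, host and table names below are placeholder values.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest;

public class MoveServersAndTablesRequestSketch {
  // Builds the new request the same way RSGroupAdminClient does: one required
  // target group plus repeated ServerName and TableName entries.
  static MoveServersAndTablesRequest build() {
    return MoveServersAndTablesRequest.newBuilder()
        .setTargetGroup("my_group")                 // placeholder group name
        .addServers(HBaseProtos.ServerName.newBuilder()
            .setHostName("rs1.example.com")         // placeholder host
            .setPort(16020)
            .build())
        .addTableName(ProtobufUtil.toProtoTableName(TableName.valueOf("t1"))) // placeholder table
        .build();
  }
}
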
diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
index 59853a5..e8cdb78 100644
--- a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
+++ b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
@@ -691,4 +691,115 @@ public abstract class TestRSGroupsBase {
assertTrue(newGroupTables.contains(tableNameA));
assertTrue(newGroupTables.contains(tableNameB));
}
+
+ @Test
+ public void testMoveServersAndTables() throws Exception {
+ final RSGroupInfo newGroup = addGroup(getGroupName(name.getMethodName()), 1);
+ //create table
+ final byte[] familyNameBytes = Bytes.toBytes("f");
+ TEST_UTIL.createMultiRegionTable(tableName, familyNameBytes, 5);
+ TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
+ @Override
+ public boolean evaluate() throws Exception {
+ List<String> regions = getTableRegionMap().get(tableName);
+ if (regions == null)
+ return false;
+ return getTableRegionMap().get(tableName).size() >= 5;
+ }
+ });
+
+ //get server which is not a member of new group
+ ServerName targetServer = null;
+ for(ServerName server : admin.getClusterStatus().getServers()) {
+ if(!newGroup.containsServer(server.getAddress()) &&
+ !rsGroupAdmin.getRSGroupInfo("master").containsServer(server.getAddress())) {
+ targetServer = server;
+ break;
+ }
+ }
+
+ //test fail bogus server move
+ try {
+ rsGroupAdmin.moveServersAndTables(Sets.newHashSet(Address.fromString("foo:9999")),
+ Sets.newHashSet(tableName), newGroup.getName());
+ fail("Bogus servers shouldn't have been successfully moved.");
+ } catch(IOException ex) {
+ String exp = "Source RSGroup for server foo:9999 does not exist.";
+ String msg = "Expected '" + exp + "' in exception message: ";
+ assertTrue(msg + " " + ex.getMessage(), ex.getMessage().contains(exp));
+ }
+
+ //test fail server move
+ try {
+ rsGroupAdmin.moveServersAndTables(Sets.newHashSet(targetServer.getAddress()),
+ Sets.newHashSet(tableName), RSGroupInfo.DEFAULT_GROUP);
+ fail("servers shouldn't have been successfully moved.");
+ } catch(IOException ex) {
+ String exp = "Target RSGroup " + RSGroupInfo.DEFAULT_GROUP +
+ " is same as source " + RSGroupInfo.DEFAULT_GROUP + " RSGroup.";
+ String msg = "Expected '" + exp + "' in exception message: ";
+ assertTrue(msg + " " + ex.getMessage(), ex.getMessage().contains(exp));
+ }
+
+ //verify default group info
+ Assert.assertEquals(3,
+ rsGroupAdmin.getRSGroupInfo(RSGroupInfo.DEFAULT_GROUP).getServers().size());
+ Assert.assertEquals(4,
+ rsGroupAdmin.getRSGroupInfo(RSGroupInfo.DEFAULT_GROUP).getTables().size());
+
+ //verify new group info
+ Assert.assertEquals(1,
+ rsGroupAdmin.getRSGroupInfo(newGroup.getName()).getServers().size());
+ Assert.assertEquals(0,
+ rsGroupAdmin.getRSGroupInfo(newGroup.getName()).getTables().size());
+
+ // Move every region of the table to targetServer.
+ List<String> regionList = getTableRegionMap().get(tableName);
+ for (String region : regionList) {
+ // Lets move this region to the targetServer
+ TEST_UTIL.getAdmin().move(Bytes.toBytes(HRegionInfo.encodeRegionName(Bytes.toBytes(region))),
+ Bytes.toBytes(targetServer.getServerName()));
+ }
+
+ TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
+ @Override
+ public boolean evaluate() throws Exception {
+ return getTableRegionMap().get(tableName) != null &&
+ getTableRegionMap().get(tableName).size() == 5 &&
+ getTableServerRegionMap().get(tableName).size() == 1 &&
+ admin.getClusterStatus().getRegionsInTransition().size() < 1;
+ }
+ });
+
+ // Verify that all regions moved to targetServer.
+ Assert.assertEquals(5, getTableServerRegionMap().get(tableName).get(targetServer).size());
+
+ //move targetServer and table to newGroup
+ LOG.info("moving server and table to newGroup");
+ rsGroupAdmin.moveServersAndTables(Sets.newHashSet(targetServer.getAddress()),
+ Sets.newHashSet(tableName), newGroup.getName());
+
+ //verify group change
+ Assert.assertEquals(newGroup.getName(),
+ rsGroupAdmin.getRSGroupInfoOfTable(tableName).getName());
+
+ // Verify the server is no longer in the old group.
+ Set<Address> defaultServers = rsGroupAdmin.getRSGroupInfo(RSGroupInfo.DEFAULT_GROUP).getServers();
+ assertFalse(defaultServers.contains(targetServer.getAddress()));
+
+ // Verify the server is in the new group.
+ Set<Address> newGroupServers = rsGroupAdmin.getRSGroupInfo(newGroup.getName()).getServers();
+ assertTrue(newGroupServers.contains(targetServer.getAddress()));
+
+ // Verify the table is no longer in the old group.
+ Set<TableName> defaultTables = rsGroupAdmin.getRSGroupInfo(RSGroupInfo.DEFAULT_GROUP).getTables();
+ assertFalse(defaultTables.contains(tableName));
+
+ // Verify the table is in the new group.
+ Set<TableName> newGroupTables = rsGroupAdmin.getRSGroupInfo(newGroup.getName()).getTables();
+ assertTrue(newGroupTables.contains(tableName));
+
+ // Verify that all regions are still assigned to targetServer.
+ Assert.assertEquals(5, getTableServerRegionMap().get(tableName).get(targetServer).size());
+ }
}
diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdminClient.java b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdminClient.java
index 77b7cea..2e89110 100644
--- a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdminClient.java
+++ b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdminClient.java
@@ -103,6 +103,12 @@ public class VerifyingRSGroupAdminClient implements RSGroupAdmin {
return wrapped.getRSGroupOfServer(hostPort);
}
+ @Override
+ public void moveServersAndTables(Set<Address> servers, Set<TableName> tables, String targetGroup) throws IOException {
+ wrapped.moveServersAndTables(servers, tables, targetGroup);
+ verify();
+ }
+
public void verify() throws IOException {
Map groupMap = Maps.newHashMap();
Set zList = Sets.newHashSet();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
index 9d7a395..aab852c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
@@ -1652,6 +1652,24 @@ public interface MasterObserver extends Coprocessor {
* @param servers set of servers to move
* @param targetGroup destination group
*/
+ default void preMoveServersAndTables(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+ Set<Address> servers, Set<TableName> tables, String targetGroup) throws IOException {}
+
+ /**
+ * Called after servers and tables are moved to the target region server group
+ * @param ctx the environment to interact with the framework and master
+ * @param servers set of servers to move
+ * @param tables set of tables to move
+ * @param targetGroup name of group
+ */
+ default void postMoveServersAndTables(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+ Set<Address> servers, Set<TableName> tables, String targetGroup) throws IOException {}
+
+ /**
+ * Called before servers are moved to target region server group
+ * @param ctx the environment to interact with the framework and master
+ * @param servers set of servers to move
+ * @param targetGroup destination group
+ */
default void preMoveServers(final ObserverContext ctx,
Set servers, String targetGroup) throws IOException {}
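
A sketch of a third-party MasterObserver that hooks the two new callbacks, for example to veto combined moves that touch a system table; this is illustrative only and assumes the pre-2.0 coprocessor lifecycle, so empty start/stop overrides are included.

import java.io.IOException;
import java.util.Set;

import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.MasterObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.net.Address;

public class GroupMoveAuditObserver implements MasterObserver {
  @Override
  public void start(CoprocessorEnvironment env) throws IOException { }

  @Override
  public void stop(CoprocessorEnvironment env) throws IOException { }

  @Override
  public void preMoveServersAndTables(ObserverContext<MasterCoprocessorEnvironment> ctx,
      Set<Address> servers, Set<TableName> tables, String targetGroup) throws IOException {
    // Veto any combined move that would drag a system table into another group.
    for (TableName table : tables) {
      if (table.isSystemTable()) {
        throw new IOException("Refusing to move system table " + table
            + " to RSGroup " + targetGroup);
      }
    }
  }

  @Override
  public void postMoveServersAndTables(ObserverContext<MasterCoprocessorEnvironment> ctx,
      Set<Address> servers, Set<TableName> tables, String targetGroup) throws IOException {
    // A real observer might write an audit record here; kept as a no-op in this sketch.
  }
}
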
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
index 7f296f4..8a7a387 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
@@ -1511,6 +1511,32 @@ public class MasterCoprocessorHost
return bypass;
}
+ public void preMoveServersAndTables(final Set<Address> servers, final Set<TableName> tables,
+ final String targetGroup) throws IOException {
+ execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
+ @Override
+ public void call(MasterObserver oserver,
+ ObserverContext<MasterCoprocessorEnvironment> ctx) throws IOException {
+ if (((MasterEnvironment) ctx.getEnvironment()).supportGroupCPs) {
+ oserver.preMoveServersAndTables(ctx, servers, tables, targetGroup);
+ }
+ }
+ });
+ }
+
+ public void postMoveServersAndTables(final Set<Address> servers, final Set<TableName> tables,
+ final String targetGroup) throws IOException {
+ execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
+ @Override
+ public void call(MasterObserver oserver,
+ ObserverContext<MasterCoprocessorEnvironment> ctx) throws IOException {
+ if (((MasterEnvironment) ctx.getEnvironment()).supportGroupCPs) {
+ oserver.postMoveServersAndTables(ctx, servers, tables, targetGroup);
+ }
+ }
+ });
+ }
+
public void preMoveServers(final Set servers, final String targetGroup)
throws IOException {
execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
index 64ac900..a38d705 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
@@ -2668,6 +2668,12 @@ public class AccessController implements MasterObserver, RegionObserver, RegionS
}
@Override
+ public void preMoveServersAndTables(ObserverContext<MasterCoprocessorEnvironment> ctx,
+ Set<Address> servers, Set<TableName> tables, String targetGroup) throws IOException {
+ requirePermission(getActiveUser(ctx), "moveServersAndTables", Action.ADMIN);
+ }
+
+ @Override
public void preMoveServers(ObserverContext ctx,
Set servers, String targetGroup) throws IOException {
requirePermission(getActiveUser(ctx), "moveServers", Action.ADMIN);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java
index 029cdae..6b52e0c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java
@@ -1461,6 +1461,16 @@ public class TestMasterObserver {
}
@Override
+ public void preMoveServersAndTables(ObserverContext<MasterCoprocessorEnvironment> ctx,
+ Set<Address> servers, Set<TableName> tables, String targetGroup) throws IOException {
+ }
+
+ @Override
+ public void postMoveServersAndTables(ObserverContext<MasterCoprocessorEnvironment> ctx,
+ Set<Address> servers, Set<TableName> tables, String targetGroup) throws IOException {
+ }
+
+ @Override
public void preMoveServers(ObserverContext ctx,
Set servers, String targetGroup) throws IOException {
}
diff --git a/hbase-shell/src/main/ruby/hbase/rsgroup_admin.rb b/hbase-shell/src/main/ruby/hbase/rsgroup_admin.rb
index 6847f8b..3b71062 100644
--- a/hbase-shell/src/main/ruby/hbase/rsgroup_admin.rb
+++ b/hbase-shell/src/main/ruby/hbase/rsgroup_admin.rb
@@ -142,5 +142,19 @@ module Hbase
res
end
+ #--------------------------------------------------------------------------
+ # move servers and tables to a group
+ def move_servers_tables(dest, *args)
+ servers = java.util.HashSet.new
+ tables = java.util.HashSet.new
+ args[0].each do |s|
+ servers.add(org.apache.hadoop.hbase.net.Address.fromString(s))
+ end
+ args[1].each do |t|
+ tables.add(org.apache.hadoop.hbase.TableName.valueOf(t))
+ end
+ @admin.moveServersAndTables(servers, tables, dest)
+ end
+
end
end
diff --git a/hbase-shell/src/main/ruby/shell.rb b/hbase-shell/src/main/ruby/shell.rb
index b112b21..66480f9 100644
--- a/hbase-shell/src/main/ruby/shell.rb
+++ b/hbase-shell/src/main/ruby/shell.rb
@@ -470,6 +470,7 @@ Shell.load_command_group(
balance_rsgroup
move_servers_rsgroup
move_tables_rsgroup
+ move_servers_tables_rsgroup
get_server_rsgroup
get_table_rsgroup
]
diff --git a/hbase-shell/src/main/ruby/shell/commands/move_servers_tables_rsgroup.rb b/hbase-shell/src/main/ruby/shell/commands/move_servers_tables_rsgroup.rb
new file mode 100644
index 0000000..5337141
--- /dev/null
+++ b/hbase-shell/src/main/ruby/shell/commands/move_servers_tables_rsgroup.rb
@@ -0,0 +1,37 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+module Shell
+ module Commands
+ class MoveServersTablesRsgroup < Command
+ def help
+ return <<-EOF
+Reassign RegionServers and Tables from one group to another.
+
+Example:
+
+ hbase> move_servers_tables_rsgroup 'dest',['server1:port','server2:port'],['table1','table2']
+
+EOF
+ end
+
+ def command(dest, servers, tables)
+ rsgroup_admin.move_servers_tables(dest, servers, tables)
+ end
+ end
+ end
+end