diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java index c3a94e3..0b299b8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java @@ -1094,7 +1094,16 @@ public class HTable implements HTableInterface, RegionLocator { regionMutationBuilder.setAtomic(true); MultiRequest request = MultiRequest.newBuilder().addRegionAction(regionMutationBuilder.build()).build(); - getStub().multi(controller, request); + ClientProtos.MultiResponse resp = getStub().multi(controller, request); + + ClientProtos.RegionActionResult res = resp.getRegionActionResultList().get(0); + if (res.hasException()) { + Throwable ex = ProtobufUtil.toException(res.getException()); + if(ex instanceof IOException) { + throw (IOException)ex; + } + throw new IOException("Failed to mutate row", ex); + } } catch (ServiceException se) { throw ProtobufUtil.getRemoteException(se); } diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/group/IntegrationTestGroup.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/group/IntegrationTestGroup.java new file mode 100644 index 0000000..c4c5b69 --- /dev/null +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/group/IntegrationTestGroup.java @@ -0,0 +1,88 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.group; + +import com.google.common.collect.Sets; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.IntegrationTestingUtility; +import org.apache.hadoop.hbase.MiniHBaseCluster; +import org.apache.hadoop.hbase.Waiter; +import org.apache.hadoop.hbase.testclassification.IntegrationTests; +import org.junit.After; +import org.junit.Before; +import org.junit.experimental.categories.Category; + +@Category(IntegrationTests.class) +public class IntegrationTestGroup extends TestGroupsBase { + //Integration specific + private final static Log LOG = LogFactory.getLog(IntegrationTestGroup.class); + private static boolean initialized = false; + + @Before + public void beforeMethod() throws Exception { + if (!initialized) { + LOG.info("Setting up IntegrationTestGroup"); + LOG.info("Initializing cluster with " + NUM_SLAVES_BASE + " servers"); + TEST_UTIL = new IntegrationTestingUtility(); + ((IntegrationTestingUtility)TEST_UTIL).initializeCluster(NUM_SLAVES_BASE); + //set shared configs + admin = TEST_UTIL.getHBaseAdmin(); + cluster = TEST_UTIL.getHBaseClusterInterface(); + groupAdmin = new VerifyingGroupAdminClient(TEST_UTIL.getConfiguration()); + LOG.info("Done initializing cluster"); + initialized = true; + //cluster may not be clean + //cleanup when initializing + afterMethod(); + } + } + + @After + public void afterMethod() throws Exception { + LOG.info("Cleaning up previous test run"); + //cleanup previous artifacts + deleteTableIfNecessary(); + deleteNamespaceIfNecessary(); + deleteGroups(); + admin.setBalancerRunning(false, true); + + LOG.info("Restoring the cluster"); + ((IntegrationTestingUtility)TEST_UTIL).restoreCluster(); + LOG.info("Done restoring the cluster"); + + groupAdmin.addGroup("master"); + groupAdmin.moveServers( + Sets.newHashSet(cluster.getInitialClusterStatus().getMaster().getHostAndPort()), + "master"); + + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Boolean>() { + @Override + public boolean evaluate() throws Exception { + LOG.info("Waiting for cleanup to finish " + groupAdmin.listGroups()); + //Might be greater since moving servers back to default + //is after starting a server + return groupAdmin.getGroupInfo(GroupInfo.DEFAULT_GROUP).getServers().size() + == NUM_SLAVES_BASE; + } + }); + LOG.info("Done cleaning up previous test run"); + } +} \ No newline at end of file diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RSGroupProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RSGroupProtos.java new file mode 100644 index 0000000..7fecbe4 --- /dev/null +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RSGroupProtos.java @@ -0,0 +1,13940 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: RSGroup.proto + +package org.apache.hadoop.hbase.protobuf.generated; + +public final class RSGroupProtos { + private RSGroupProtos() {} + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistry registry) { + } + public interface GroupInfoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string name = 1; + /** + * required string name = 1; + */ + boolean hasName(); + /** + * required string name = 1; + */ + java.lang.String getName(); + /** + * required string name = 1; + */ + com.google.protobuf.ByteString + getNameBytes(); + + // repeated string servers = 2; + /** + * repeated string servers = 2; + */ + java.util.List + getServersList(); + /** + * repeated string servers = 2; + */ + int getServersCount(); + /** + * repeated string servers = 2; + */ + java.lang.String getServers(int index); + /** + * repeated string servers = 2; + */ + com.google.protobuf.ByteString + getServersBytes(int index); + + // repeated .TableName tables = 3; + /** + * repeated .TableName tables = 3; + */ + java.util.List + getTablesList(); + /** + * repeated .TableName tables = 3; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTables(int index); + /** + * repeated .TableName tables = 3; + */ + int getTablesCount(); + /** + * repeated .TableName tables = 3; + */ + java.util.List + getTablesOrBuilderList(); + /** + * repeated .TableName tables = 3; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTablesOrBuilder( + int index); + } + /** + * Protobuf type {@code GroupInfo} + */ + public static final class GroupInfo extends + com.google.protobuf.GeneratedMessage + implements GroupInfoOrBuilder { + // Use GroupInfo.newBuilder() to construct. + private GroupInfo(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private GroupInfo(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final GroupInfo defaultInstance; + public static GroupInfo getDefaultInstance() { + return defaultInstance; + } + + public GroupInfo getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private GroupInfo( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + name_ = input.readBytes(); + break; + } + case 18: { + if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + servers_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000002; + } + servers_.add(input.readBytes()); + break; + } + case 26: { + if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) { + tables_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000004; + } + 
tables_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + servers_ = new com.google.protobuf.UnmodifiableLazyStringList(servers_); + } + if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) { + tables_ = java.util.Collections.unmodifiableList(tables_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_GroupInfo_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_GroupInfo_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public GroupInfo parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new GroupInfo(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string name = 1; + public static final int NAME_FIELD_NUMBER = 1; + private java.lang.Object name_; + /** + * required string name = 1; + */ + public boolean hasName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string name = 1; + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + name_ = s; + } + return s; + } + } + /** + * required string name = 1; + */ + public com.google.protobuf.ByteString + getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // repeated string servers = 2; + public static final int SERVERS_FIELD_NUMBER = 2; + private com.google.protobuf.LazyStringList servers_; + /** + * repeated string servers = 2; + */ + public java.util.List + getServersList() { + return servers_; + } + /** + * repeated string servers = 2; + */ + public int getServersCount() { + return servers_.size(); + } + /** + * repeated string servers = 2; + */ + public java.lang.String getServers(int index) { + return servers_.get(index); + } + /** + * repeated string servers = 2; + */ + public com.google.protobuf.ByteString + getServersBytes(int index) { + return servers_.getByteString(index); + } + + // repeated .TableName tables = 3; + public 
static final int TABLES_FIELD_NUMBER = 3; + private java.util.List tables_; + /** + * repeated .TableName tables = 3; + */ + public java.util.List getTablesList() { + return tables_; + } + /** + * repeated .TableName tables = 3; + */ + public java.util.List + getTablesOrBuilderList() { + return tables_; + } + /** + * repeated .TableName tables = 3; + */ + public int getTablesCount() { + return tables_.size(); + } + /** + * repeated .TableName tables = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTables(int index) { + return tables_.get(index); + } + /** + * repeated .TableName tables = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTablesOrBuilder( + int index) { + return tables_.get(index); + } + + private void initFields() { + name_ = ""; + servers_ = com.google.protobuf.LazyStringArrayList.EMPTY; + tables_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasName()) { + memoizedIsInitialized = 0; + return false; + } + for (int i = 0; i < getTablesCount(); i++) { + if (!getTables(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getNameBytes()); + } + for (int i = 0; i < servers_.size(); i++) { + output.writeBytes(2, servers_.getByteString(i)); + } + for (int i = 0; i < tables_.size(); i++) { + output.writeMessage(3, tables_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getNameBytes()); + } + { + int dataSize = 0; + for (int i = 0; i < servers_.size(); i++) { + dataSize += com.google.protobuf.CodedOutputStream + .computeBytesSizeNoTag(servers_.getByteString(i)); + } + size += dataSize; + size += 1 * getServersList().size(); + } + for (int i = 0; i < tables_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(3, tables_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo other = (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo) obj; + + boolean result = true; + result = result && (hasName() == other.hasName()); + if (hasName()) { + result = result && getName() + .equals(other.getName()); + } + result = result && getServersList() + .equals(other.getServersList()); + result = result && getTablesList() + .equals(other.getTablesList()); + result = result && + 
getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasName()) { + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + } + if (getServersCount() > 0) { + hash = (37 * hash) + SERVERS_FIELD_NUMBER; + hash = (53 * hash) + getServersList().hashCode(); + } + if (getTablesCount() > 0) { + hash = (37 * hash) + TABLES_FIELD_NUMBER; + hash = (53 * hash) + getTablesList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo prototype) { + return 
newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code GroupInfo} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_GroupInfo_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_GroupInfo_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getTablesFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + name_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + servers_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000002); + if (tablesBuilder_ == null) { + tables_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + } else { + tablesBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_GroupInfo_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo build() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo result = new org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.name_ = name_; + if (((bitField0_ & 0x00000002) == 0x00000002)) { + servers_ = new com.google.protobuf.UnmodifiableLazyStringList( + servers_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.servers_ = servers_; + if (tablesBuilder_ == null) { + if (((bitField0_ & 0x00000004) == 0x00000004)) { + tables_ = 
java.util.Collections.unmodifiableList(tables_); + bitField0_ = (bitField0_ & ~0x00000004); + } + result.tables_ = tables_; + } else { + result.tables_ = tablesBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.getDefaultInstance()) return this; + if (other.hasName()) { + bitField0_ |= 0x00000001; + name_ = other.name_; + onChanged(); + } + if (!other.servers_.isEmpty()) { + if (servers_.isEmpty()) { + servers_ = other.servers_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureServersIsMutable(); + servers_.addAll(other.servers_); + } + onChanged(); + } + if (tablesBuilder_ == null) { + if (!other.tables_.isEmpty()) { + if (tables_.isEmpty()) { + tables_ = other.tables_; + bitField0_ = (bitField0_ & ~0x00000004); + } else { + ensureTablesIsMutable(); + tables_.addAll(other.tables_); + } + onChanged(); + } + } else { + if (!other.tables_.isEmpty()) { + if (tablesBuilder_.isEmpty()) { + tablesBuilder_.dispose(); + tablesBuilder_ = null; + tables_ = other.tables_; + bitField0_ = (bitField0_ & ~0x00000004); + tablesBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getTablesFieldBuilder() : null; + } else { + tablesBuilder_.addAllMessages(other.tables_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasName()) { + + return false; + } + for (int i = 0; i < getTablesCount(); i++) { + if (!getTables(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string name = 1; + private java.lang.Object name_ = ""; + /** + * required string name = 1; + */ + public boolean hasName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string name = 1; + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string name = 1; + */ + public com.google.protobuf.ByteString + getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + name_ = b; + return b; + } else { + return 
(com.google.protobuf.ByteString) ref; + } + } + /** + * required string name = 1; + */ + public Builder setName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + name_ = value; + onChanged(); + return this; + } + /** + * required string name = 1; + */ + public Builder clearName() { + bitField0_ = (bitField0_ & ~0x00000001); + name_ = getDefaultInstance().getName(); + onChanged(); + return this; + } + /** + * required string name = 1; + */ + public Builder setNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + name_ = value; + onChanged(); + return this; + } + + // repeated string servers = 2; + private com.google.protobuf.LazyStringList servers_ = com.google.protobuf.LazyStringArrayList.EMPTY; + private void ensureServersIsMutable() { + if (!((bitField0_ & 0x00000002) == 0x00000002)) { + servers_ = new com.google.protobuf.LazyStringArrayList(servers_); + bitField0_ |= 0x00000002; + } + } + /** + * repeated string servers = 2; + */ + public java.util.List + getServersList() { + return java.util.Collections.unmodifiableList(servers_); + } + /** + * repeated string servers = 2; + */ + public int getServersCount() { + return servers_.size(); + } + /** + * repeated string servers = 2; + */ + public java.lang.String getServers(int index) { + return servers_.get(index); + } + /** + * repeated string servers = 2; + */ + public com.google.protobuf.ByteString + getServersBytes(int index) { + return servers_.getByteString(index); + } + /** + * repeated string servers = 2; + */ + public Builder setServers( + int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureServersIsMutable(); + servers_.set(index, value); + onChanged(); + return this; + } + /** + * repeated string servers = 2; + */ + public Builder addServers( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureServersIsMutable(); + servers_.add(value); + onChanged(); + return this; + } + /** + * repeated string servers = 2; + */ + public Builder addAllServers( + java.lang.Iterable values) { + ensureServersIsMutable(); + super.addAll(values, servers_); + onChanged(); + return this; + } + /** + * repeated string servers = 2; + */ + public Builder clearServers() { + servers_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + /** + * repeated string servers = 2; + */ + public Builder addServersBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + ensureServersIsMutable(); + servers_.add(value); + onChanged(); + return this; + } + + // repeated .TableName tables = 3; + private java.util.List tables_ = + java.util.Collections.emptyList(); + private void ensureTablesIsMutable() { + if (!((bitField0_ & 0x00000004) == 0x00000004)) { + tables_ = new java.util.ArrayList(tables_); + bitField0_ |= 0x00000004; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tablesBuilder_; + + /** + * repeated .TableName tables = 3; + */ + public java.util.List getTablesList() { + if (tablesBuilder_ == null) { + return 
java.util.Collections.unmodifiableList(tables_); + } else { + return tablesBuilder_.getMessageList(); + } + } + /** + * repeated .TableName tables = 3; + */ + public int getTablesCount() { + if (tablesBuilder_ == null) { + return tables_.size(); + } else { + return tablesBuilder_.getCount(); + } + } + /** + * repeated .TableName tables = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTables(int index) { + if (tablesBuilder_ == null) { + return tables_.get(index); + } else { + return tablesBuilder_.getMessage(index); + } + } + /** + * repeated .TableName tables = 3; + */ + public Builder setTables( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tablesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTablesIsMutable(); + tables_.set(index, value); + onChanged(); + } else { + tablesBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .TableName tables = 3; + */ + public Builder setTables( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tablesBuilder_ == null) { + ensureTablesIsMutable(); + tables_.set(index, builderForValue.build()); + onChanged(); + } else { + tablesBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .TableName tables = 3; + */ + public Builder addTables(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tablesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTablesIsMutable(); + tables_.add(value); + onChanged(); + } else { + tablesBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .TableName tables = 3; + */ + public Builder addTables( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tablesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTablesIsMutable(); + tables_.add(index, value); + onChanged(); + } else { + tablesBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .TableName tables = 3; + */ + public Builder addTables( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tablesBuilder_ == null) { + ensureTablesIsMutable(); + tables_.add(builderForValue.build()); + onChanged(); + } else { + tablesBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .TableName tables = 3; + */ + public Builder addTables( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tablesBuilder_ == null) { + ensureTablesIsMutable(); + tables_.add(index, builderForValue.build()); + onChanged(); + } else { + tablesBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .TableName tables = 3; + */ + public Builder addAllTables( + java.lang.Iterable values) { + if (tablesBuilder_ == null) { + ensureTablesIsMutable(); + super.addAll(values, tables_); + onChanged(); + } else { + tablesBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .TableName tables = 3; + */ + public Builder clearTables() { + if (tablesBuilder_ == null) { + tables_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + } else { + tablesBuilder_.clear(); + } + return this; + } + /** + * repeated .TableName tables = 3; + */ + public Builder 
removeTables(int index) { + if (tablesBuilder_ == null) { + ensureTablesIsMutable(); + tables_.remove(index); + onChanged(); + } else { + tablesBuilder_.remove(index); + } + return this; + } + /** + * repeated .TableName tables = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTablesBuilder( + int index) { + return getTablesFieldBuilder().getBuilder(index); + } + /** + * repeated .TableName tables = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTablesOrBuilder( + int index) { + if (tablesBuilder_ == null) { + return tables_.get(index); } else { + return tablesBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .TableName tables = 3; + */ + public java.util.List + getTablesOrBuilderList() { + if (tablesBuilder_ != null) { + return tablesBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(tables_); + } + } + /** + * repeated .TableName tables = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder addTablesBuilder() { + return getTablesFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()); + } + /** + * repeated .TableName tables = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder addTablesBuilder( + int index) { + return getTablesFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()); + } + /** + * repeated .TableName tables = 3; + */ + public java.util.List + getTablesBuilderList() { + return getTablesFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> + getTablesFieldBuilder() { + if (tablesBuilder_ == null) { + tablesBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>( + tables_, + ((bitField0_ & 0x00000004) == 0x00000004), + getParentForChildren(), + isClean()); + tables_ = null; + } + return tablesBuilder_; + } + + // @@protoc_insertion_point(builder_scope:GroupInfo) + } + + static { + defaultInstance = new GroupInfo(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:GroupInfo) + } + + public interface ListTablesOfGroupRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string group_name = 1; + /** + * required string group_name = 1; + */ + boolean hasGroupName(); + /** + * required string group_name = 1; + */ + java.lang.String getGroupName(); + /** + * required string group_name = 1; + */ + com.google.protobuf.ByteString + getGroupNameBytes(); + } + /** + * Protobuf type {@code ListTablesOfGroupRequest} + */ + public static final class ListTablesOfGroupRequest extends + com.google.protobuf.GeneratedMessage + implements ListTablesOfGroupRequestOrBuilder { + // Use ListTablesOfGroupRequest.newBuilder() to construct. 
+ private ListTablesOfGroupRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private ListTablesOfGroupRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final ListTablesOfGroupRequest defaultInstance; + public static ListTablesOfGroupRequest getDefaultInstance() { + return defaultInstance; + } + + public ListTablesOfGroupRequest getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ListTablesOfGroupRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + groupName_ = input.readBytes(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_ListTablesOfGroupRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_ListTablesOfGroupRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupRequest.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupRequest.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public ListTablesOfGroupRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ListTablesOfGroupRequest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string group_name = 1; + public static final int GROUP_NAME_FIELD_NUMBER = 1; + private java.lang.Object groupName_; + /** + * required string group_name = 1; + */ + public boolean hasGroupName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string group_name = 1; + */ + public java.lang.String getGroupName() { + java.lang.Object ref = groupName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + 
(com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + groupName_ = s; + } + return s; + } + } + /** + * required string group_name = 1; + */ + public com.google.protobuf.ByteString + getGroupNameBytes() { + java.lang.Object ref = groupName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + groupName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private void initFields() { + groupName_ = ""; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasGroupName()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getGroupNameBytes()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getGroupNameBytes()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupRequest other = (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupRequest) obj; + + boolean result = true; + result = result && (hasGroupName() == other.hasGroupName()); + if (hasGroupName()) { + result = result && getGroupName() + .equals(other.getGroupName()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasGroupName()) { + hash = (37 * hash) + GROUP_NAME_FIELD_NUMBER; + hash = (53 * hash) + getGroupName().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static 
org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code ListTablesOfGroupRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_ListTablesOfGroupRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_ListTablesOfGroupRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupRequest.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupRequest.Builder.class); + } + + // 
Construct using org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + groupName_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_ListTablesOfGroupRequest_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupRequest build() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupRequest result = new org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.groupName_ = groupName_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupRequest.getDefaultInstance()) return this; + if (other.hasGroupName()) { + bitField0_ |= 0x00000001; + groupName_ = other.groupName_; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasGroupName()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + 
} + return this; + } + private int bitField0_; + + // required string group_name = 1; + private java.lang.Object groupName_ = ""; + /** + * required string group_name = 1; + */ + public boolean hasGroupName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string group_name = 1; + */ + public java.lang.String getGroupName() { + java.lang.Object ref = groupName_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + groupName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string group_name = 1; + */ + public com.google.protobuf.ByteString + getGroupNameBytes() { + java.lang.Object ref = groupName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + groupName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string group_name = 1; + */ + public Builder setGroupName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + groupName_ = value; + onChanged(); + return this; + } + /** + * required string group_name = 1; + */ + public Builder clearGroupName() { + bitField0_ = (bitField0_ & ~0x00000001); + groupName_ = getDefaultInstance().getGroupName(); + onChanged(); + return this; + } + /** + * required string group_name = 1; + */ + public Builder setGroupNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + groupName_ = value; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:ListTablesOfGroupRequest) + } + + static { + defaultInstance = new ListTablesOfGroupRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:ListTablesOfGroupRequest) + } + + public interface ListTablesOfGroupResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // repeated .TableName table_name = 1; + /** + * repeated .TableName table_name = 1; + */ + java.util.List + getTableNameList(); + /** + * repeated .TableName table_name = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName(int index); + /** + * repeated .TableName table_name = 1; + */ + int getTableNameCount(); + /** + * repeated .TableName table_name = 1; + */ + java.util.List + getTableNameOrBuilderList(); + /** + * repeated .TableName table_name = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder( + int index); + } + /** + * Protobuf type {@code ListTablesOfGroupResponse} + */ + public static final class ListTablesOfGroupResponse extends + com.google.protobuf.GeneratedMessage + implements ListTablesOfGroupResponseOrBuilder { + // Use ListTablesOfGroupResponse.newBuilder() to construct. 
+ private ListTablesOfGroupResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private ListTablesOfGroupResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final ListTablesOfGroupResponse defaultInstance; + public static ListTablesOfGroupResponse getDefaultInstance() { + return defaultInstance; + } + + public ListTablesOfGroupResponse getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ListTablesOfGroupResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + tableName_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; + } + tableName_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + tableName_ = java.util.Collections.unmodifiableList(tableName_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_ListTablesOfGroupResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_ListTablesOfGroupResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupResponse.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupResponse.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public ListTablesOfGroupResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ListTablesOfGroupResponse(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + // repeated .TableName table_name = 1; + public static final int TABLE_NAME_FIELD_NUMBER = 1; + private java.util.List tableName_; + /** + * repeated .TableName table_name = 1; + */ + public java.util.List 
getTableNameList() { + return tableName_; + } + /** + * repeated .TableName table_name = 1; + */ + public java.util.List + getTableNameOrBuilderList() { + return tableName_; + } + /** + * repeated .TableName table_name = 1; + */ + public int getTableNameCount() { + return tableName_.size(); + } + /** + * repeated .TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName(int index) { + return tableName_.get(index); + } + /** + * repeated .TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder( + int index) { + return tableName_.get(index); + } + + private void initFields() { + tableName_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + for (int i = 0; i < getTableNameCount(); i++) { + if (!getTableName(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + for (int i = 0; i < tableName_.size(); i++) { + output.writeMessage(1, tableName_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < tableName_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, tableName_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupResponse other = (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupResponse) obj; + + boolean result = true; + result = result && getTableNameList() + .equals(other.getTableNameList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (getTableNameCount() > 0) { + hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER; + hash = (53 * hash) + getTableNameList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupResponse parseFrom( + com.google.protobuf.ByteString data, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code ListTablesOfGroupResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_ListTablesOfGroupResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_ListTablesOfGroupResponse_fieldAccessorTable + 
.ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupResponse.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupResponse.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getTableNameFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (tableNameBuilder_ == null) { + tableName_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + } else { + tableNameBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_ListTablesOfGroupResponse_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupResponse build() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupResponse result = new org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupResponse(this); + int from_bitField0_ = bitField0_; + if (tableNameBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + tableName_ = java.util.Collections.unmodifiableList(tableName_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.tableName_ = tableName_; + } else { + result.tableName_ = tableNameBuilder_.build(); + } + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupResponse.getDefaultInstance()) return this; + if (tableNameBuilder_ == null) { + if (!other.tableName_.isEmpty()) { + if (tableName_.isEmpty()) { + tableName_ = other.tableName_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureTableNameIsMutable(); + tableName_.addAll(other.tableName_); + } + onChanged(); + } + } else { + if (!other.tableName_.isEmpty()) { + if (tableNameBuilder_.isEmpty()) { + tableNameBuilder_.dispose(); + tableNameBuilder_ = null; + 
tableName_ = other.tableName_; + bitField0_ = (bitField0_ & ~0x00000001); + tableNameBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getTableNameFieldBuilder() : null; + } else { + tableNameBuilder_.addAllMessages(other.tableName_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + for (int i = 0; i < getTableNameCount(); i++) { + if (!getTableName(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupResponse) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // repeated .TableName table_name = 1; + private java.util.List tableName_ = + java.util.Collections.emptyList(); + private void ensureTableNameIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + tableName_ = new java.util.ArrayList(tableName_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableNameBuilder_; + + /** + * repeated .TableName table_name = 1; + */ + public java.util.List getTableNameList() { + if (tableNameBuilder_ == null) { + return java.util.Collections.unmodifiableList(tableName_); + } else { + return tableNameBuilder_.getMessageList(); + } + } + /** + * repeated .TableName table_name = 1; + */ + public int getTableNameCount() { + if (tableNameBuilder_ == null) { + return tableName_.size(); + } else { + return tableNameBuilder_.getCount(); + } + } + /** + * repeated .TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName(int index) { + if (tableNameBuilder_ == null) { + return tableName_.get(index); + } else { + return tableNameBuilder_.getMessage(index); + } + } + /** + * repeated .TableName table_name = 1; + */ + public Builder setTableName( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTableNameIsMutable(); + tableName_.set(index, value); + onChanged(); + } else { + tableNameBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .TableName table_name = 1; + */ + public Builder setTableName( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableNameBuilder_ == null) { + ensureTableNameIsMutable(); + tableName_.set(index, builderForValue.build()); + onChanged(); + } else { + tableNameBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .TableName table_name = 1; + */ + public Builder addTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if 
(tableNameBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTableNameIsMutable(); + tableName_.add(value); + onChanged(); + } else { + tableNameBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .TableName table_name = 1; + */ + public Builder addTableName( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTableNameIsMutable(); + tableName_.add(index, value); + onChanged(); + } else { + tableNameBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .TableName table_name = 1; + */ + public Builder addTableName( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableNameBuilder_ == null) { + ensureTableNameIsMutable(); + tableName_.add(builderForValue.build()); + onChanged(); + } else { + tableNameBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .TableName table_name = 1; + */ + public Builder addTableName( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableNameBuilder_ == null) { + ensureTableNameIsMutable(); + tableName_.add(index, builderForValue.build()); + onChanged(); + } else { + tableNameBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .TableName table_name = 1; + */ + public Builder addAllTableName( + java.lang.Iterable values) { + if (tableNameBuilder_ == null) { + ensureTableNameIsMutable(); + super.addAll(values, tableName_); + onChanged(); + } else { + tableNameBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .TableName table_name = 1; + */ + public Builder clearTableName() { + if (tableNameBuilder_ == null) { + tableName_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + tableNameBuilder_.clear(); + } + return this; + } + /** + * repeated .TableName table_name = 1; + */ + public Builder removeTableName(int index) { + if (tableNameBuilder_ == null) { + ensureTableNameIsMutable(); + tableName_.remove(index); + onChanged(); + } else { + tableNameBuilder_.remove(index); + } + return this; + } + /** + * repeated .TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableNameBuilder( + int index) { + return getTableNameFieldBuilder().getBuilder(index); + } + /** + * repeated .TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder( + int index) { + if (tableNameBuilder_ == null) { + return tableName_.get(index); } else { + return tableNameBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .TableName table_name = 1; + */ + public java.util.List + getTableNameOrBuilderList() { + if (tableNameBuilder_ != null) { + return tableNameBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(tableName_); + } + } + /** + * repeated .TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder addTableNameBuilder() { + return getTableNameFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()); + } + /** + * repeated .TableName table_name = 1; + */ + public 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder addTableNameBuilder( + int index) { + return getTableNameFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()); + } + /** + * repeated .TableName table_name = 1; + */ + public java.util.List + getTableNameBuilderList() { + return getTableNameFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> + getTableNameFieldBuilder() { + if (tableNameBuilder_ == null) { + tableNameBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>( + tableName_, + ((bitField0_ & 0x00000001) == 0x00000001), + getParentForChildren(), + isClean()); + tableName_ = null; + } + return tableNameBuilder_; + } + + // @@protoc_insertion_point(builder_scope:ListTablesOfGroupResponse) + } + + static { + defaultInstance = new ListTablesOfGroupResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:ListTablesOfGroupResponse) + } + + public interface GetGroupInfoRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string group_name = 1; + /** + * required string group_name = 1; + */ + boolean hasGroupName(); + /** + * required string group_name = 1; + */ + java.lang.String getGroupName(); + /** + * required string group_name = 1; + */ + com.google.protobuf.ByteString + getGroupNameBytes(); + } + /** + * Protobuf type {@code GetGroupInfoRequest} + */ + public static final class GetGroupInfoRequest extends + com.google.protobuf.GeneratedMessage + implements GetGroupInfoRequestOrBuilder { + // Use GetGroupInfoRequest.newBuilder() to construct. 
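ListTablesOfGroupResponse, completed above, wraps a repeated TableName field. An illustrative sketch of producing and consuming it; it assumes HBaseProtos.TableName exposes namespace/qualifier bytes fields as in stock HBase.proto, and the table names are placeholders:

import com.google.protobuf.ByteString;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos;

public class ListTablesOfGroupResponseSketch {
  public static void main(String[] args) throws Exception {
    // Assumption: HBaseProtos.TableName carries namespace/qualifier bytes fields.
    HBaseProtos.TableName table = HBaseProtos.TableName.newBuilder()
        .setNamespace(ByteString.copyFromUtf8("default"))
        .setQualifier(ByteString.copyFromUtf8("t1"))
        .build();

    RSGroupProtos.ListTablesOfGroupResponse response =
        RSGroupProtos.ListTablesOfGroupResponse.newBuilder()
            .addTableName(table)        // repeated .TableName table_name = 1
            .build();

    // Consumers walk the repeated field through the generated list accessor.
    for (HBaseProtos.TableName tn : response.getTableNameList()) {
      System.out.println(tn.getQualifier().toStringUtf8());
    }
  }
}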
+ private GetGroupInfoRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private GetGroupInfoRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final GetGroupInfoRequest defaultInstance; + public static GetGroupInfoRequest getDefaultInstance() { + return defaultInstance; + } + + public GetGroupInfoRequest getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private GetGroupInfoRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + groupName_ = input.readBytes(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_GetGroupInfoRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_GetGroupInfoRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoRequest.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoRequest.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public GetGroupInfoRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new GetGroupInfoRequest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string group_name = 1; + public static final int GROUP_NAME_FIELD_NUMBER = 1; + private java.lang.Object groupName_; + /** + * required string group_name = 1; + */ + public boolean hasGroupName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string group_name = 1; + */ + public java.lang.String getGroupName() { + java.lang.Object ref = groupName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if 
(bs.isValidUtf8()) { + groupName_ = s; + } + return s; + } + } + /** + * required string group_name = 1; + */ + public com.google.protobuf.ByteString + getGroupNameBytes() { + java.lang.Object ref = groupName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + groupName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private void initFields() { + groupName_ = ""; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasGroupName()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getGroupNameBytes()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getGroupNameBytes()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoRequest other = (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoRequest) obj; + + boolean result = true; + result = result && (hasGroupName() == other.hasGroupName()); + if (hasGroupName()) { + result = result && getGroupName() + .equals(other.getGroupName()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasGroupName()) { + hash = (37 * hash) + GROUP_NAME_FIELD_NUMBER; + hash = (53 * hash) + getGroupName().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoRequest parseFrom(byte[] data) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code GetGroupInfoRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_GetGroupInfoRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_GetGroupInfoRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoRequest.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoRequest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private 
Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + groupName_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_GetGroupInfoRequest_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoRequest build() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoRequest result = new org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.groupName_ = groupName_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoRequest.getDefaultInstance()) return this; + if (other.hasGroupName()) { + bitField0_ |= 0x00000001; + groupName_ = other.groupName_; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasGroupName()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string group_name = 1; + private java.lang.Object groupName_ = ""; + /** + * required string group_name = 1; + */ + public boolean hasGroupName() { + return ((bitField0_ & 0x00000001) == 
0x00000001); + } + /** + * required string group_name = 1; + */ + public java.lang.String getGroupName() { + java.lang.Object ref = groupName_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + groupName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string group_name = 1; + */ + public com.google.protobuf.ByteString + getGroupNameBytes() { + java.lang.Object ref = groupName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + groupName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string group_name = 1; + */ + public Builder setGroupName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + groupName_ = value; + onChanged(); + return this; + } + /** + * required string group_name = 1; + */ + public Builder clearGroupName() { + bitField0_ = (bitField0_ & ~0x00000001); + groupName_ = getDefaultInstance().getGroupName(); + onChanged(); + return this; + } + /** + * required string group_name = 1; + */ + public Builder setGroupNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + groupName_ = value; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:GetGroupInfoRequest) + } + + static { + defaultInstance = new GetGroupInfoRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:GetGroupInfoRequest) + } + + public interface GetGroupInfoResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // optional .GroupInfo group_info = 1; + /** + * optional .GroupInfo group_info = 1; + */ + boolean hasGroupInfo(); + /** + * optional .GroupInfo group_info = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo getGroupInfo(); + /** + * optional .GroupInfo group_info = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfoOrBuilder getGroupInfoOrBuilder(); + } + /** + * Protobuf type {@code GetGroupInfoResponse} + */ + public static final class GetGroupInfoResponse extends + com.google.protobuf.GeneratedMessage + implements GetGroupInfoResponseOrBuilder { + // Use GetGroupInfoResponse.newBuilder() to construct. 
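GetGroupInfoRequest, finished above, mirrors ListTablesOfGroupRequest with a required group_name. The sketch below (illustrative values only, not part of the patch) shows how the required-field contract surfaces through build() and parseFrom():

import com.google.protobuf.UninitializedMessageException;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos;

public class GetGroupInfoRequestSketch {
  public static void main(String[] args) throws Exception {
    // group_name is required, so build() on an empty builder fails fast.
    try {
      RSGroupProtos.GetGroupInfoRequest.newBuilder().build();
    } catch (UninitializedMessageException expected) {
      System.out.println("missing required field: " + expected.getMessage());
    }

    RSGroupProtos.GetGroupInfoRequest request =
        RSGroupProtos.GetGroupInfoRequest.newBuilder()
            .setGroupName("my_group")   // placeholder group name
            .build();

    // parseFrom(byte[]) goes through the generated PARSER, which also checks required fields.
    RSGroupProtos.GetGroupInfoRequest parsed =
        RSGroupProtos.GetGroupInfoRequest.parseFrom(request.toByteArray());
    System.out.println(parsed.getGroupName());
  }
}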
+ private GetGroupInfoResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private GetGroupInfoResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final GetGroupInfoResponse defaultInstance; + public static GetGroupInfoResponse getDefaultInstance() { + return defaultInstance; + } + + public GetGroupInfoResponse getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private GetGroupInfoResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = groupInfo_.toBuilder(); + } + groupInfo_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(groupInfo_); + groupInfo_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_GetGroupInfoResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_GetGroupInfoResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoResponse.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoResponse.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public GetGroupInfoResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new GetGroupInfoResponse(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // optional .GroupInfo group_info = 1; + public static final int GROUP_INFO_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo groupInfo_; + /** + * 
optional .GroupInfo group_info = 1; + */ + public boolean hasGroupInfo() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional .GroupInfo group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo getGroupInfo() { + return groupInfo_; + } + /** + * optional .GroupInfo group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfoOrBuilder getGroupInfoOrBuilder() { + return groupInfo_; + } + + private void initFields() { + groupInfo_ = org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (hasGroupInfo()) { + if (!getGroupInfo().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, groupInfo_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, groupInfo_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoResponse other = (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoResponse) obj; + + boolean result = true; + result = result && (hasGroupInfo() == other.hasGroupInfo()); + if (hasGroupInfo()) { + result = result && getGroupInfo() + .equals(other.getGroupInfo()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasGroupInfo()) { + hash = (37 * hash) + GROUP_INFO_FIELD_NUMBER; + hash = (53 * hash) + getGroupInfo().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, 
extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code GetGroupInfoResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_GetGroupInfoResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_GetGroupInfoResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoResponse.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoResponse.Builder.class); + } + + // Construct using 
org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getGroupInfoFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (groupInfoBuilder_ == null) { + groupInfo_ = org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.getDefaultInstance(); + } else { + groupInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_GetGroupInfoResponse_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoResponse build() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoResponse result = new org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoResponse(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (groupInfoBuilder_ == null) { + result.groupInfo_ = groupInfo_; + } else { + result.groupInfo_ = groupInfoBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoResponse.getDefaultInstance()) return this; + if (other.hasGroupInfo()) { + mergeGroupInfo(other.getGroupInfo()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (hasGroupInfo()) { + if (!getGroupInfo().isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch 
(com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoResponse) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // optional .GroupInfo group_info = 1; + private org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo groupInfo_ = org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfoOrBuilder> groupInfoBuilder_; + /** + * optional .GroupInfo group_info = 1; + */ + public boolean hasGroupInfo() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional .GroupInfo group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo getGroupInfo() { + if (groupInfoBuilder_ == null) { + return groupInfo_; + } else { + return groupInfoBuilder_.getMessage(); + } + } + /** + * optional .GroupInfo group_info = 1; + */ + public Builder setGroupInfo(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo value) { + if (groupInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + groupInfo_ = value; + onChanged(); + } else { + groupInfoBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * optional .GroupInfo group_info = 1; + */ + public Builder setGroupInfo( + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.Builder builderForValue) { + if (groupInfoBuilder_ == null) { + groupInfo_ = builderForValue.build(); + onChanged(); + } else { + groupInfoBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * optional .GroupInfo group_info = 1; + */ + public Builder mergeGroupInfo(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo value) { + if (groupInfoBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + groupInfo_ != org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.getDefaultInstance()) { + groupInfo_ = + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.newBuilder(groupInfo_).mergeFrom(value).buildPartial(); + } else { + groupInfo_ = value; + } + onChanged(); + } else { + groupInfoBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * optional .GroupInfo group_info = 1; + */ + public Builder clearGroupInfo() { + if (groupInfoBuilder_ == null) { + groupInfo_ = org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.getDefaultInstance(); + onChanged(); + } else { + groupInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * optional .GroupInfo group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.Builder getGroupInfoBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getGroupInfoFieldBuilder().getBuilder(); + } + /** + * optional .GroupInfo group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfoOrBuilder getGroupInfoOrBuilder() { + if (groupInfoBuilder_ != null) { + return groupInfoBuilder_.getMessageOrBuilder(); + } else { + return groupInfo_; + } + } + /** + 
* optional .GroupInfo group_info = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfoOrBuilder> + getGroupInfoFieldBuilder() { + if (groupInfoBuilder_ == null) { + groupInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfoOrBuilder>( + groupInfo_, + getParentForChildren(), + isClean()); + groupInfo_ = null; + } + return groupInfoBuilder_; + } + + // @@protoc_insertion_point(builder_scope:GetGroupInfoResponse) + } + + static { + defaultInstance = new GetGroupInfoResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:GetGroupInfoResponse) + } + + public interface GetGroupInfoOfTableRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .TableName table_name = 1; + /** + * required .TableName table_name = 1; + */ + boolean hasTableName(); + /** + * required .TableName table_name = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName(); + /** + * required .TableName table_name = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder(); + } + /** + * Protobuf type {@code GetGroupInfoOfTableRequest} + */ + public static final class GetGroupInfoOfTableRequest extends + com.google.protobuf.GeneratedMessage + implements GetGroupInfoOfTableRequestOrBuilder { + // Use GetGroupInfoOfTableRequest.newBuilder() to construct. 
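GetGroupInfoResponse, completed above, makes group_info optional, so an empty response is legal. An illustrative sketch of the hasGroupInfo()/getGroupInfo() contract:

import org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos;

public class GetGroupInfoResponseSketch {
  public static void main(String[] args) throws Exception {
    // group_info is optional: an empty response is valid and serializes cleanly.
    RSGroupProtos.GetGroupInfoResponse empty =
        RSGroupProtos.GetGroupInfoResponse.newBuilder().build();

    RSGroupProtos.GetGroupInfoResponse parsed =
        RSGroupProtos.GetGroupInfoResponse.parseFrom(empty.toByteArray());

    // When unset, getGroupInfo() returns GroupInfo.getDefaultInstance(), never null,
    // so callers should gate on hasGroupInfo() rather than null-checking.
    if (parsed.hasGroupInfo()) {
      System.out.println("group info: " + parsed.getGroupInfo());
    } else {
      System.out.println("group not found");
    }
  }
}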
+ private GetGroupInfoOfTableRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private GetGroupInfoOfTableRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final GetGroupInfoOfTableRequest defaultInstance; + public static GetGroupInfoOfTableRequest getDefaultInstance() { + return defaultInstance; + } + + public GetGroupInfoOfTableRequest getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private GetGroupInfoOfTableRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = tableName_.toBuilder(); + } + tableName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(tableName_); + tableName_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_GetGroupInfoOfTableRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_GetGroupInfoOfTableRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableRequest.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableRequest.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public GetGroupInfoOfTableRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new GetGroupInfoOfTableRequest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required .TableName table_name = 1; + public static final int TABLE_NAME_FIELD_NUMBER = 1; + private 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_; + /** + * required .TableName table_name = 1; + */ + public boolean hasTableName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() { + return tableName_; + } + /** + * required .TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() { + return tableName_; + } + + private void initFields() { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasTableName()) { + memoizedIsInitialized = 0; + return false; + } + if (!getTableName().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, tableName_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, tableName_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableRequest other = (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableRequest) obj; + + boolean result = true; + result = result && (hasTableName() == other.hasTableName()); + if (hasTableName()) { + result = result && getTableName() + .equals(other.getTableName()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasTableName()) { + hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER; + hash = (53 * hash) + getTableName().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableRequest parseFrom( + com.google.protobuf.ByteString data, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code GetGroupInfoOfTableRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_GetGroupInfoOfTableRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_GetGroupInfoOfTableRequest_fieldAccessorTable + 
.ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableRequest.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableRequest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getTableNameFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (tableNameBuilder_ == null) { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + } else { + tableNameBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_GetGroupInfoOfTableRequest_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableRequest build() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableRequest result = new org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (tableNameBuilder_ == null) { + result.tableName_ = tableName_; + } else { + result.tableName_ = tableNameBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableRequest.getDefaultInstance()) return this; + if (other.hasTableName()) { + mergeTableName(other.getTableName()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasTableName()) { + + return false; + } + if (!getTableName().isInitialized()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + 
com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .TableName table_name = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableNameBuilder_; + /** + * required .TableName table_name = 1; + */ + public boolean hasTableName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() { + if (tableNameBuilder_ == null) { + return tableName_; + } else { + return tableNameBuilder_.getMessage(); + } + } + /** + * required .TableName table_name = 1; + */ + public Builder setTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + tableName_ = value; + onChanged(); + } else { + tableNameBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .TableName table_name = 1; + */ + public Builder setTableName( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableNameBuilder_ == null) { + tableName_ = builderForValue.build(); + onChanged(); + } else { + tableNameBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .TableName table_name = 1; + */ + public Builder mergeTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + tableName_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) { + tableName_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial(); + } else { + tableName_ = value; + } + onChanged(); + } else { + tableNameBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .TableName table_name = 1; + */ + public Builder clearTableName() { + if (tableNameBuilder_ == null) { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + onChanged(); + } else { + tableNameBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * required .TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableNameBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return 
getTableNameFieldBuilder().getBuilder(); + } + /** + * required .TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() { + if (tableNameBuilder_ != null) { + return tableNameBuilder_.getMessageOrBuilder(); + } else { + return tableName_; + } + } + /** + * required .TableName table_name = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> + getTableNameFieldBuilder() { + if (tableNameBuilder_ == null) { + tableNameBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>( + tableName_, + getParentForChildren(), + isClean()); + tableName_ = null; + } + return tableNameBuilder_; + } + + // @@protoc_insertion_point(builder_scope:GetGroupInfoOfTableRequest) + } + + static { + defaultInstance = new GetGroupInfoOfTableRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:GetGroupInfoOfTableRequest) + } + + public interface GetGroupInfoOfTableResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // optional .GroupInfo group_info = 1; + /** + * optional .GroupInfo group_info = 1; + */ + boolean hasGroupInfo(); + /** + * optional .GroupInfo group_info = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo getGroupInfo(); + /** + * optional .GroupInfo group_info = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfoOrBuilder getGroupInfoOrBuilder(); + } + /** + * Protobuf type {@code GetGroupInfoOfTableResponse} + */ + public static final class GetGroupInfoOfTableResponse extends + com.google.protobuf.GeneratedMessage + implements GetGroupInfoOfTableResponseOrBuilder { + // Use GetGroupInfoOfTableResponse.newBuilder() to construct. 
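Because group_info is declared optional, callers should guard reads with hasGroupInfo(). A minimal sketch of a hypothetical helper follows, assuming GroupInfo exposes a name accessor (its definition is not shown in this excerpt):

    // Sketch only: returns the group name for a table, or null if the table is unassigned.
    static String groupNameOf(GetGroupInfoOfTableResponse response) {
      if (response.hasGroupInfo()) {
        return response.getGroupInfo().getName();   // getName() assumed from GroupInfo's name field
      }
      return null;   // no group_info set: the table is not mapped to a group
    }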
+ private GetGroupInfoOfTableResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private GetGroupInfoOfTableResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final GetGroupInfoOfTableResponse defaultInstance; + public static GetGroupInfoOfTableResponse getDefaultInstance() { + return defaultInstance; + } + + public GetGroupInfoOfTableResponse getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private GetGroupInfoOfTableResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = groupInfo_.toBuilder(); + } + groupInfo_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(groupInfo_); + groupInfo_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_GetGroupInfoOfTableResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_GetGroupInfoOfTableResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableResponse.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableResponse.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public GetGroupInfoOfTableResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new GetGroupInfoOfTableResponse(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // optional .GroupInfo group_info = 1; + public static final int GROUP_INFO_FIELD_NUMBER = 1; + private 
org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo groupInfo_; + /** + * optional .GroupInfo group_info = 1; + */ + public boolean hasGroupInfo() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional .GroupInfo group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo getGroupInfo() { + return groupInfo_; + } + /** + * optional .GroupInfo group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfoOrBuilder getGroupInfoOrBuilder() { + return groupInfo_; + } + + private void initFields() { + groupInfo_ = org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (hasGroupInfo()) { + if (!getGroupInfo().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, groupInfo_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, groupInfo_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableResponse other = (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableResponse) obj; + + boolean result = true; + result = result && (hasGroupInfo() == other.hasGroupInfo()); + if (hasGroupInfo()) { + result = result && getGroupInfo() + .equals(other.getGroupInfo()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasGroupInfo()) { + hash = (37 * hash) + GROUP_INFO_FIELD_NUMBER; + hash = (53 * hash) + getGroupInfo().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableResponse parseFrom( + com.google.protobuf.ByteString data, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code GetGroupInfoOfTableResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_GetGroupInfoOfTableResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_GetGroupInfoOfTableResponse_fieldAccessorTable + 
.ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableResponse.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableResponse.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getGroupInfoFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (groupInfoBuilder_ == null) { + groupInfo_ = org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.getDefaultInstance(); + } else { + groupInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_GetGroupInfoOfTableResponse_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableResponse build() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableResponse result = new org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableResponse(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (groupInfoBuilder_ == null) { + result.groupInfo_ = groupInfo_; + } else { + result.groupInfo_ = groupInfoBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableResponse.getDefaultInstance()) return this; + if (other.hasGroupInfo()) { + mergeGroupInfo(other.getGroupInfo()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (hasGroupInfo()) { + if (!getGroupInfo().isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + 
com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableResponse) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // optional .GroupInfo group_info = 1; + private org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo groupInfo_ = org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfoOrBuilder> groupInfoBuilder_; + /** + * optional .GroupInfo group_info = 1; + */ + public boolean hasGroupInfo() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional .GroupInfo group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo getGroupInfo() { + if (groupInfoBuilder_ == null) { + return groupInfo_; + } else { + return groupInfoBuilder_.getMessage(); + } + } + /** + * optional .GroupInfo group_info = 1; + */ + public Builder setGroupInfo(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo value) { + if (groupInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + groupInfo_ = value; + onChanged(); + } else { + groupInfoBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * optional .GroupInfo group_info = 1; + */ + public Builder setGroupInfo( + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.Builder builderForValue) { + if (groupInfoBuilder_ == null) { + groupInfo_ = builderForValue.build(); + onChanged(); + } else { + groupInfoBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * optional .GroupInfo group_info = 1; + */ + public Builder mergeGroupInfo(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo value) { + if (groupInfoBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + groupInfo_ != org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.getDefaultInstance()) { + groupInfo_ = + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.newBuilder(groupInfo_).mergeFrom(value).buildPartial(); + } else { + groupInfo_ = value; + } + onChanged(); + } else { + groupInfoBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * optional .GroupInfo group_info = 1; + */ + public Builder clearGroupInfo() { + if (groupInfoBuilder_ == null) { + groupInfo_ = org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.getDefaultInstance(); + onChanged(); + } else { + groupInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * optional .GroupInfo group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.Builder getGroupInfoBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + 
return getGroupInfoFieldBuilder().getBuilder(); + } + /** + * optional .GroupInfo group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfoOrBuilder getGroupInfoOrBuilder() { + if (groupInfoBuilder_ != null) { + return groupInfoBuilder_.getMessageOrBuilder(); + } else { + return groupInfo_; + } + } + /** + * optional .GroupInfo group_info = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfoOrBuilder> + getGroupInfoFieldBuilder() { + if (groupInfoBuilder_ == null) { + groupInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfoOrBuilder>( + groupInfo_, + getParentForChildren(), + isClean()); + groupInfo_ = null; + } + return groupInfoBuilder_; + } + + // @@protoc_insertion_point(builder_scope:GetGroupInfoOfTableResponse) + } + + static { + defaultInstance = new GetGroupInfoOfTableResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:GetGroupInfoOfTableResponse) + } + + public interface MoveServersRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string target_group = 1; + /** + * required string target_group = 1; + */ + boolean hasTargetGroup(); + /** + * required string target_group = 1; + */ + java.lang.String getTargetGroup(); + /** + * required string target_group = 1; + */ + com.google.protobuf.ByteString + getTargetGroupBytes(); + + // repeated string servers = 2; + /** + * repeated string servers = 2; + */ + java.util.List + getServersList(); + /** + * repeated string servers = 2; + */ + int getServersCount(); + /** + * repeated string servers = 2; + */ + java.lang.String getServers(int index); + /** + * repeated string servers = 2; + */ + com.google.protobuf.ByteString + getServersBytes(int index); + } + /** + * Protobuf type {@code MoveServersRequest} + */ + public static final class MoveServersRequest extends + com.google.protobuf.GeneratedMessage + implements MoveServersRequestOrBuilder { + // Use MoveServersRequest.newBuilder() to construct. 
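MoveServersRequest pairs a required target_group string with a repeated servers list. A minimal construction sketch, using hypothetical group and host:port values:

    // Sketch only: build() throws if the required target_group is left unset.
    MoveServersRequest request = MoveServersRequest.newBuilder()
        .setTargetGroup("my_group")              // required string target_group = 1
        .addServers("rs1.example.com:16020")     // repeated string servers = 2
        .addServers("rs2.example.com:16020")
        .build();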
+ private MoveServersRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private MoveServersRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final MoveServersRequest defaultInstance; + public static MoveServersRequest getDefaultInstance() { + return defaultInstance; + } + + public MoveServersRequest getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private MoveServersRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + targetGroup_ = input.readBytes(); + break; + } + case 18: { + if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + servers_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000002; + } + servers_.add(input.readBytes()); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + servers_ = new com.google.protobuf.UnmodifiableLazyStringList(servers_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_MoveServersRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_MoveServersRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersRequest.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersRequest.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public MoveServersRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new MoveServersRequest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string target_group = 1; + public static final int TARGET_GROUP_FIELD_NUMBER = 1; + private java.lang.Object targetGroup_; + /** + * required string target_group = 1; + */ + public boolean hasTargetGroup() { + return ((bitField0_ & 
0x00000001) == 0x00000001); + } + /** + * required string target_group = 1; + */ + public java.lang.String getTargetGroup() { + java.lang.Object ref = targetGroup_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + targetGroup_ = s; + } + return s; + } + } + /** + * required string target_group = 1; + */ + public com.google.protobuf.ByteString + getTargetGroupBytes() { + java.lang.Object ref = targetGroup_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + targetGroup_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // repeated string servers = 2; + public static final int SERVERS_FIELD_NUMBER = 2; + private com.google.protobuf.LazyStringList servers_; + /** + * repeated string servers = 2; + */ + public java.util.List + getServersList() { + return servers_; + } + /** + * repeated string servers = 2; + */ + public int getServersCount() { + return servers_.size(); + } + /** + * repeated string servers = 2; + */ + public java.lang.String getServers(int index) { + return servers_.get(index); + } + /** + * repeated string servers = 2; + */ + public com.google.protobuf.ByteString + getServersBytes(int index) { + return servers_.getByteString(index); + } + + private void initFields() { + targetGroup_ = ""; + servers_ = com.google.protobuf.LazyStringArrayList.EMPTY; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasTargetGroup()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getTargetGroupBytes()); + } + for (int i = 0; i < servers_.size(); i++) { + output.writeBytes(2, servers_.getByteString(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getTargetGroupBytes()); + } + { + int dataSize = 0; + for (int i = 0; i < servers_.size(); i++) { + dataSize += com.google.protobuf.CodedOutputStream + .computeBytesSizeNoTag(servers_.getByteString(i)); + } + size += dataSize; + size += 1 * getServersList().size(); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersRequest other = 
(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersRequest) obj; + + boolean result = true; + result = result && (hasTargetGroup() == other.hasTargetGroup()); + if (hasTargetGroup()) { + result = result && getTargetGroup() + .equals(other.getTargetGroup()); + } + result = result && getServersList() + .equals(other.getServersList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasTargetGroup()) { + hash = (37 * hash) + TARGET_GROUP_FIELD_NUMBER; + hash = (53 * hash) + getTargetGroup().hashCode(); + } + if (getServersCount() > 0) { + hash = (37 * hash) + SERVERS_FIELD_NUMBER; + hash = (53 * hash) + getServersList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code MoveServersRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_MoveServersRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_MoveServersRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersRequest.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersRequest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + targetGroup_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + servers_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_MoveServersRequest_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersRequest build() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersRequest result = new org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.targetGroup_ = targetGroup_; + if 
(((bitField0_ & 0x00000002) == 0x00000002)) { + servers_ = new com.google.protobuf.UnmodifiableLazyStringList( + servers_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.servers_ = servers_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersRequest.getDefaultInstance()) return this; + if (other.hasTargetGroup()) { + bitField0_ |= 0x00000001; + targetGroup_ = other.targetGroup_; + onChanged(); + } + if (!other.servers_.isEmpty()) { + if (servers_.isEmpty()) { + servers_ = other.servers_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureServersIsMutable(); + servers_.addAll(other.servers_); + } + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasTargetGroup()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string target_group = 1; + private java.lang.Object targetGroup_ = ""; + /** + * required string target_group = 1; + */ + public boolean hasTargetGroup() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string target_group = 1; + */ + public java.lang.String getTargetGroup() { + java.lang.Object ref = targetGroup_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + targetGroup_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string target_group = 1; + */ + public com.google.protobuf.ByteString + getTargetGroupBytes() { + java.lang.Object ref = targetGroup_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + targetGroup_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string target_group = 1; + */ + public Builder setTargetGroup( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + targetGroup_ = value; + onChanged(); + return this; + } + /** + * required string target_group = 1; + */ + public Builder clearTargetGroup() { + bitField0_ = (bitField0_ & ~0x00000001); + targetGroup_ = getDefaultInstance().getTargetGroup(); + onChanged(); + return this; + } + /** + * required string target_group = 1; + */ + public 
Builder setTargetGroupBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + targetGroup_ = value; + onChanged(); + return this; + } + + // repeated string servers = 2; + private com.google.protobuf.LazyStringList servers_ = com.google.protobuf.LazyStringArrayList.EMPTY; + private void ensureServersIsMutable() { + if (!((bitField0_ & 0x00000002) == 0x00000002)) { + servers_ = new com.google.protobuf.LazyStringArrayList(servers_); + bitField0_ |= 0x00000002; + } + } + /** + * repeated string servers = 2; + */ + public java.util.List + getServersList() { + return java.util.Collections.unmodifiableList(servers_); + } + /** + * repeated string servers = 2; + */ + public int getServersCount() { + return servers_.size(); + } + /** + * repeated string servers = 2; + */ + public java.lang.String getServers(int index) { + return servers_.get(index); + } + /** + * repeated string servers = 2; + */ + public com.google.protobuf.ByteString + getServersBytes(int index) { + return servers_.getByteString(index); + } + /** + * repeated string servers = 2; + */ + public Builder setServers( + int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureServersIsMutable(); + servers_.set(index, value); + onChanged(); + return this; + } + /** + * repeated string servers = 2; + */ + public Builder addServers( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureServersIsMutable(); + servers_.add(value); + onChanged(); + return this; + } + /** + * repeated string servers = 2; + */ + public Builder addAllServers( + java.lang.Iterable values) { + ensureServersIsMutable(); + super.addAll(values, servers_); + onChanged(); + return this; + } + /** + * repeated string servers = 2; + */ + public Builder clearServers() { + servers_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + /** + * repeated string servers = 2; + */ + public Builder addServersBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + ensureServersIsMutable(); + servers_.add(value); + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:MoveServersRequest) + } + + static { + defaultInstance = new MoveServersRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:MoveServersRequest) + } + + public interface MoveServersResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + } + /** + * Protobuf type {@code MoveServersResponse} + */ + public static final class MoveServersResponse extends + com.google.protobuf.GeneratedMessage + implements MoveServersResponseOrBuilder { + // Use MoveServersResponse.newBuilder() to construct. 
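MoveServersResponse declares no fields, so it serves purely as an acknowledgement. A quick, self-contained serialization round-trip sketch for the request/response pair (group name hypothetical; parseFrom declares InvalidProtocolBufferException):

    // Sketch only: messages serialize to bytes and parse back via the generated parser.
    byte[] wire = MoveServersRequest.newBuilder()
        .setTargetGroup("my_group")
        .build()
        .toByteArray();
    MoveServersRequest roundTripped = MoveServersRequest.parseFrom(wire);
    MoveServersResponse ack = MoveServersResponse.getDefaultInstance();   // the empty response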
+ private MoveServersResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private MoveServersResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final MoveServersResponse defaultInstance; + public static MoveServersResponse getDefaultInstance() { + return defaultInstance; + } + + public MoveServersResponse getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private MoveServersResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_MoveServersResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_MoveServersResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersResponse.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersResponse.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public MoveServersResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new MoveServersResponse(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private void initFields() { + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 
0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersResponse other = (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersResponse) obj; + + boolean result = true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code MoveServersResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_MoveServersResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_MoveServersResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersResponse.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersResponse.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_MoveServersResponse_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersResponse build() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersResponse result = new org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersResponse(this); + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersResponse)other); + } else { + super.mergeFrom(other); + return 
this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersResponse.getDefaultInstance()) return this; + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersResponse) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + // @@protoc_insertion_point(builder_scope:MoveServersResponse) + } + + static { + defaultInstance = new MoveServersResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:MoveServersResponse) + } + + public interface MoveTablesRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string target_group = 1; + /** + * required string target_group = 1; + */ + boolean hasTargetGroup(); + /** + * required string target_group = 1; + */ + java.lang.String getTargetGroup(); + /** + * required string target_group = 1; + */ + com.google.protobuf.ByteString + getTargetGroupBytes(); + + // repeated .TableName table_name = 2; + /** + * repeated .TableName table_name = 2; + */ + java.util.List + getTableNameList(); + /** + * repeated .TableName table_name = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName(int index); + /** + * repeated .TableName table_name = 2; + */ + int getTableNameCount(); + /** + * repeated .TableName table_name = 2; + */ + java.util.List + getTableNameOrBuilderList(); + /** + * repeated .TableName table_name = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder( + int index); + } + /** + * Protobuf type {@code MoveTablesRequest} + */ + public static final class MoveTablesRequest extends + com.google.protobuf.GeneratedMessage + implements MoveTablesRequestOrBuilder { + // Use MoveTablesRequest.newBuilder() to construct. 
+ private MoveTablesRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private MoveTablesRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final MoveTablesRequest defaultInstance; + public static MoveTablesRequest getDefaultInstance() { + return defaultInstance; + } + + public MoveTablesRequest getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private MoveTablesRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + targetGroup_ = input.readBytes(); + break; + } + case 18: { + if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + tableName_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000002; + } + tableName_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + tableName_ = java.util.Collections.unmodifiableList(tableName_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_MoveTablesRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_MoveTablesRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesRequest.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesRequest.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public MoveTablesRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new MoveTablesRequest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string target_group = 1; + public static final int TARGET_GROUP_FIELD_NUMBER = 1; + private java.lang.Object targetGroup_; + /** + * required string target_group = 1; + */ + public 
boolean hasTargetGroup() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string target_group = 1; + */ + public java.lang.String getTargetGroup() { + java.lang.Object ref = targetGroup_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + targetGroup_ = s; + } + return s; + } + } + /** + * required string target_group = 1; + */ + public com.google.protobuf.ByteString + getTargetGroupBytes() { + java.lang.Object ref = targetGroup_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + targetGroup_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // repeated .TableName table_name = 2; + public static final int TABLE_NAME_FIELD_NUMBER = 2; + private java.util.List tableName_; + /** + * repeated .TableName table_name = 2; + */ + public java.util.List getTableNameList() { + return tableName_; + } + /** + * repeated .TableName table_name = 2; + */ + public java.util.List + getTableNameOrBuilderList() { + return tableName_; + } + /** + * repeated .TableName table_name = 2; + */ + public int getTableNameCount() { + return tableName_.size(); + } + /** + * repeated .TableName table_name = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName(int index) { + return tableName_.get(index); + } + /** + * repeated .TableName table_name = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder( + int index) { + return tableName_.get(index); + } + + private void initFields() { + targetGroup_ = ""; + tableName_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasTargetGroup()) { + memoizedIsInitialized = 0; + return false; + } + for (int i = 0; i < getTableNameCount(); i++) { + if (!getTableName(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getTargetGroupBytes()); + } + for (int i = 0; i < tableName_.size(); i++) { + output.writeMessage(2, tableName_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getTargetGroupBytes()); + } + for (int i = 0; i < tableName_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, tableName_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { 
+ if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesRequest other = (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesRequest) obj; + + boolean result = true; + result = result && (hasTargetGroup() == other.hasTargetGroup()); + if (hasTargetGroup()) { + result = result && getTargetGroup() + .equals(other.getTargetGroup()); + } + result = result && getTableNameList() + .equals(other.getTableNameList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasTargetGroup()) { + hash = (37 * hash) + TARGET_GROUP_FIELD_NUMBER; + hash = (53 * hash) + getTargetGroup().hashCode(); + } + if (getTableNameCount() > 0) { + hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER; + hash = (53 * hash) + getTableNameList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { 
+ return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code MoveTablesRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_MoveTablesRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_MoveTablesRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesRequest.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesRequest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getTableNameFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + targetGroup_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + if (tableNameBuilder_ == null) { + tableName_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + } else { + tableNameBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_MoveTablesRequest_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesRequest build() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesRequest buildPartial() { + 
org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesRequest result = new org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.targetGroup_ = targetGroup_; + if (tableNameBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { + tableName_ = java.util.Collections.unmodifiableList(tableName_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.tableName_ = tableName_; + } else { + result.tableName_ = tableNameBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesRequest.getDefaultInstance()) return this; + if (other.hasTargetGroup()) { + bitField0_ |= 0x00000001; + targetGroup_ = other.targetGroup_; + onChanged(); + } + if (tableNameBuilder_ == null) { + if (!other.tableName_.isEmpty()) { + if (tableName_.isEmpty()) { + tableName_ = other.tableName_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureTableNameIsMutable(); + tableName_.addAll(other.tableName_); + } + onChanged(); + } + } else { + if (!other.tableName_.isEmpty()) { + if (tableNameBuilder_.isEmpty()) { + tableNameBuilder_.dispose(); + tableNameBuilder_ = null; + tableName_ = other.tableName_; + bitField0_ = (bitField0_ & ~0x00000002); + tableNameBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
+ getTableNameFieldBuilder() : null; + } else { + tableNameBuilder_.addAllMessages(other.tableName_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasTargetGroup()) { + + return false; + } + for (int i = 0; i < getTableNameCount(); i++) { + if (!getTableName(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string target_group = 1; + private java.lang.Object targetGroup_ = ""; + /** + * required string target_group = 1; + */ + public boolean hasTargetGroup() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string target_group = 1; + */ + public java.lang.String getTargetGroup() { + java.lang.Object ref = targetGroup_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + targetGroup_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string target_group = 1; + */ + public com.google.protobuf.ByteString + getTargetGroupBytes() { + java.lang.Object ref = targetGroup_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + targetGroup_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string target_group = 1; + */ + public Builder setTargetGroup( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + targetGroup_ = value; + onChanged(); + return this; + } + /** + * required string target_group = 1; + */ + public Builder clearTargetGroup() { + bitField0_ = (bitField0_ & ~0x00000001); + targetGroup_ = getDefaultInstance().getTargetGroup(); + onChanged(); + return this; + } + /** + * required string target_group = 1; + */ + public Builder setTargetGroupBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + targetGroup_ = value; + onChanged(); + return this; + } + + // repeated .TableName table_name = 2; + private java.util.List tableName_ = + java.util.Collections.emptyList(); + private void ensureTableNameIsMutable() { + if (!((bitField0_ & 0x00000002) == 0x00000002)) { + tableName_ = new java.util.ArrayList(tableName_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableNameBuilder_; + + /** + * repeated .TableName table_name = 2; + */ + public java.util.List getTableNameList() { + if (tableNameBuilder_ == null) { + return 
java.util.Collections.unmodifiableList(tableName_); + } else { + return tableNameBuilder_.getMessageList(); + } + } + /** + * repeated .TableName table_name = 2; + */ + public int getTableNameCount() { + if (tableNameBuilder_ == null) { + return tableName_.size(); + } else { + return tableNameBuilder_.getCount(); + } + } + /** + * repeated .TableName table_name = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName(int index) { + if (tableNameBuilder_ == null) { + return tableName_.get(index); + } else { + return tableNameBuilder_.getMessage(index); + } + } + /** + * repeated .TableName table_name = 2; + */ + public Builder setTableName( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTableNameIsMutable(); + tableName_.set(index, value); + onChanged(); + } else { + tableNameBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .TableName table_name = 2; + */ + public Builder setTableName( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableNameBuilder_ == null) { + ensureTableNameIsMutable(); + tableName_.set(index, builderForValue.build()); + onChanged(); + } else { + tableNameBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .TableName table_name = 2; + */ + public Builder addTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTableNameIsMutable(); + tableName_.add(value); + onChanged(); + } else { + tableNameBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .TableName table_name = 2; + */ + public Builder addTableName( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTableNameIsMutable(); + tableName_.add(index, value); + onChanged(); + } else { + tableNameBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .TableName table_name = 2; + */ + public Builder addTableName( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableNameBuilder_ == null) { + ensureTableNameIsMutable(); + tableName_.add(builderForValue.build()); + onChanged(); + } else { + tableNameBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .TableName table_name = 2; + */ + public Builder addTableName( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableNameBuilder_ == null) { + ensureTableNameIsMutable(); + tableName_.add(index, builderForValue.build()); + onChanged(); + } else { + tableNameBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .TableName table_name = 2; + */ + public Builder addAllTableName( + java.lang.Iterable values) { + if (tableNameBuilder_ == null) { + ensureTableNameIsMutable(); + super.addAll(values, tableName_); + onChanged(); + } else { + tableNameBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .TableName table_name = 2; + */ + public Builder clearTableName() { + if (tableNameBuilder_ == null) { + tableName_ = java.util.Collections.emptyList(); + 
bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + tableNameBuilder_.clear(); + } + return this; + } + /** + * repeated .TableName table_name = 2; + */ + public Builder removeTableName(int index) { + if (tableNameBuilder_ == null) { + ensureTableNameIsMutable(); + tableName_.remove(index); + onChanged(); + } else { + tableNameBuilder_.remove(index); + } + return this; + } + /** + * repeated .TableName table_name = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableNameBuilder( + int index) { + return getTableNameFieldBuilder().getBuilder(index); + } + /** + * repeated .TableName table_name = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder( + int index) { + if (tableNameBuilder_ == null) { + return tableName_.get(index); } else { + return tableNameBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .TableName table_name = 2; + */ + public java.util.List + getTableNameOrBuilderList() { + if (tableNameBuilder_ != null) { + return tableNameBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(tableName_); + } + } + /** + * repeated .TableName table_name = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder addTableNameBuilder() { + return getTableNameFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()); + } + /** + * repeated .TableName table_name = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder addTableNameBuilder( + int index) { + return getTableNameFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()); + } + /** + * repeated .TableName table_name = 2; + */ + public java.util.List + getTableNameBuilderList() { + return getTableNameFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> + getTableNameFieldBuilder() { + if (tableNameBuilder_ == null) { + tableNameBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>( + tableName_, + ((bitField0_ & 0x00000002) == 0x00000002), + getParentForChildren(), + isClean()); + tableName_ = null; + } + return tableNameBuilder_; + } + + // @@protoc_insertion_point(builder_scope:MoveTablesRequest) + } + + static { + defaultInstance = new MoveTablesRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:MoveTablesRequest) + } + + public interface MoveTablesResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + } + /** + * Protobuf type {@code MoveTablesResponse} + */ + public static final class MoveTablesResponse extends + com.google.protobuf.GeneratedMessage + implements MoveTablesResponseOrBuilder { + // Use MoveTablesResponse.newBuilder() to construct. 
+ private MoveTablesResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private MoveTablesResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final MoveTablesResponse defaultInstance; + public static MoveTablesResponse getDefaultInstance() { + return defaultInstance; + } + + public MoveTablesResponse getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private MoveTablesResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_MoveTablesResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_MoveTablesResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesResponse.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesResponse.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public MoveTablesResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new MoveTablesResponse(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private void initFields() { + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + 
@java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesResponse other = (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesResponse) obj; + + boolean result = true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + 
return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code MoveTablesResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_MoveTablesResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_MoveTablesResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesResponse.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesResponse.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_MoveTablesResponse_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesResponse build() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesResponse result = new org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesResponse(this); + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder 
mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesResponse.getDefaultInstance()) return this; + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesResponse) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + // @@protoc_insertion_point(builder_scope:MoveTablesResponse) + } + + static { + defaultInstance = new MoveTablesResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:MoveTablesResponse) + } + + public interface AddGroupRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string group_name = 1; + /** + * required string group_name = 1; + */ + boolean hasGroupName(); + /** + * required string group_name = 1; + */ + java.lang.String getGroupName(); + /** + * required string group_name = 1; + */ + com.google.protobuf.ByteString + getGroupNameBytes(); + } + /** + * Protobuf type {@code AddGroupRequest} + */ + public static final class AddGroupRequest extends + com.google.protobuf.GeneratedMessage + implements AddGroupRequestOrBuilder { + // Use AddGroupRequest.newBuilder() to construct. 
+ private AddGroupRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private AddGroupRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final AddGroupRequest defaultInstance; + public static AddGroupRequest getDefaultInstance() { + return defaultInstance; + } + + public AddGroupRequest getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private AddGroupRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + groupName_ = input.readBytes(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_AddGroupRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_AddGroupRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupRequest.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupRequest.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public AddGroupRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new AddGroupRequest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string group_name = 1; + public static final int GROUP_NAME_FIELD_NUMBER = 1; + private java.lang.Object groupName_; + /** + * required string group_name = 1; + */ + public boolean hasGroupName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string group_name = 1; + */ + public java.lang.String getGroupName() { + java.lang.Object ref = groupName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + groupName_ = s; + } + 
return s; + } + } + /** + * required string group_name = 1; + */ + public com.google.protobuf.ByteString + getGroupNameBytes() { + java.lang.Object ref = groupName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + groupName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private void initFields() { + groupName_ = ""; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasGroupName()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getGroupNameBytes()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getGroupNameBytes()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupRequest other = (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupRequest) obj; + + boolean result = true; + result = result && (hasGroupName() == other.hasGroupName()); + if (hasGroupName()) { + result = result && getGroupName() + .equals(other.getGroupName()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasGroupName()) { + hash = (37 * hash) + GROUP_NAME_FIELD_NUMBER; + hash = (53 * hash) + getGroupName().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code AddGroupRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_AddGroupRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_AddGroupRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupRequest.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupRequest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + 
} + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + groupName_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_AddGroupRequest_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupRequest build() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupRequest result = new org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.groupName_ = groupName_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupRequest.getDefaultInstance()) return this; + if (other.hasGroupName()) { + bitField0_ |= 0x00000001; + groupName_ = other.groupName_; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasGroupName()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string group_name = 1; + private java.lang.Object groupName_ = ""; + /** + * required string group_name = 1; + */ + public boolean hasGroupName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string group_name = 1; + */ + public java.lang.String getGroupName() { + java.lang.Object ref = groupName_; + if (!(ref instanceof java.lang.String)) { + 
java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + groupName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string group_name = 1; + */ + public com.google.protobuf.ByteString + getGroupNameBytes() { + java.lang.Object ref = groupName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + groupName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string group_name = 1; + */ + public Builder setGroupName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + groupName_ = value; + onChanged(); + return this; + } + /** + * required string group_name = 1; + */ + public Builder clearGroupName() { + bitField0_ = (bitField0_ & ~0x00000001); + groupName_ = getDefaultInstance().getGroupName(); + onChanged(); + return this; + } + /** + * required string group_name = 1; + */ + public Builder setGroupNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + groupName_ = value; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:AddGroupRequest) + } + + static { + defaultInstance = new AddGroupRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:AddGroupRequest) + } + + public interface AddGroupResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + } + /** + * Protobuf type {@code AddGroupResponse} + */ + public static final class AddGroupResponse extends + com.google.protobuf.GeneratedMessage + implements AddGroupResponseOrBuilder { + // Use AddGroupResponse.newBuilder() to construct. 
+ private AddGroupResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private AddGroupResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final AddGroupResponse defaultInstance; + public static AddGroupResponse getDefaultInstance() { + return defaultInstance; + } + + public AddGroupResponse getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private AddGroupResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_AddGroupResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_AddGroupResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupResponse.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupResponse.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public AddGroupResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new AddGroupResponse(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private void initFields() { + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected 
java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupResponse other = (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupResponse) obj; + + boolean result = true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + 
public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code AddGroupResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_AddGroupResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_AddGroupResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupResponse.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupResponse.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_AddGroupResponse_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupResponse build() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupResponse result = new org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupResponse(this); + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupResponse other) { + if (other 
== org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupResponse.getDefaultInstance()) return this; + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupResponse) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + // @@protoc_insertion_point(builder_scope:AddGroupResponse) + } + + static { + defaultInstance = new AddGroupResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:AddGroupResponse) + } + + public interface RemoveGroupRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string group_name = 1; + /** + * required string group_name = 1; + */ + boolean hasGroupName(); + /** + * required string group_name = 1; + */ + java.lang.String getGroupName(); + /** + * required string group_name = 1; + */ + com.google.protobuf.ByteString + getGroupNameBytes(); + } + /** + * Protobuf type {@code RemoveGroupRequest} + */ + public static final class RemoveGroupRequest extends + com.google.protobuf.GeneratedMessage + implements RemoveGroupRequestOrBuilder { + // Use RemoveGroupRequest.newBuilder() to construct. + private RemoveGroupRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private RemoveGroupRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final RemoveGroupRequest defaultInstance; + public static RemoveGroupRequest getDefaultInstance() { + return defaultInstance; + } + + public RemoveGroupRequest getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private RemoveGroupRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + groupName_ = input.readBytes(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public 
static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_RemoveGroupRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_RemoveGroupRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupRequest.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupRequest.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public RemoveGroupRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new RemoveGroupRequest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string group_name = 1; + public static final int GROUP_NAME_FIELD_NUMBER = 1; + private java.lang.Object groupName_; + /** + * required string group_name = 1; + */ + public boolean hasGroupName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string group_name = 1; + */ + public java.lang.String getGroupName() { + java.lang.Object ref = groupName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + groupName_ = s; + } + return s; + } + } + /** + * required string group_name = 1; + */ + public com.google.protobuf.ByteString + getGroupNameBytes() { + java.lang.Object ref = groupName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + groupName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private void initFields() { + groupName_ = ""; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasGroupName()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getGroupNameBytes()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getGroupNameBytes()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + 
return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupRequest other = (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupRequest) obj; + + boolean result = true; + result = result && (hasGroupName() == other.hasGroupName()); + if (hasGroupName()) { + result = result && getGroupName() + .equals(other.getGroupName()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasGroupName()) { + hash = (37 * hash) + GROUP_NAME_FIELD_NUMBER; + hash = (53 * hash) + getGroupName().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code RemoveGroupRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_RemoveGroupRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_RemoveGroupRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupRequest.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupRequest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + groupName_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_RemoveGroupRequest_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupRequest build() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupRequest result = new org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.groupName_ = groupName_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder 
mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupRequest.getDefaultInstance()) return this; + if (other.hasGroupName()) { + bitField0_ |= 0x00000001; + groupName_ = other.groupName_; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasGroupName()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string group_name = 1; + private java.lang.Object groupName_ = ""; + /** + * required string group_name = 1; + */ + public boolean hasGroupName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string group_name = 1; + */ + public java.lang.String getGroupName() { + java.lang.Object ref = groupName_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + groupName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string group_name = 1; + */ + public com.google.protobuf.ByteString + getGroupNameBytes() { + java.lang.Object ref = groupName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + groupName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string group_name = 1; + */ + public Builder setGroupName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + groupName_ = value; + onChanged(); + return this; + } + /** + * required string group_name = 1; + */ + public Builder clearGroupName() { + bitField0_ = (bitField0_ & ~0x00000001); + groupName_ = getDefaultInstance().getGroupName(); + onChanged(); + return this; + } + /** + * required string group_name = 1; + */ + public Builder setGroupNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + groupName_ = value; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:RemoveGroupRequest) + } + + static { + defaultInstance = new RemoveGroupRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:RemoveGroupRequest) + } + + public interface RemoveGroupResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + } + /** + * Protobuf 
type {@code RemoveGroupResponse} + */ + public static final class RemoveGroupResponse extends + com.google.protobuf.GeneratedMessage + implements RemoveGroupResponseOrBuilder { + // Use RemoveGroupResponse.newBuilder() to construct. + private RemoveGroupResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private RemoveGroupResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final RemoveGroupResponse defaultInstance; + public static RemoveGroupResponse getDefaultInstance() { + return defaultInstance; + } + + public RemoveGroupResponse getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private RemoveGroupResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_RemoveGroupResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_RemoveGroupResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupResponse.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupResponse.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public RemoveGroupResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new RemoveGroupResponse(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private void initFields() { + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + 
int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupResponse other = (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupResponse) obj; + + boolean result = true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + 
public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code RemoveGroupResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_RemoveGroupResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_RemoveGroupResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupResponse.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupResponse.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_RemoveGroupResponse_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupResponse build() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupResponse result = new org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupResponse(this); + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof 
org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupResponse.getDefaultInstance()) return this; + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupResponse) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + // @@protoc_insertion_point(builder_scope:RemoveGroupResponse) + } + + static { + defaultInstance = new RemoveGroupResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:RemoveGroupResponse) + } + + public interface BalanceGroupRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string group_name = 1; + /** + * required string group_name = 1; + */ + boolean hasGroupName(); + /** + * required string group_name = 1; + */ + java.lang.String getGroupName(); + /** + * required string group_name = 1; + */ + com.google.protobuf.ByteString + getGroupNameBytes(); + } + /** + * Protobuf type {@code BalanceGroupRequest} + */ + public static final class BalanceGroupRequest extends + com.google.protobuf.GeneratedMessage + implements BalanceGroupRequestOrBuilder { + // Use BalanceGroupRequest.newBuilder() to construct. 
+ private BalanceGroupRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private BalanceGroupRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final BalanceGroupRequest defaultInstance; + public static BalanceGroupRequest getDefaultInstance() { + return defaultInstance; + } + + public BalanceGroupRequest getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private BalanceGroupRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + groupName_ = input.readBytes(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_BalanceGroupRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_BalanceGroupRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupRequest.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupRequest.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public BalanceGroupRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new BalanceGroupRequest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string group_name = 1; + public static final int GROUP_NAME_FIELD_NUMBER = 1; + private java.lang.Object groupName_; + /** + * required string group_name = 1; + */ + public boolean hasGroupName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string group_name = 1; + */ + public java.lang.String getGroupName() { + java.lang.Object ref = groupName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if 
(bs.isValidUtf8()) { + groupName_ = s; + } + return s; + } + } + /** + * required string group_name = 1; + */ + public com.google.protobuf.ByteString + getGroupNameBytes() { + java.lang.Object ref = groupName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + groupName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private void initFields() { + groupName_ = ""; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasGroupName()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getGroupNameBytes()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getGroupNameBytes()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupRequest other = (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupRequest) obj; + + boolean result = true; + result = result && (hasGroupName() == other.hasGroupName()); + if (hasGroupName()) { + result = result && getGroupName() + .equals(other.getGroupName()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasGroupName()) { + hash = (37 * hash) + GROUP_NAME_FIELD_NUMBER; + hash = (53 * hash) + getGroupName().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupRequest parseFrom(byte[] data) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code BalanceGroupRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_BalanceGroupRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_BalanceGroupRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupRequest.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupRequest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private 
Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + groupName_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_BalanceGroupRequest_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupRequest build() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupRequest result = new org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.groupName_ = groupName_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupRequest.getDefaultInstance()) return this; + if (other.hasGroupName()) { + bitField0_ |= 0x00000001; + groupName_ = other.groupName_; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasGroupName()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string group_name = 1; + private java.lang.Object groupName_ = ""; + /** + * required string group_name = 1; + */ + public boolean hasGroupName() { + return ((bitField0_ & 0x00000001) == 
0x00000001); + } + /** + * required string group_name = 1; + */ + public java.lang.String getGroupName() { + java.lang.Object ref = groupName_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + groupName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string group_name = 1; + */ + public com.google.protobuf.ByteString + getGroupNameBytes() { + java.lang.Object ref = groupName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + groupName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string group_name = 1; + */ + public Builder setGroupName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + groupName_ = value; + onChanged(); + return this; + } + /** + * required string group_name = 1; + */ + public Builder clearGroupName() { + bitField0_ = (bitField0_ & ~0x00000001); + groupName_ = getDefaultInstance().getGroupName(); + onChanged(); + return this; + } + /** + * required string group_name = 1; + */ + public Builder setGroupNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + groupName_ = value; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:BalanceGroupRequest) + } + + static { + defaultInstance = new BalanceGroupRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:BalanceGroupRequest) + } + + public interface BalanceGroupResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required bool balanceRan = 1; + /** + * required bool balanceRan = 1; + */ + boolean hasBalanceRan(); + /** + * required bool balanceRan = 1; + */ + boolean getBalanceRan(); + } + /** + * Protobuf type {@code BalanceGroupResponse} + */ + public static final class BalanceGroupResponse extends + com.google.protobuf.GeneratedMessage + implements BalanceGroupResponseOrBuilder { + // Use BalanceGroupResponse.newBuilder() to construct. 
+ private BalanceGroupResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private BalanceGroupResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final BalanceGroupResponse defaultInstance; + public static BalanceGroupResponse getDefaultInstance() { + return defaultInstance; + } + + public BalanceGroupResponse getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private BalanceGroupResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + balanceRan_ = input.readBool(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_BalanceGroupResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_BalanceGroupResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupResponse.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupResponse.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public BalanceGroupResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new BalanceGroupResponse(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required bool balanceRan = 1; + public static final int BALANCERAN_FIELD_NUMBER = 1; + private boolean balanceRan_; + /** + * required bool balanceRan = 1; + */ + public boolean hasBalanceRan() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required bool balanceRan = 1; + */ + public boolean getBalanceRan() { + return balanceRan_; + } + + private void initFields() { + balanceRan_ = false; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return 
isInitialized == 1; + + if (!hasBalanceRan()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBool(1, balanceRan_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(1, balanceRan_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupResponse other = (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupResponse) obj; + + boolean result = true; + result = result && (hasBalanceRan() == other.hasBalanceRan()); + if (hasBalanceRan()) { + result = result && (getBalanceRan() + == other.getBalanceRan()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasBalanceRan()) { + hash = (37 * hash) + BALANCERAN_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getBalanceRan()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupResponse 
parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code BalanceGroupResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_BalanceGroupResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_BalanceGroupResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupResponse.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupResponse.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + balanceRan_ = false; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return 
org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_BalanceGroupResponse_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupResponse build() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupResponse result = new org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupResponse(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.balanceRan_ = balanceRan_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupResponse.getDefaultInstance()) return this; + if (other.hasBalanceRan()) { + setBalanceRan(other.getBalanceRan()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasBalanceRan()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupResponse) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required bool balanceRan = 1; + private boolean balanceRan_ ; + /** + * required bool balanceRan = 1; + */ + public boolean hasBalanceRan() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required bool balanceRan = 1; + */ + public boolean getBalanceRan() { + return balanceRan_; + } + /** + * required bool balanceRan = 1; + */ + public Builder setBalanceRan(boolean value) { + bitField0_ |= 0x00000001; + balanceRan_ = value; + onChanged(); + return this; + } + /** + * required bool balanceRan = 1; + */ + public Builder clearBalanceRan() { + bitField0_ = (bitField0_ & ~0x00000001); + balanceRan_ = false; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:BalanceGroupResponse) + } + + static { + defaultInstance = new BalanceGroupResponse(true); + defaultInstance.initFields(); + } 
+ + // @@protoc_insertion_point(class_scope:BalanceGroupResponse) + } + + public interface ListGroupsRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + } + /** + * Protobuf type {@code ListGroupsRequest} + */ + public static final class ListGroupsRequest extends + com.google.protobuf.GeneratedMessage + implements ListGroupsRequestOrBuilder { + // Use ListGroupsRequest.newBuilder() to construct. + private ListGroupsRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private ListGroupsRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final ListGroupsRequest defaultInstance; + public static ListGroupsRequest getDefaultInstance() { + return defaultInstance; + } + + public ListGroupsRequest getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ListGroupsRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_ListGroupsRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_ListGroupsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsRequest.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsRequest.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public ListGroupsRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ListGroupsRequest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private void initFields() { + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws 
java.io.IOException { + getSerializedSize(); + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsRequest other = (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsRequest) obj; + + boolean result = true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static 
org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code ListGroupsRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_ListGroupsRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_ListGroupsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsRequest.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsRequest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_ListGroupsRequest_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsRequest build() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsRequest result = new 
org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsRequest(this); + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsRequest.getDefaultInstance()) return this; + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + // @@protoc_insertion_point(builder_scope:ListGroupsRequest) + } + + static { + defaultInstance = new ListGroupsRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:ListGroupsRequest) + } + + public interface ListGroupsResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // repeated .GroupInfo group_info = 1; + /** + * repeated .GroupInfo group_info = 1; + */ + java.util.List + getGroupInfoList(); + /** + * repeated .GroupInfo group_info = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo getGroupInfo(int index); + /** + * repeated .GroupInfo group_info = 1; + */ + int getGroupInfoCount(); + /** + * repeated .GroupInfo group_info = 1; + */ + java.util.List + getGroupInfoOrBuilderList(); + /** + * repeated .GroupInfo group_info = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfoOrBuilder getGroupInfoOrBuilder( + int index); + } + /** + * Protobuf type {@code ListGroupsResponse} + */ + public static final class ListGroupsResponse extends + com.google.protobuf.GeneratedMessage + implements ListGroupsResponseOrBuilder { + // Use ListGroupsResponse.newBuilder() to construct. 
+ private ListGroupsResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private ListGroupsResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final ListGroupsResponse defaultInstance; + public static ListGroupsResponse getDefaultInstance() { + return defaultInstance; + } + + public ListGroupsResponse getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ListGroupsResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + groupInfo_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; + } + groupInfo_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.PARSER, extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + groupInfo_ = java.util.Collections.unmodifiableList(groupInfo_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_ListGroupsResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_ListGroupsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsResponse.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsResponse.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public ListGroupsResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ListGroupsResponse(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + // repeated .GroupInfo group_info = 1; + public static final int GROUP_INFO_FIELD_NUMBER = 1; + private java.util.List groupInfo_; + /** + * repeated .GroupInfo group_info = 1; + */ + public java.util.List getGroupInfoList() { + return groupInfo_; + } + /** + * repeated .GroupInfo group_info = 1; + 
*/ + public java.util.List + getGroupInfoOrBuilderList() { + return groupInfo_; + } + /** + * repeated .GroupInfo group_info = 1; + */ + public int getGroupInfoCount() { + return groupInfo_.size(); + } + /** + * repeated .GroupInfo group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo getGroupInfo(int index) { + return groupInfo_.get(index); + } + /** + * repeated .GroupInfo group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfoOrBuilder getGroupInfoOrBuilder( + int index) { + return groupInfo_.get(index); + } + + private void initFields() { + groupInfo_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + for (int i = 0; i < getGroupInfoCount(); i++) { + if (!getGroupInfo(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + for (int i = 0; i < groupInfo_.size(); i++) { + output.writeMessage(1, groupInfo_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < groupInfo_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, groupInfo_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsResponse other = (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsResponse) obj; + + boolean result = true; + result = result && getGroupInfoList() + .equals(other.getGroupInfoList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (getGroupInfoCount() > 0) { + hash = (37 * hash) + GROUP_INFO_FIELD_NUMBER; + hash = (53 * hash) + getGroupInfoList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code ListGroupsResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_ListGroupsResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_ListGroupsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsResponse.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsResponse.Builder.class); + } + + // Construct using 
org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getGroupInfoFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (groupInfoBuilder_ == null) { + groupInfo_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + } else { + groupInfoBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_ListGroupsResponse_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsResponse build() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsResponse result = new org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsResponse(this); + int from_bitField0_ = bitField0_; + if (groupInfoBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + groupInfo_ = java.util.Collections.unmodifiableList(groupInfo_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.groupInfo_ = groupInfo_; + } else { + result.groupInfo_ = groupInfoBuilder_.build(); + } + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsResponse.getDefaultInstance()) return this; + if (groupInfoBuilder_ == null) { + if (!other.groupInfo_.isEmpty()) { + if (groupInfo_.isEmpty()) { + groupInfo_ = other.groupInfo_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureGroupInfoIsMutable(); + groupInfo_.addAll(other.groupInfo_); + } + onChanged(); + } + } else { + if (!other.groupInfo_.isEmpty()) { + if (groupInfoBuilder_.isEmpty()) { + groupInfoBuilder_.dispose(); + groupInfoBuilder_ = null; + groupInfo_ = other.groupInfo_; + bitField0_ = (bitField0_ & ~0x00000001); + groupInfoBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
+ getGroupInfoFieldBuilder() : null; + } else { + groupInfoBuilder_.addAllMessages(other.groupInfo_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + for (int i = 0; i < getGroupInfoCount(); i++) { + if (!getGroupInfo(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsResponse) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // repeated .GroupInfo group_info = 1; + private java.util.List groupInfo_ = + java.util.Collections.emptyList(); + private void ensureGroupInfoIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + groupInfo_ = new java.util.ArrayList(groupInfo_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfoOrBuilder> groupInfoBuilder_; + + /** + * repeated .GroupInfo group_info = 1; + */ + public java.util.List getGroupInfoList() { + if (groupInfoBuilder_ == null) { + return java.util.Collections.unmodifiableList(groupInfo_); + } else { + return groupInfoBuilder_.getMessageList(); + } + } + /** + * repeated .GroupInfo group_info = 1; + */ + public int getGroupInfoCount() { + if (groupInfoBuilder_ == null) { + return groupInfo_.size(); + } else { + return groupInfoBuilder_.getCount(); + } + } + /** + * repeated .GroupInfo group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo getGroupInfo(int index) { + if (groupInfoBuilder_ == null) { + return groupInfo_.get(index); + } else { + return groupInfoBuilder_.getMessage(index); + } + } + /** + * repeated .GroupInfo group_info = 1; + */ + public Builder setGroupInfo( + int index, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo value) { + if (groupInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureGroupInfoIsMutable(); + groupInfo_.set(index, value); + onChanged(); + } else { + groupInfoBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .GroupInfo group_info = 1; + */ + public Builder setGroupInfo( + int index, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.Builder builderForValue) { + if (groupInfoBuilder_ == null) { + ensureGroupInfoIsMutable(); + groupInfo_.set(index, builderForValue.build()); + onChanged(); + } else { + groupInfoBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .GroupInfo group_info = 1; + */ + public Builder addGroupInfo(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo value) { + if (groupInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureGroupInfoIsMutable(); + groupInfo_.add(value); + onChanged(); 
+ } else { + groupInfoBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .GroupInfo group_info = 1; + */ + public Builder addGroupInfo( + int index, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo value) { + if (groupInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureGroupInfoIsMutable(); + groupInfo_.add(index, value); + onChanged(); + } else { + groupInfoBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .GroupInfo group_info = 1; + */ + public Builder addGroupInfo( + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.Builder builderForValue) { + if (groupInfoBuilder_ == null) { + ensureGroupInfoIsMutable(); + groupInfo_.add(builderForValue.build()); + onChanged(); + } else { + groupInfoBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .GroupInfo group_info = 1; + */ + public Builder addGroupInfo( + int index, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.Builder builderForValue) { + if (groupInfoBuilder_ == null) { + ensureGroupInfoIsMutable(); + groupInfo_.add(index, builderForValue.build()); + onChanged(); + } else { + groupInfoBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .GroupInfo group_info = 1; + */ + public Builder addAllGroupInfo( + java.lang.Iterable values) { + if (groupInfoBuilder_ == null) { + ensureGroupInfoIsMutable(); + super.addAll(values, groupInfo_); + onChanged(); + } else { + groupInfoBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .GroupInfo group_info = 1; + */ + public Builder clearGroupInfo() { + if (groupInfoBuilder_ == null) { + groupInfo_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + groupInfoBuilder_.clear(); + } + return this; + } + /** + * repeated .GroupInfo group_info = 1; + */ + public Builder removeGroupInfo(int index) { + if (groupInfoBuilder_ == null) { + ensureGroupInfoIsMutable(); + groupInfo_.remove(index); + onChanged(); + } else { + groupInfoBuilder_.remove(index); + } + return this; + } + /** + * repeated .GroupInfo group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.Builder getGroupInfoBuilder( + int index) { + return getGroupInfoFieldBuilder().getBuilder(index); + } + /** + * repeated .GroupInfo group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfoOrBuilder getGroupInfoOrBuilder( + int index) { + if (groupInfoBuilder_ == null) { + return groupInfo_.get(index); } else { + return groupInfoBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .GroupInfo group_info = 1; + */ + public java.util.List + getGroupInfoOrBuilderList() { + if (groupInfoBuilder_ != null) { + return groupInfoBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(groupInfo_); + } + } + /** + * repeated .GroupInfo group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.Builder addGroupInfoBuilder() { + return getGroupInfoFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.getDefaultInstance()); + } + /** + * repeated .GroupInfo group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.Builder addGroupInfoBuilder( + int index) { + return getGroupInfoFieldBuilder().addBuilder( + index, 
org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.getDefaultInstance()); + } + /** + * repeated .GroupInfo group_info = 1; + */ + public java.util.List<org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.Builder> + getGroupInfoBuilderList() { + return getGroupInfoFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfoOrBuilder> + getGroupInfoFieldBuilder() { + if (groupInfoBuilder_ == null) { + groupInfoBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfoOrBuilder>( + groupInfo_, + ((bitField0_ & 0x00000001) == 0x00000001), + getParentForChildren(), + isClean()); + groupInfo_ = null; + } + return groupInfoBuilder_; + } + + // @@protoc_insertion_point(builder_scope:ListGroupsResponse) + } + + static { + defaultInstance = new ListGroupsResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:ListGroupsResponse) + } + + public interface GetGroupOfServerRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string server = 1; + /** + * required string server = 1; + */ + boolean hasServer(); + /** + * required string server = 1; + */ + java.lang.String getServer(); + /** + * required string server = 1; + */ + com.google.protobuf.ByteString + getServerBytes(); + } + /** + * Protobuf type {@code GetGroupOfServerRequest} + */ + public static final class GetGroupOfServerRequest extends + com.google.protobuf.GeneratedMessage + implements GetGroupOfServerRequestOrBuilder { + // Use GetGroupOfServerRequest.newBuilder() to construct.
+ private GetGroupOfServerRequest(com.google.protobuf.GeneratedMessage.Builder<?> builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private GetGroupOfServerRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final GetGroupOfServerRequest defaultInstance; + public static GetGroupOfServerRequest getDefaultInstance() { + return defaultInstance; + } + + public GetGroupOfServerRequest getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private GetGroupOfServerRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + server_ = input.readBytes(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_GetGroupOfServerRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_GetGroupOfServerRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerRequest.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerRequest.Builder.class); + } + + public static com.google.protobuf.Parser<GetGroupOfServerRequest> PARSER = + new com.google.protobuf.AbstractParser<GetGroupOfServerRequest>() { + public GetGroupOfServerRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new GetGroupOfServerRequest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser<GetGroupOfServerRequest> getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string server = 1; + public static final int SERVER_FIELD_NUMBER = 1; + private java.lang.Object server_; + /** + * required string server = 1; + */ + public boolean hasServer() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string server = 1; + */ + public java.lang.String getServer() { + java.lang.Object ref = server_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s =
bs.toStringUtf8(); + if (bs.isValidUtf8()) { + server_ = s; + } + return s; + } + } + /** + * required string server = 1; + */ + public com.google.protobuf.ByteString + getServerBytes() { + java.lang.Object ref = server_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + server_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private void initFields() { + server_ = ""; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasServer()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getServerBytes()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getServerBytes()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerRequest other = (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerRequest) obj; + + boolean result = true; + result = result && (hasServer() == other.hasServer()); + if (hasServer()) { + result = result && getServer() + .equals(other.getServer()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasServer()) { + hash = (37 * hash) + SERVER_FIELD_NUMBER; + hash = (53 * hash) + getServer().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerRequest parseFrom(byte[] data) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code GetGroupOfServerRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_GetGroupOfServerRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_GetGroupOfServerRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerRequest.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerRequest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerRequest.newBuilder() + private Builder() { 
+ maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + server_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_GetGroupOfServerRequest_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerRequest build() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerRequest result = new org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.server_ = server_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerRequest.getDefaultInstance()) return this; + if (other.hasServer()) { + bitField0_ |= 0x00000001; + server_ = other.server_; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasServer()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string server = 1; + private java.lang.Object server_ = ""; + /** + * required string server = 1; + */ + 
public boolean hasServer() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string server = 1; + */ + public java.lang.String getServer() { + java.lang.Object ref = server_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + server_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string server = 1; + */ + public com.google.protobuf.ByteString + getServerBytes() { + java.lang.Object ref = server_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + server_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string server = 1; + */ + public Builder setServer( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + server_ = value; + onChanged(); + return this; + } + /** + * required string server = 1; + */ + public Builder clearServer() { + bitField0_ = (bitField0_ & ~0x00000001); + server_ = getDefaultInstance().getServer(); + onChanged(); + return this; + } + /** + * required string server = 1; + */ + public Builder setServerBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + server_ = value; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:GetGroupOfServerRequest) + } + + static { + defaultInstance = new GetGroupOfServerRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:GetGroupOfServerRequest) + } + + public interface GetGroupOfServerResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // optional .GroupInfo group_info = 1; + /** + * optional .GroupInfo group_info = 1; + */ + boolean hasGroupInfo(); + /** + * optional .GroupInfo group_info = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo getGroupInfo(); + /** + * optional .GroupInfo group_info = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfoOrBuilder getGroupInfoOrBuilder(); + } + /** + * Protobuf type {@code GetGroupOfServerResponse} + */ + public static final class GetGroupOfServerResponse extends + com.google.protobuf.GeneratedMessage + implements GetGroupOfServerResponseOrBuilder { + // Use GetGroupOfServerResponse.newBuilder() to construct. 
+ private GetGroupOfServerResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private GetGroupOfServerResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final GetGroupOfServerResponse defaultInstance; + public static GetGroupOfServerResponse getDefaultInstance() { + return defaultInstance; + } + + public GetGroupOfServerResponse getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private GetGroupOfServerResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = groupInfo_.toBuilder(); + } + groupInfo_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(groupInfo_); + groupInfo_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_GetGroupOfServerResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_GetGroupOfServerResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerResponse.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerResponse.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public GetGroupOfServerResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new GetGroupOfServerResponse(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // optional .GroupInfo group_info = 1; + public static final int GROUP_INFO_FIELD_NUMBER = 1; + private 
org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo groupInfo_; + /** + * optional .GroupInfo group_info = 1; + */ + public boolean hasGroupInfo() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional .GroupInfo group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo getGroupInfo() { + return groupInfo_; + } + /** + * optional .GroupInfo group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfoOrBuilder getGroupInfoOrBuilder() { + return groupInfo_; + } + + private void initFields() { + groupInfo_ = org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (hasGroupInfo()) { + if (!getGroupInfo().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, groupInfo_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, groupInfo_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerResponse other = (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerResponse) obj; + + boolean result = true; + result = result && (hasGroupInfo() == other.hasGroupInfo()); + if (hasGroupInfo()) { + result = result && getGroupInfo() + .equals(other.getGroupInfo()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasGroupInfo()) { + hash = (37 * hash) + GROUP_INFO_FIELD_NUMBER; + hash = (53 * hash) + getGroupInfo().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code GetGroupOfServerResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_GetGroupOfServerResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_GetGroupOfServerResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + 
org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerResponse.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerResponse.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getGroupInfoFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (groupInfoBuilder_ == null) { + groupInfo_ = org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.getDefaultInstance(); + } else { + groupInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_GetGroupOfServerResponse_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerResponse build() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerResponse result = new org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerResponse(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (groupInfoBuilder_ == null) { + result.groupInfo_ = groupInfo_; + } else { + result.groupInfo_ = groupInfoBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerResponse.getDefaultInstance()) return this; + if (other.hasGroupInfo()) { + mergeGroupInfo(other.getGroupInfo()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (hasGroupInfo()) { + if (!getGroupInfo().isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) 
+ throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerResponse) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // optional .GroupInfo group_info = 1; + private org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo groupInfo_ = org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfoOrBuilder> groupInfoBuilder_; + /** + * optional .GroupInfo group_info = 1; + */ + public boolean hasGroupInfo() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional .GroupInfo group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo getGroupInfo() { + if (groupInfoBuilder_ == null) { + return groupInfo_; + } else { + return groupInfoBuilder_.getMessage(); + } + } + /** + * optional .GroupInfo group_info = 1; + */ + public Builder setGroupInfo(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo value) { + if (groupInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + groupInfo_ = value; + onChanged(); + } else { + groupInfoBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * optional .GroupInfo group_info = 1; + */ + public Builder setGroupInfo( + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.Builder builderForValue) { + if (groupInfoBuilder_ == null) { + groupInfo_ = builderForValue.build(); + onChanged(); + } else { + groupInfoBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * optional .GroupInfo group_info = 1; + */ + public Builder mergeGroupInfo(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo value) { + if (groupInfoBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + groupInfo_ != org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.getDefaultInstance()) { + groupInfo_ = + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.newBuilder(groupInfo_).mergeFrom(value).buildPartial(); + } else { + groupInfo_ = value; + } + onChanged(); + } else { + groupInfoBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * optional .GroupInfo group_info = 1; + */ + public Builder clearGroupInfo() { + if (groupInfoBuilder_ == null) { + groupInfo_ = org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.getDefaultInstance(); + onChanged(); + } else { + groupInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * optional .GroupInfo group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.Builder getGroupInfoBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getGroupInfoFieldBuilder().getBuilder(); + } + /** + * optional .GroupInfo group_info = 1; + */ + public 
org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfoOrBuilder getGroupInfoOrBuilder() { + if (groupInfoBuilder_ != null) { + return groupInfoBuilder_.getMessageOrBuilder(); + } else { + return groupInfo_; + } + } + /** + * optional .GroupInfo group_info = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfoOrBuilder> + getGroupInfoFieldBuilder() { + if (groupInfoBuilder_ == null) { + groupInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfoOrBuilder>( + groupInfo_, + getParentForChildren(), + isClean()); + groupInfo_ = null; + } + return groupInfoBuilder_; + } + + // @@protoc_insertion_point(builder_scope:GetGroupOfServerResponse) + } + + static { + defaultInstance = new GetGroupOfServerResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:GetGroupOfServerResponse) + } + + public interface ListServersInTransitionRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + } + /** + * Protobuf type {@code ListServersInTransitionRequest} + */ + public static final class ListServersInTransitionRequest extends + com.google.protobuf.GeneratedMessage + implements ListServersInTransitionRequestOrBuilder { + // Use ListServersInTransitionRequest.newBuilder() to construct. + private ListServersInTransitionRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private ListServersInTransitionRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final ListServersInTransitionRequest defaultInstance; + public static ListServersInTransitionRequest getDefaultInstance() { + return defaultInstance; + } + + public ListServersInTransitionRequest getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ListServersInTransitionRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return 
org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_ListServersInTransitionRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_ListServersInTransitionRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionRequest.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionRequest.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public ListServersInTransitionRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ListServersInTransitionRequest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private void initFields() { + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionRequest other = (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionRequest) obj; + + boolean result = true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static 
org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code ListServersInTransitionRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_ListServersInTransitionRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_ListServersInTransitionRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionRequest.class, 
org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionRequest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_ListServersInTransitionRequest_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionRequest build() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionRequest result = new org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionRequest(this); + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionRequest.getDefaultInstance()) return this; + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + // @@protoc_insertion_point(builder_scope:ListServersInTransitionRequest) + } + + static { + defaultInstance = new ListServersInTransitionRequest(true); + defaultInstance.initFields(); + } + + // 
@@protoc_insertion_point(class_scope:ListServersInTransitionRequest) + } + + public interface ListServersInTransitionResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // repeated .NameStringPair transitions = 1; + /** + * repeated .NameStringPair transitions = 1; + */ + java.util.List + getTransitionsList(); + /** + * repeated .NameStringPair transitions = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair getTransitions(int index); + /** + * repeated .NameStringPair transitions = 1; + */ + int getTransitionsCount(); + /** + * repeated .NameStringPair transitions = 1; + */ + java.util.List + getTransitionsOrBuilderList(); + /** + * repeated .NameStringPair transitions = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder getTransitionsOrBuilder( + int index); + } + /** + * Protobuf type {@code ListServersInTransitionResponse} + */ + public static final class ListServersInTransitionResponse extends + com.google.protobuf.GeneratedMessage + implements ListServersInTransitionResponseOrBuilder { + // Use ListServersInTransitionResponse.newBuilder() to construct. + private ListServersInTransitionResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private ListServersInTransitionResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final ListServersInTransitionResponse defaultInstance; + public static ListServersInTransitionResponse getDefaultInstance() { + return defaultInstance; + } + + public ListServersInTransitionResponse getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ListServersInTransitionResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + transitions_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; + } + transitions_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.PARSER, extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + transitions_ = java.util.Collections.unmodifiableList(transitions_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return 
org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_ListServersInTransitionResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_ListServersInTransitionResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionResponse.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionResponse.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public ListServersInTransitionResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ListServersInTransitionResponse(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + // repeated .NameStringPair transitions = 1; + public static final int TRANSITIONS_FIELD_NUMBER = 1; + private java.util.List transitions_; + /** + * repeated .NameStringPair transitions = 1; + */ + public java.util.List getTransitionsList() { + return transitions_; + } + /** + * repeated .NameStringPair transitions = 1; + */ + public java.util.List + getTransitionsOrBuilderList() { + return transitions_; + } + /** + * repeated .NameStringPair transitions = 1; + */ + public int getTransitionsCount() { + return transitions_.size(); + } + /** + * repeated .NameStringPair transitions = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair getTransitions(int index) { + return transitions_.get(index); + } + /** + * repeated .NameStringPair transitions = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder getTransitionsOrBuilder( + int index) { + return transitions_.get(index); + } + + private void initFields() { + transitions_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + for (int i = 0; i < getTransitionsCount(); i++) { + if (!getTransitions(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + for (int i = 0; i < transitions_.size(); i++) { + output.writeMessage(1, transitions_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < transitions_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, transitions_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if 
(!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionResponse other = (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionResponse) obj; + + boolean result = true; + result = result && getTransitionsList() + .equals(other.getTransitionsList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (getTransitionsCount() > 0) { + hash = (37 * hash) + TRANSITIONS_FIELD_NUMBER; + hash = (53 * hash) + getTransitionsList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionResponse parseFrom( + 
com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code ListServersInTransitionResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_ListServersInTransitionResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_ListServersInTransitionResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionResponse.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionResponse.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getTransitionsFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (transitionsBuilder_ == null) { + transitions_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + } else { + transitionsBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_ListServersInTransitionResponse_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionResponse build() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionResponse buildPartial() { + 
org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionResponse result = new org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionResponse(this); + int from_bitField0_ = bitField0_; + if (transitionsBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + transitions_ = java.util.Collections.unmodifiableList(transitions_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.transitions_ = transitions_; + } else { + result.transitions_ = transitionsBuilder_.build(); + } + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionResponse.getDefaultInstance()) return this; + if (transitionsBuilder_ == null) { + if (!other.transitions_.isEmpty()) { + if (transitions_.isEmpty()) { + transitions_ = other.transitions_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureTransitionsIsMutable(); + transitions_.addAll(other.transitions_); + } + onChanged(); + } + } else { + if (!other.transitions_.isEmpty()) { + if (transitionsBuilder_.isEmpty()) { + transitionsBuilder_.dispose(); + transitionsBuilder_ = null; + transitions_ = other.transitions_; + bitField0_ = (bitField0_ & ~0x00000001); + transitionsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
+ getTransitionsFieldBuilder() : null; + } else { + transitionsBuilder_.addAllMessages(other.transitions_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + for (int i = 0; i < getTransitionsCount(); i++) { + if (!getTransitions(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionResponse) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // repeated .NameStringPair transitions = 1; + private java.util.List transitions_ = + java.util.Collections.emptyList(); + private void ensureTransitionsIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + transitions_ = new java.util.ArrayList(transitions_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder> transitionsBuilder_; + + /** + * repeated .NameStringPair transitions = 1; + */ + public java.util.List getTransitionsList() { + if (transitionsBuilder_ == null) { + return java.util.Collections.unmodifiableList(transitions_); + } else { + return transitionsBuilder_.getMessageList(); + } + } + /** + * repeated .NameStringPair transitions = 1; + */ + public int getTransitionsCount() { + if (transitionsBuilder_ == null) { + return transitions_.size(); + } else { + return transitionsBuilder_.getCount(); + } + } + /** + * repeated .NameStringPair transitions = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair getTransitions(int index) { + if (transitionsBuilder_ == null) { + return transitions_.get(index); + } else { + return transitionsBuilder_.getMessage(index); + } + } + /** + * repeated .NameStringPair transitions = 1; + */ + public Builder setTransitions( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair value) { + if (transitionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTransitionsIsMutable(); + transitions_.set(index, value); + onChanged(); + } else { + transitionsBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .NameStringPair transitions = 1; + */ + public Builder setTransitions( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder builderForValue) { + if (transitionsBuilder_ == null) { + ensureTransitionsIsMutable(); + transitions_.set(index, builderForValue.build()); + onChanged(); + } else { + transitionsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .NameStringPair transitions = 1; + */ + public Builder addTransitions(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair value) { + if 
(transitionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTransitionsIsMutable(); + transitions_.add(value); + onChanged(); + } else { + transitionsBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .NameStringPair transitions = 1; + */ + public Builder addTransitions( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair value) { + if (transitionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTransitionsIsMutable(); + transitions_.add(index, value); + onChanged(); + } else { + transitionsBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .NameStringPair transitions = 1; + */ + public Builder addTransitions( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder builderForValue) { + if (transitionsBuilder_ == null) { + ensureTransitionsIsMutable(); + transitions_.add(builderForValue.build()); + onChanged(); + } else { + transitionsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .NameStringPair transitions = 1; + */ + public Builder addTransitions( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder builderForValue) { + if (transitionsBuilder_ == null) { + ensureTransitionsIsMutable(); + transitions_.add(index, builderForValue.build()); + onChanged(); + } else { + transitionsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .NameStringPair transitions = 1; + */ + public Builder addAllTransitions( + java.lang.Iterable values) { + if (transitionsBuilder_ == null) { + ensureTransitionsIsMutable(); + super.addAll(values, transitions_); + onChanged(); + } else { + transitionsBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .NameStringPair transitions = 1; + */ + public Builder clearTransitions() { + if (transitionsBuilder_ == null) { + transitions_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + transitionsBuilder_.clear(); + } + return this; + } + /** + * repeated .NameStringPair transitions = 1; + */ + public Builder removeTransitions(int index) { + if (transitionsBuilder_ == null) { + ensureTransitionsIsMutable(); + transitions_.remove(index); + onChanged(); + } else { + transitionsBuilder_.remove(index); + } + return this; + } + /** + * repeated .NameStringPair transitions = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder getTransitionsBuilder( + int index) { + return getTransitionsFieldBuilder().getBuilder(index); + } + /** + * repeated .NameStringPair transitions = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder getTransitionsOrBuilder( + int index) { + if (transitionsBuilder_ == null) { + return transitions_.get(index); } else { + return transitionsBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .NameStringPair transitions = 1; + */ + public java.util.List + getTransitionsOrBuilderList() { + if (transitionsBuilder_ != null) { + return transitionsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(transitions_); + } + } + /** + * repeated .NameStringPair transitions = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder addTransitionsBuilder() { + return getTransitionsFieldBuilder().addBuilder( + 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.getDefaultInstance()); + } + /** + * repeated .NameStringPair transitions = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder addTransitionsBuilder( + int index) { + return getTransitionsFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.getDefaultInstance()); + } + /** + * repeated .NameStringPair transitions = 1; + */ + public java.util.List + getTransitionsBuilderList() { + return getTransitionsFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder> + getTransitionsFieldBuilder() { + if (transitionsBuilder_ == null) { + transitionsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder>( + transitions_, + ((bitField0_ & 0x00000001) == 0x00000001), + getParentForChildren(), + isClean()); + transitions_ = null; + } + return transitionsBuilder_; + } + + // @@protoc_insertion_point(builder_scope:ListServersInTransitionResponse) + } + + static { + defaultInstance = new ListServersInTransitionResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:ListServersInTransitionResponse) + } + + /** + * Protobuf service {@code RSGroupService} + */ + public static abstract class RSGroupService + implements com.google.protobuf.Service { + protected RSGroupService() {} + + public interface Interface { + /** + * rpc ListTablesOfGroup(.ListTablesOfGroupRequest) returns (.ListTablesOfGroupResponse); + */ + public abstract void listTablesOfGroup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc GetGroupInfo(.GetGroupInfoRequest) returns (.GetGroupInfoResponse); + */ + public abstract void getGroupInfo( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc GetGroupInfoOfTable(.GetGroupInfoOfTableRequest) returns (.GetGroupInfoOfTableResponse); + */ + public abstract void getGroupInfoOfTable( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc MoveServers(.MoveServersRequest) returns (.MoveServersResponse); + */ + public abstract void moveServers( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc MoveTables(.MoveTablesRequest) returns (.MoveTablesResponse); + */ + public abstract void moveTables( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc AddGroup(.AddGroupRequest) returns (.AddGroupResponse); + */ + 
public abstract void addGroup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc RemoveGroup(.RemoveGroupRequest) returns (.RemoveGroupResponse); + */ + public abstract void removeGroup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc BalanceGroup(.BalanceGroupRequest) returns (.BalanceGroupResponse); + */ + public abstract void balanceGroup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc ListGroups(.ListGroupsRequest) returns (.ListGroupsResponse); + */ + public abstract void listGroups( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc GetGroupOfServer(.GetGroupOfServerRequest) returns (.GetGroupOfServerResponse); + */ + public abstract void getGroupOfServer( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc ListServersInTransition(.ListServersInTransitionRequest) returns (.ListServersInTransitionResponse); + */ + public abstract void listServersInTransition( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionRequest request, + com.google.protobuf.RpcCallback done); + + } + + public static com.google.protobuf.Service newReflectiveService( + final Interface impl) { + return new RSGroupService() { + @java.lang.Override + public void listTablesOfGroup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupRequest request, + com.google.protobuf.RpcCallback done) { + impl.listTablesOfGroup(controller, request, done); + } + + @java.lang.Override + public void getGroupInfo( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoRequest request, + com.google.protobuf.RpcCallback done) { + impl.getGroupInfo(controller, request, done); + } + + @java.lang.Override + public void getGroupInfoOfTable( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableRequest request, + com.google.protobuf.RpcCallback done) { + impl.getGroupInfoOfTable(controller, request, done); + } + + @java.lang.Override + public void moveServers( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersRequest request, + com.google.protobuf.RpcCallback done) { + impl.moveServers(controller, request, done); + } + + @java.lang.Override + public void moveTables( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesRequest request, + com.google.protobuf.RpcCallback done) { + impl.moveTables(controller, request, done); + } + + @java.lang.Override + public void addGroup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupRequest request, + 
com.google.protobuf.RpcCallback done) { + impl.addGroup(controller, request, done); + } + + @java.lang.Override + public void removeGroup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupRequest request, + com.google.protobuf.RpcCallback done) { + impl.removeGroup(controller, request, done); + } + + @java.lang.Override + public void balanceGroup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupRequest request, + com.google.protobuf.RpcCallback done) { + impl.balanceGroup(controller, request, done); + } + + @java.lang.Override + public void listGroups( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsRequest request, + com.google.protobuf.RpcCallback done) { + impl.listGroups(controller, request, done); + } + + @java.lang.Override + public void getGroupOfServer( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerRequest request, + com.google.protobuf.RpcCallback done) { + impl.getGroupOfServer(controller, request, done); + } + + @java.lang.Override + public void listServersInTransition( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionRequest request, + com.google.protobuf.RpcCallback done) { + impl.listServersInTransition(controller, request, done); + } + + }; + } + + public static com.google.protobuf.BlockingService + newReflectiveBlockingService(final BlockingInterface impl) { + return new com.google.protobuf.BlockingService() { + public final com.google.protobuf.Descriptors.ServiceDescriptor + getDescriptorForType() { + return getDescriptor(); + } + + public final com.google.protobuf.Message callBlockingMethod( + com.google.protobuf.Descriptors.MethodDescriptor method, + com.google.protobuf.RpcController controller, + com.google.protobuf.Message request) + throws com.google.protobuf.ServiceException { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.callBlockingMethod() given method descriptor for " + + "wrong service type."); + } + switch(method.getIndex()) { + case 0: + return impl.listTablesOfGroup(controller, (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupRequest)request); + case 1: + return impl.getGroupInfo(controller, (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoRequest)request); + case 2: + return impl.getGroupInfoOfTable(controller, (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableRequest)request); + case 3: + return impl.moveServers(controller, (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersRequest)request); + case 4: + return impl.moveTables(controller, (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesRequest)request); + case 5: + return impl.addGroup(controller, (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupRequest)request); + case 6: + return impl.removeGroup(controller, (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupRequest)request); + case 7: + return impl.balanceGroup(controller, (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupRequest)request); + case 8: + return impl.listGroups(controller, (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsRequest)request); + 
case 9: + return impl.getGroupOfServer(controller, (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerRequest)request); + case 10: + return impl.listServersInTransition(controller, (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionRequest)request); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public final com.google.protobuf.Message + getRequestPrototype( + com.google.protobuf.Descriptors.MethodDescriptor method) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.getRequestPrototype() given method " + + "descriptor for wrong service type."); + } + switch(method.getIndex()) { + case 0: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupRequest.getDefaultInstance(); + case 1: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoRequest.getDefaultInstance(); + case 2: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableRequest.getDefaultInstance(); + case 3: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersRequest.getDefaultInstance(); + case 4: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesRequest.getDefaultInstance(); + case 5: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupRequest.getDefaultInstance(); + case 6: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupRequest.getDefaultInstance(); + case 7: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupRequest.getDefaultInstance(); + case 8: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsRequest.getDefaultInstance(); + case 9: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerRequest.getDefaultInstance(); + case 10: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionRequest.getDefaultInstance(); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public final com.google.protobuf.Message + getResponsePrototype( + com.google.protobuf.Descriptors.MethodDescriptor method) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.getResponsePrototype() given method " + + "descriptor for wrong service type."); + } + switch(method.getIndex()) { + case 0: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupResponse.getDefaultInstance(); + case 1: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoResponse.getDefaultInstance(); + case 2: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableResponse.getDefaultInstance(); + case 3: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersResponse.getDefaultInstance(); + case 4: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesResponse.getDefaultInstance(); + case 5: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupResponse.getDefaultInstance(); + case 6: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupResponse.getDefaultInstance(); + case 7: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupResponse.getDefaultInstance(); + case 8: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsResponse.getDefaultInstance(); + case 
9: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerResponse.getDefaultInstance(); + case 10: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionResponse.getDefaultInstance(); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + }; + } + + /** + * rpc ListTablesOfGroup(.ListTablesOfGroupRequest) returns (.ListTablesOfGroupResponse); + */ + public abstract void listTablesOfGroup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc GetGroupInfo(.GetGroupInfoRequest) returns (.GetGroupInfoResponse); + */ + public abstract void getGroupInfo( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc GetGroupInfoOfTable(.GetGroupInfoOfTableRequest) returns (.GetGroupInfoOfTableResponse); + */ + public abstract void getGroupInfoOfTable( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc MoveServers(.MoveServersRequest) returns (.MoveServersResponse); + */ + public abstract void moveServers( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc MoveTables(.MoveTablesRequest) returns (.MoveTablesResponse); + */ + public abstract void moveTables( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc AddGroup(.AddGroupRequest) returns (.AddGroupResponse); + */ + public abstract void addGroup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc RemoveGroup(.RemoveGroupRequest) returns (.RemoveGroupResponse); + */ + public abstract void removeGroup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc BalanceGroup(.BalanceGroupRequest) returns (.BalanceGroupResponse); + */ + public abstract void balanceGroup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc ListGroups(.ListGroupsRequest) returns (.ListGroupsResponse); + */ + public abstract void listGroups( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc GetGroupOfServer(.GetGroupOfServerRequest) returns (.GetGroupOfServerResponse); + */ + public abstract void getGroupOfServer( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc ListServersInTransition(.ListServersInTransitionRequest) returns (.ListServersInTransitionResponse); + */ + public abstract void 
listServersInTransition( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionRequest request, + com.google.protobuf.RpcCallback done); + + public static final + com.google.protobuf.Descriptors.ServiceDescriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.getDescriptor().getServices().get(0); + } + public final com.google.protobuf.Descriptors.ServiceDescriptor + getDescriptorForType() { + return getDescriptor(); + } + + public final void callMethod( + com.google.protobuf.Descriptors.MethodDescriptor method, + com.google.protobuf.RpcController controller, + com.google.protobuf.Message request, + com.google.protobuf.RpcCallback< + com.google.protobuf.Message> done) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.callMethod() given method descriptor for wrong " + + "service type."); + } + switch(method.getIndex()) { + case 0: + this.listTablesOfGroup(controller, (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 1: + this.getGroupInfo(controller, (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 2: + this.getGroupInfoOfTable(controller, (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 3: + this.moveServers(controller, (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 4: + this.moveTables(controller, (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 5: + this.addGroup(controller, (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 6: + this.removeGroup(controller, (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 7: + this.balanceGroup(controller, (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 8: + this.listGroups(controller, (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 9: + this.getGroupOfServer(controller, (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 10: + this.listServersInTransition(controller, (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public final com.google.protobuf.Message + getRequestPrototype( + com.google.protobuf.Descriptors.MethodDescriptor method) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + 
"Service.getRequestPrototype() given method " + + "descriptor for wrong service type."); + } + switch(method.getIndex()) { + case 0: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupRequest.getDefaultInstance(); + case 1: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoRequest.getDefaultInstance(); + case 2: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableRequest.getDefaultInstance(); + case 3: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersRequest.getDefaultInstance(); + case 4: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesRequest.getDefaultInstance(); + case 5: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupRequest.getDefaultInstance(); + case 6: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupRequest.getDefaultInstance(); + case 7: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupRequest.getDefaultInstance(); + case 8: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsRequest.getDefaultInstance(); + case 9: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerRequest.getDefaultInstance(); + case 10: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionRequest.getDefaultInstance(); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public final com.google.protobuf.Message + getResponsePrototype( + com.google.protobuf.Descriptors.MethodDescriptor method) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.getResponsePrototype() given method " + + "descriptor for wrong service type."); + } + switch(method.getIndex()) { + case 0: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupResponse.getDefaultInstance(); + case 1: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoResponse.getDefaultInstance(); + case 2: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableResponse.getDefaultInstance(); + case 3: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersResponse.getDefaultInstance(); + case 4: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesResponse.getDefaultInstance(); + case 5: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupResponse.getDefaultInstance(); + case 6: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupResponse.getDefaultInstance(); + case 7: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupResponse.getDefaultInstance(); + case 8: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsResponse.getDefaultInstance(); + case 9: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerResponse.getDefaultInstance(); + case 10: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionResponse.getDefaultInstance(); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public static Stub newStub( + com.google.protobuf.RpcChannel channel) { + return new Stub(channel); + } + + public static final class Stub extends org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupService implements Interface { + private 
Stub(com.google.protobuf.RpcChannel channel) { + this.channel = channel; + } + + private final com.google.protobuf.RpcChannel channel; + + public com.google.protobuf.RpcChannel getChannel() { + return channel; + } + + public void listTablesOfGroup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(0), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupResponse.class, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupResponse.getDefaultInstance())); + } + + public void getGroupInfo( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(1), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoResponse.class, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoResponse.getDefaultInstance())); + } + + public void getGroupInfoOfTable( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(2), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableResponse.class, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableResponse.getDefaultInstance())); + } + + public void moveServers( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(3), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersResponse.class, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersResponse.getDefaultInstance())); + } + + public void moveTables( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(4), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesResponse.class, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesResponse.getDefaultInstance())); + } + + public void addGroup( + 
com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(5), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupResponse.class, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupResponse.getDefaultInstance())); + } + + public void removeGroup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(6), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupResponse.class, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupResponse.getDefaultInstance())); + } + + public void balanceGroup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(7), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupResponse.class, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupResponse.getDefaultInstance())); + } + + public void listGroups( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(8), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsResponse.class, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsResponse.getDefaultInstance())); + } + + public void getGroupOfServer( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(9), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerResponse.class, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerResponse.getDefaultInstance())); + } + + public void listServersInTransition( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(10), + controller, + request, + 
org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionResponse.class, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionResponse.getDefaultInstance())); + } + } + + public static BlockingInterface newBlockingStub( + com.google.protobuf.BlockingRpcChannel channel) { + return new BlockingStub(channel); + } + + public interface BlockingInterface { + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupResponse listTablesOfGroup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupRequest request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoResponse getGroupInfo( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoRequest request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableResponse getGroupInfoOfTable( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableRequest request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersResponse moveServers( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersRequest request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesResponse moveTables( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesRequest request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupResponse addGroup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupRequest request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupResponse removeGroup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupRequest request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupResponse balanceGroup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupRequest request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsResponse listGroups( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsRequest request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerResponse getGroupOfServer( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerRequest request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionResponse 
listServersInTransition( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionRequest request) + throws com.google.protobuf.ServiceException; + } + + private static final class BlockingStub implements BlockingInterface { + private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) { + this.channel = channel; + } + + private final com.google.protobuf.BlockingRpcChannel channel; + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupResponse listTablesOfGroup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(0), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListTablesOfGroupResponse.getDefaultInstance()); + } + + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoResponse getGroupInfo( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(1), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoResponse.getDefaultInstance()); + } + + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableResponse getGroupInfoOfTable( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(2), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupInfoOfTableResponse.getDefaultInstance()); + } + + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersResponse moveServers( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(3), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveServersResponse.getDefaultInstance()); + } + + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesResponse moveTables( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(4), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.MoveTablesResponse.getDefaultInstance()); + } + + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupResponse addGroup( + com.google.protobuf.RpcController controller, + 
org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(5), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.AddGroupResponse.getDefaultInstance()); + } + + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupResponse removeGroup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(6), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RemoveGroupResponse.getDefaultInstance()); + } + + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupResponse balanceGroup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(7), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.BalanceGroupResponse.getDefaultInstance()); + } + + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsResponse listGroups( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(8), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListGroupsResponse.getDefaultInstance()); + } + + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerResponse getGroupOfServer( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(9), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GetGroupOfServerResponse.getDefaultInstance()); + } + + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionResponse listServersInTransition( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(10), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.ListServersInTransitionResponse.getDefaultInstance()); + } + + } + + // @@protoc_insertion_point(class_scope:RSGroupService) + } + + private static com.google.protobuf.Descriptors.Descriptor + internal_static_GroupInfo_descriptor; + private static + 
com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_GroupInfo_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_ListTablesOfGroupRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_ListTablesOfGroupRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_ListTablesOfGroupResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_ListTablesOfGroupResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_GetGroupInfoRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_GetGroupInfoRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_GetGroupInfoResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_GetGroupInfoResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_GetGroupInfoOfTableRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_GetGroupInfoOfTableRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_GetGroupInfoOfTableResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_GetGroupInfoOfTableResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_MoveServersRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_MoveServersRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_MoveServersResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_MoveServersResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_MoveTablesRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_MoveTablesRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_MoveTablesResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_MoveTablesResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_AddGroupRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_AddGroupRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_AddGroupResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_AddGroupResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_RemoveGroupRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_RemoveGroupRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_RemoveGroupResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_RemoveGroupResponse_fieldAccessorTable; + private static 
com.google.protobuf.Descriptors.Descriptor + internal_static_BalanceGroupRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_BalanceGroupRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_BalanceGroupResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_BalanceGroupResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_ListGroupsRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_ListGroupsRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_ListGroupsResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_ListGroupsResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_GetGroupOfServerRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_GetGroupOfServerRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_GetGroupOfServerResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_GetGroupOfServerResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_ListServersInTransitionRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_ListServersInTransitionRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_ListServersInTransitionResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_ListServersInTransitionResponse_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor + getDescriptor() { + return descriptor; + } + private static com.google.protobuf.Descriptors.FileDescriptor + descriptor; + static { + java.lang.String[] descriptorData = { + "\n\rRSGroup.proto\032\013HBase.proto\"F\n\tGroupInf" + + "o\022\014\n\004name\030\001 \002(\t\022\017\n\007servers\030\002 \003(\t\022\032\n\006tabl" + + "es\030\003 \003(\0132\n.TableName\".\n\030ListTablesOfGrou" + + "pRequest\022\022\n\ngroup_name\030\001 \002(\t\";\n\031ListTabl" + + "esOfGroupResponse\022\036\n\ntable_name\030\001 \003(\0132\n." 
+ + "TableName\")\n\023GetGroupInfoRequest\022\022\n\ngrou" + + "p_name\030\001 \002(\t\"6\n\024GetGroupInfoResponse\022\036\n\n" + + "group_info\030\001 \001(\0132\n.GroupInfo\"<\n\032GetGroup" + + "InfoOfTableRequest\022\036\n\ntable_name\030\001 \002(\0132\n" + + ".TableName\"=\n\033GetGroupInfoOfTableRespons", + "e\022\036\n\ngroup_info\030\001 \001(\0132\n.GroupInfo\";\n\022Mov" + + "eServersRequest\022\024\n\014target_group\030\001 \002(\t\022\017\n" + + "\007servers\030\002 \003(\t\"\025\n\023MoveServersResponse\"I\n" + + "\021MoveTablesRequest\022\024\n\014target_group\030\001 \002(\t" + + "\022\036\n\ntable_name\030\002 \003(\0132\n.TableName\"\024\n\022Move" + + "TablesResponse\"%\n\017AddGroupRequest\022\022\n\ngro" + + "up_name\030\001 \002(\t\"\022\n\020AddGroupResponse\"(\n\022Rem" + + "oveGroupRequest\022\022\n\ngroup_name\030\001 \002(\t\"\025\n\023R" + + "emoveGroupResponse\")\n\023BalanceGroupReques" + + "t\022\022\n\ngroup_name\030\001 \002(\t\"*\n\024BalanceGroupRes", + "ponse\022\022\n\nbalanceRan\030\001 \002(\010\"\023\n\021ListGroupsR" + + "equest\"4\n\022ListGroupsResponse\022\036\n\ngroup_in" + + "fo\030\001 \003(\0132\n.GroupInfo\")\n\027GetGroupOfServer" + + "Request\022\016\n\006server\030\001 \002(\t\":\n\030GetGroupOfSer" + + "verResponse\022\036\n\ngroup_info\030\001 \001(\0132\n.GroupI" + + "nfo\" \n\036ListServersInTransitionRequest\"G\n" + + "\037ListServersInTransitionResponse\022$\n\013tran" + + "sitions\030\001 \003(\0132\017.NameStringPair2\342\005\n\016RSGro" + + "upService\022J\n\021ListTablesOfGroup\022\031.ListTab" + + "lesOfGroupRequest\032\032.ListTablesOfGroupRes", + "ponse\022;\n\014GetGroupInfo\022\024.GetGroupInfoRequ" + + "est\032\025.GetGroupInfoResponse\022P\n\023GetGroupIn" + + "foOfTable\022\033.GetGroupInfoOfTableRequest\032\034" + + ".GetGroupInfoOfTableResponse\0228\n\013MoveServ" + + "ers\022\023.MoveServersRequest\032\024.MoveServersRe" + + "sponse\0225\n\nMoveTables\022\022.MoveTablesRequest" + + "\032\023.MoveTablesResponse\022/\n\010AddGroup\022\020.AddG" + + "roupRequest\032\021.AddGroupResponse\0228\n\013Remove" + + "Group\022\023.RemoveGroupRequest\032\024.RemoveGroup" + + "Response\022;\n\014BalanceGroup\022\024.BalanceGroupR", + "equest\032\025.BalanceGroupResponse\0225\n\nListGro" + + "ups\022\022.ListGroupsRequest\032\023.ListGroupsResp" + + "onse\022G\n\020GetGroupOfServer\022\030.GetGroupOfSer" + + "verRequest\032\031.GetGroupOfServerResponse\022\\\n" + + "\027ListServersInTransition\022\037.ListServersIn" + + "TransitionRequest\032 .ListServersInTransit" + + "ionResponseBC\n*org.apache.hadoop.hbase.p" + + "rotobuf.generatedB\rRSGroupProtosH\001\210\001\001\240\001\001" + }; + com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = + new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { + public com.google.protobuf.ExtensionRegistry assignDescriptors( + com.google.protobuf.Descriptors.FileDescriptor root) { + descriptor = root; + internal_static_GroupInfo_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_GroupInfo_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_GroupInfo_descriptor, + new java.lang.String[] { "Name", "Servers", "Tables", }); + internal_static_ListTablesOfGroupRequest_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_ListTablesOfGroupRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + 
internal_static_ListTablesOfGroupRequest_descriptor, + new java.lang.String[] { "GroupName", }); + internal_static_ListTablesOfGroupResponse_descriptor = + getDescriptor().getMessageTypes().get(2); + internal_static_ListTablesOfGroupResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_ListTablesOfGroupResponse_descriptor, + new java.lang.String[] { "TableName", }); + internal_static_GetGroupInfoRequest_descriptor = + getDescriptor().getMessageTypes().get(3); + internal_static_GetGroupInfoRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_GetGroupInfoRequest_descriptor, + new java.lang.String[] { "GroupName", }); + internal_static_GetGroupInfoResponse_descriptor = + getDescriptor().getMessageTypes().get(4); + internal_static_GetGroupInfoResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_GetGroupInfoResponse_descriptor, + new java.lang.String[] { "GroupInfo", }); + internal_static_GetGroupInfoOfTableRequest_descriptor = + getDescriptor().getMessageTypes().get(5); + internal_static_GetGroupInfoOfTableRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_GetGroupInfoOfTableRequest_descriptor, + new java.lang.String[] { "TableName", }); + internal_static_GetGroupInfoOfTableResponse_descriptor = + getDescriptor().getMessageTypes().get(6); + internal_static_GetGroupInfoOfTableResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_GetGroupInfoOfTableResponse_descriptor, + new java.lang.String[] { "GroupInfo", }); + internal_static_MoveServersRequest_descriptor = + getDescriptor().getMessageTypes().get(7); + internal_static_MoveServersRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_MoveServersRequest_descriptor, + new java.lang.String[] { "TargetGroup", "Servers", }); + internal_static_MoveServersResponse_descriptor = + getDescriptor().getMessageTypes().get(8); + internal_static_MoveServersResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_MoveServersResponse_descriptor, + new java.lang.String[] { }); + internal_static_MoveTablesRequest_descriptor = + getDescriptor().getMessageTypes().get(9); + internal_static_MoveTablesRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_MoveTablesRequest_descriptor, + new java.lang.String[] { "TargetGroup", "TableName", }); + internal_static_MoveTablesResponse_descriptor = + getDescriptor().getMessageTypes().get(10); + internal_static_MoveTablesResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_MoveTablesResponse_descriptor, + new java.lang.String[] { }); + internal_static_AddGroupRequest_descriptor = + getDescriptor().getMessageTypes().get(11); + internal_static_AddGroupRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_AddGroupRequest_descriptor, + new java.lang.String[] { "GroupName", }); + internal_static_AddGroupResponse_descriptor = + getDescriptor().getMessageTypes().get(12); + internal_static_AddGroupResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_AddGroupResponse_descriptor, + new java.lang.String[] { }); + 
internal_static_RemoveGroupRequest_descriptor = + getDescriptor().getMessageTypes().get(13); + internal_static_RemoveGroupRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_RemoveGroupRequest_descriptor, + new java.lang.String[] { "GroupName", }); + internal_static_RemoveGroupResponse_descriptor = + getDescriptor().getMessageTypes().get(14); + internal_static_RemoveGroupResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_RemoveGroupResponse_descriptor, + new java.lang.String[] { }); + internal_static_BalanceGroupRequest_descriptor = + getDescriptor().getMessageTypes().get(15); + internal_static_BalanceGroupRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_BalanceGroupRequest_descriptor, + new java.lang.String[] { "GroupName", }); + internal_static_BalanceGroupResponse_descriptor = + getDescriptor().getMessageTypes().get(16); + internal_static_BalanceGroupResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_BalanceGroupResponse_descriptor, + new java.lang.String[] { "BalanceRan", }); + internal_static_ListGroupsRequest_descriptor = + getDescriptor().getMessageTypes().get(17); + internal_static_ListGroupsRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_ListGroupsRequest_descriptor, + new java.lang.String[] { }); + internal_static_ListGroupsResponse_descriptor = + getDescriptor().getMessageTypes().get(18); + internal_static_ListGroupsResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_ListGroupsResponse_descriptor, + new java.lang.String[] { "GroupInfo", }); + internal_static_GetGroupOfServerRequest_descriptor = + getDescriptor().getMessageTypes().get(19); + internal_static_GetGroupOfServerRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_GetGroupOfServerRequest_descriptor, + new java.lang.String[] { "Server", }); + internal_static_GetGroupOfServerResponse_descriptor = + getDescriptor().getMessageTypes().get(20); + internal_static_GetGroupOfServerResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_GetGroupOfServerResponse_descriptor, + new java.lang.String[] { "GroupInfo", }); + internal_static_ListServersInTransitionRequest_descriptor = + getDescriptor().getMessageTypes().get(21); + internal_static_ListServersInTransitionRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_ListServersInTransitionRequest_descriptor, + new java.lang.String[] { }); + internal_static_ListServersInTransitionResponse_descriptor = + getDescriptor().getMessageTypes().get(22); + internal_static_ListServersInTransitionResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_ListServersInTransitionResponse_descriptor, + new java.lang.String[] { "Transitions", }); + return null; + } + }; + com.google.protobuf.Descriptors.FileDescriptor + .internalBuildGeneratedFileFrom(descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.getDescriptor(), + }, assigner); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/hbase-protocol/src/main/protobuf/RSGroup.proto 
b/hbase-protocol/src/main/protobuf/RSGroup.proto new file mode 100644 index 0000000..b4d1866 --- /dev/null +++ b/hbase-protocol/src/main/protobuf/RSGroup.proto @@ -0,0 +1,150 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +option java_package = "org.apache.hadoop.hbase.protobuf.generated"; +option java_outer_classname = "RSGroupProtos"; +option java_generic_services = true; +option java_generate_equals_and_hash = true; +option optimize_for = SPEED; + +import "HBase.proto"; + +message GroupInfo { + required string name = 1; + repeated string servers = 2; + repeated TableName tables = 3; +} + +message ListTablesOfGroupRequest { + required string group_name = 1; +} + +message ListTablesOfGroupResponse { + repeated TableName table_name = 1; +} + +message GetGroupInfoRequest { + required string group_name = 1; +} + +message GetGroupInfoResponse { + optional GroupInfo group_info = 1; +} + +message GetGroupInfoOfTableRequest { + required TableName table_name = 1; +} + +message GetGroupInfoOfTableResponse { + optional GroupInfo group_info = 1; +} + +message MoveServersRequest { + required string target_group = 1; + repeated string servers = 2; +} + +message MoveServersResponse { +} + +message MoveTablesRequest { + required string target_group = 1; + repeated TableName table_name = 2; +} + +message MoveTablesResponse { +} + +message AddGroupRequest { + required string group_name = 1; +} + +message AddGroupResponse { +} + +message RemoveGroupRequest { + required string group_name = 1; +} + +message RemoveGroupResponse { +} + +message BalanceGroupRequest { + required string group_name = 1; +} + +message BalanceGroupResponse { + required bool balanceRan = 1; +} + +message ListGroupsRequest { +} + +message ListGroupsResponse { + repeated GroupInfo group_info = 1; +} + +message GetGroupOfServerRequest { + required string server = 1; +} + +message GetGroupOfServerResponse { + optional GroupInfo group_info = 1; +} + +message ListServersInTransitionRequest { +} + +message ListServersInTransitionResponse { + repeated NameStringPair transitions = 1; +} + +service RSGroupService { + rpc ListTablesOfGroup(ListTablesOfGroupRequest) + returns (ListTablesOfGroupResponse); + + rpc GetGroupInfo(GetGroupInfoRequest) + returns (GetGroupInfoResponse); + + rpc GetGroupInfoOfTable(GetGroupInfoOfTableRequest) + returns (GetGroupInfoOfTableResponse); + + rpc MoveServers(MoveServersRequest) + returns (MoveServersResponse); + + rpc MoveTables(MoveTablesRequest) + returns (MoveTablesResponse); + + rpc AddGroup(AddGroupRequest) + returns (AddGroupResponse); + + rpc RemoveGroup(RemoveGroupRequest) + returns (RemoveGroupResponse); + + rpc BalanceGroup(BalanceGroupRequest) + returns (BalanceGroupResponse); + + rpc ListGroups(ListGroupsRequest) + returns 
(ListGroupsResponse); + + rpc GetGroupOfServer(GetGroupOfServerRequest) + returns (GetGroupOfServerResponse); + + rpc ListServersInTransition(ListServersInTransitionRequest) + returns (ListServersInTransitionResponse); +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupAdmin.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupAdmin.java new file mode 100644 index 0000000..a254800 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupAdmin.java @@ -0,0 +1,124 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.group; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hbase.TableName; + +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.NavigableSet; +import java.util.Set; + +/** + * Group user API interface used between client and server. + */ +@InterfaceAudience.Private +public interface GroupAdmin { + /** + * Get member tables of a group. + * + * + * @param groupName the name of the group + * @return list of table names + */ + NavigableSet listTablesOfGroup(String groupName) throws IOException; + + /** + * Gets the group information. + * + * @param groupName the group name + * @return An instance of GroupInfo + */ + GroupInfo getGroupInfo(String groupName) throws IOException; + + /** + * Gets the group info of table. + * + * @param tableName the table name + * @return An instance of GroupInfo. + */ + GroupInfo getGroupInfoOfTable(TableName tableName) throws IOException; + + /** + * Move a set of servers to another group + * + * + * @param servers set of servers, must be in the form HOST:PORT + * @param targetGroup the target group + * @throws java.io.IOException Signals that an I/O exception has occurred. + */ + void moveServers(Set servers, String targetGroup) throws IOException; + + /** + * Move tables to a new group. + * This will unassign all of a table's regions so they can be reassigned to the correct group.
+ * @param tables list of tables to move + * @param targetGroup target group + * @throws java.io.IOException + */ + void moveTables(Set tables, String targetGroup) throws IOException; + + /** + * Add a new group + * @param name name of the group + * @throws java.io.IOException + */ + void addGroup(String name) throws IOException; + + /** + * Remove a group + * @param name name of the group + * @throws java.io.IOException + */ + void removeGroup(String name) throws IOException; + + /** + * Balance the regions in a group + * + * @param name the name of the group to balance + * @return + * @throws java.io.IOException + */ + boolean balanceGroup(String name) throws IOException; + + /** + * Lists the existing groups. + * + * @return Collection of GroupInfo. + */ + List listGroups() throws IOException; + + /** + * Retrieve the GroupInfo a server is affiliated with + * @param hostPort + * @return + * @throws java.io.IOException + */ + GroupInfo getGroupOfServer(String hostPort) throws IOException; + + /** + * List servers that are currently being moved to a new group + * @return a map containing server=>targetGroup KV pairs + * @throws java.io.IOException + */ + Map listServersInTransition() throws IOException; +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupAdminClient.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupAdminClient.java new file mode 100644 index 0000000..c298b87 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupAdminClient.java @@ -0,0 +1,241 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ +package org.apache.hadoop.hbase.group; + +import com.google.common.collect.Maps; +import com.google.common.collect.Sets; +import com.google.protobuf.ServiceException; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.MasterNotRunningException; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.ZooKeeperConnectionException; +import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.NavigableSet; +import java.util.Set; +import java.util.TreeSet; + +/** + * Client used for managing region server group information. + */ +@InterfaceAudience.Public +public class GroupAdminClient implements GroupAdmin { + private RSGroupProtos.RSGroupService.BlockingInterface proxy; + private static final Log LOG = LogFactory.getLog(GroupAdminClient.class); + private int operationTimeout; + private GroupSerDe serDe = new GroupSerDe(); + + public GroupAdminClient(Configuration conf) throws IOException { + proxy = + RSGroupProtos.RSGroupService.newBlockingStub(new HBaseAdmin(conf).coprocessorService()); + operationTimeout = conf.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, + HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT); + } + + @Override + public NavigableSet listTablesOfGroup(String groupName) throws IOException { + try { + TreeSet result = Sets.newTreeSet(); + List tables = + proxy.listTablesOfGroup(null, + RSGroupProtos.ListTablesOfGroupRequest.newBuilder().setGroupName(groupName).build()) + .getTableNameList(); + for(HBaseProtos.TableName entry: tables) { + result.add(ProtobufUtil.toTableName(entry)); + } + return result; + } catch (ServiceException e) { + throw ProtobufUtil.getRemoteException(e); + } + } + + @Override + public GroupInfo getGroupInfo(String groupName) throws IOException { + try { + RSGroupProtos.GetGroupInfoResponse resp = + proxy.getGroupInfo(null, + RSGroupProtos.GetGroupInfoRequest.newBuilder().setGroupName(groupName).build()); + if(resp.hasGroupInfo()) { + return serDe.toPojo(resp.getGroupInfo()); + } + return null; + } catch (ServiceException e) { + throw ProtobufUtil.getRemoteException(e); + } + } + + @Override + public GroupInfo getGroupInfoOfTable(TableName tableName) throws IOException { + RSGroupProtos.GetGroupInfoOfTableRequest request = + RSGroupProtos.GetGroupInfoOfTableRequest.newBuilder() + .setTableName(ProtobufUtil.toProtoTableName(tableName)).build(); + + try { + return serDe.toPojo(proxy.getGroupInfoOfTable(null, request).getGroupInfo()); + } catch (ServiceException e) { + throw ProtobufUtil.getRemoteException(e); + } + } + + @Override + public void moveServers(Set servers, String targetGroup) throws IOException { + RSGroupProtos.MoveServersRequest request = + RSGroupProtos.MoveServersRequest.newBuilder() + .setTargetGroup(targetGroup) + .addAllServers(servers).build(); + + try { + proxy.moveServers(null, request); + waitForTransitions(servers); + } catch (ServiceException e) { + throw 
ProtobufUtil.getRemoteException(e); + } + } + + @Override + public void moveTables(Set tables, String targetGroup) throws IOException { + RSGroupProtos.MoveTablesRequest.Builder builder = + RSGroupProtos.MoveTablesRequest.newBuilder() + .setTargetGroup(targetGroup); + for(TableName tableName: tables) { + builder.addTableName(ProtobufUtil.toProtoTableName(tableName)); + } + try { + proxy.moveTables(null, builder.build()); + } catch (ServiceException e) { + throw ProtobufUtil.getRemoteException(e); + } + } + + @Override + public void addGroup(String groupName) throws IOException { + RSGroupProtos.AddGroupRequest request = + RSGroupProtos.AddGroupRequest.newBuilder() + .setGroupName(groupName).build(); + try { + proxy.addGroup(null, request); + } catch (ServiceException e) { + throw ProtobufUtil.getRemoteException(e); + } + } + + @Override + public void removeGroup(String name) throws IOException { + RSGroupProtos.RemoveGroupRequest request = + RSGroupProtos.RemoveGroupRequest.newBuilder() + .setGroupName(name).build(); + try { + proxy.removeGroup(null, request); + } catch (ServiceException e) { + throw ProtobufUtil.getRemoteException(e); + } + } + + @Override + public boolean balanceGroup(String name) throws IOException { + RSGroupProtos.BalanceGroupRequest request = + RSGroupProtos.BalanceGroupRequest.newBuilder() + .setGroupName(name).build(); + + try { + return proxy.balanceGroup(null, request).getBalanceRan(); + } catch (ServiceException e) { + throw ProtobufUtil.getRemoteException(e); + } + } + + @Override + public List listGroups() throws IOException { + try { + List resp = + proxy.listGroups(null, RSGroupProtos.ListGroupsRequest.newBuilder().build()) + .getGroupInfoList(); + List result = new ArrayList(resp.size()); + for(RSGroupProtos.GroupInfo entry: resp) { + result.add(serDe.toPojo(entry)); + } + return result; + } catch (ServiceException e) { + throw ProtobufUtil.getRemoteException(e); + } + } + + @Override + public GroupInfo getGroupOfServer(String hostPort) throws IOException { + RSGroupProtos.GetGroupOfServerRequest request = + RSGroupProtos.GetGroupOfServerRequest.newBuilder() + .setServer(hostPort).build(); + try { + return serDe.toPojo( + proxy.getGroupOfServer(null, request).getGroupInfo()); + } catch (ServiceException e) { + throw ProtobufUtil.getRemoteException(e); + } + } + + @Override + public Map listServersInTransition() throws IOException { + try { + List resp = + proxy.listServersInTransition(null, RSGroupProtos.ListServersInTransitionRequest + .newBuilder().build()).getTransitionsList(); + Map result = Maps.newHashMap(); + for(HBaseProtos.NameStringPair entry: resp) { + result.put(entry.getName(), entry.getValue()); + } + return result; + } catch (ServiceException e) { + throw ProtobufUtil.getRemoteException(e); + } + } + + private void waitForTransitions(Set servers) throws IOException { + long endTime = EnvironmentEdgeManager.getDelegate().currentTime()+operationTimeout; + boolean found; + do { + found = false; + for(String server: listServersInTransition().keySet()) { + found = found || servers.contains(server); + } + try { + Thread.sleep(100); + } catch (InterruptedException e) { + LOG.debug("Sleep interrupted", e); + + } + } while(found && EnvironmentEdgeManager.getDelegate().currentTime() <= endTime); + if (found) { + throw new DoNotRetryIOException("Timed out while Waiting for server transition to finish."); + } + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupAdminEndpoint.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupAdminEndpoint.java new file mode 100644 index 0000000..385f5e9 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupAdminEndpoint.java @@ -0,0 +1,289 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.group; + +import com.google.common.collect.Sets; +import com.google.protobuf.RpcCallback; +import com.google.protobuf.RpcController; +import com.google.protobuf.Service; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.Coprocessor; +import org.apache.hadoop.hbase.CoprocessorEnvironment; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.coprocessor.CoprocessorService; +import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment; +import org.apache.hadoop.hbase.master.MasterServices; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.ResponseConverter; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos; +import org.apache.hadoop.hbase.security.access.AccessControlLists; +import org.apache.hadoop.hbase.security.access.AccessController; +import org.apache.hadoop.hbase.security.access.Permission; + +import java.io.IOException; +import java.util.HashSet; +import java.util.Map; +import java.util.NavigableSet; +import java.util.Set; + +/** + * Service to support Region Server Grouping (HBase-6721) + * This should be installed as a Master CoprocessorEndpoint + */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public class GroupAdminEndpoint + implements RSGroupProtos.RSGroupService.Interface, CoprocessorService, Coprocessor { + private static final Log LOG = LogFactory.getLog(GroupAdminEndpoint.class); + + private AccessController accessController; + private MasterCoprocessorEnvironment menv; + private MasterServices master; + private GroupAdmin groupAdmin; + private GroupSerDe serDe; + + @Override + public void start(CoprocessorEnvironment env) { + menv = (MasterCoprocessorEnvironment)env; + master = menv.getMasterServices(); + groupAdmin = new GroupAdminImpl(master); + serDe = new GroupSerDe(); + } + + @Override + public void stop(CoprocessorEnvironment env) { + } + + @Override + public Service getService() { + return RSGroupProtos.RSGroupService.newReflectiveService(this); + } + + //PB endpoints + + @Override + public void listTablesOfGroup(RpcController controller, RSGroupProtos.ListTablesOfGroupRequest request, 
RpcCallback done) { + RSGroupProtos.ListTablesOfGroupResponse response = null; + try { + RSGroupProtos.ListTablesOfGroupResponse.Builder builder = + RSGroupProtos.ListTablesOfGroupResponse.newBuilder(); + NavigableSet tables = groupAdmin.listTablesOfGroup(request.getGroupName()); + for(TableName tableName: tables) { + builder.addTableName(ProtobufUtil.toProtoTableName(tableName)); + } + response = builder.build(); + } catch (IOException e) { + ResponseConverter.setControllerException(controller, e); + } + done.run(response); + } + + @Override + public void getGroupInfo(RpcController controller, RSGroupProtos.GetGroupInfoRequest request, RpcCallback done) { + RSGroupProtos.GetGroupInfoResponse response = null; + try { + RSGroupProtos.GetGroupInfoResponse.Builder builder = + RSGroupProtos.GetGroupInfoResponse.newBuilder(); + GroupInfo groupInfo = groupAdmin.getGroupInfo(request.getGroupName()); + if(groupInfo != null) { + builder.setGroupInfo(serDe.toProto(groupInfo)); + } + response = builder.build(); + } catch (IOException e) { + ResponseConverter.setControllerException(controller, e); + } + done.run(response); + } + + @Override + public void getGroupInfoOfTable(RpcController controller, + RSGroupProtos.GetGroupInfoOfTableRequest request, + RpcCallback done) { + RSGroupProtos.GetGroupInfoOfTableResponse response = null; + try { + RSGroupProtos.GetGroupInfoOfTableResponse.Builder builder = + RSGroupProtos.GetGroupInfoOfTableResponse.newBuilder(); + GroupInfo groupInfo = groupAdmin.getGroupInfoOfTable(ProtobufUtil.toTableName(request.getTableName())); + response = builder.setGroupInfo(serDe.toProto(groupInfo)).build(); + } catch (IOException e) { + ResponseConverter.setControllerException(controller, e); + } + done.run(response); + } + + @Override + public void moveServers(RpcController controller, RSGroupProtos.MoveServersRequest request, RpcCallback done) { + RSGroupProtos.MoveServersResponse response = null; + try { + requireAdmin("moveServers"); + + RSGroupProtos.MoveServersResponse.Builder builder = + RSGroupProtos.MoveServersResponse.newBuilder(); + groupAdmin.moveServers(Sets.newHashSet(request.getServersList()), request.getTargetGroup()); + response = builder.build(); + } catch (IOException e) { + ResponseConverter.setControllerException(controller, e); + } + done.run(response); + } + + @Override + public void moveTables(RpcController controller, RSGroupProtos.MoveTablesRequest request, RpcCallback done) { + RSGroupProtos.MoveTablesResponse response = null; + try { + requireAdmin("moveTables"); + + RSGroupProtos.MoveTablesResponse.Builder builder = + RSGroupProtos.MoveTablesResponse.newBuilder(); + Set tables = new HashSet(request.getTableNameList().size()); + for(HBaseProtos.TableName tableName: request.getTableNameList()) { + tables.add(ProtobufUtil.toTableName(tableName)); + } + groupAdmin.moveTables(tables, request.getTargetGroup()); + response = builder.build(); + } catch (IOException e) { + ResponseConverter.setControllerException(controller, e); + } + done.run(response); + } + + @Override + public void addGroup(RpcController controller, RSGroupProtos.AddGroupRequest request, RpcCallback done) { + RSGroupProtos.AddGroupResponse response = null; + try { + requireAdmin("addGroup"); + + RSGroupProtos.AddGroupResponse.Builder builder = + RSGroupProtos.AddGroupResponse.newBuilder(); + groupAdmin.addGroup(request.getGroupName()); + response = builder.build(); + } catch (IOException e) { + ResponseConverter.setControllerException(controller, e); + } + done.run(response); + } + + 
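The endpoint methods above delegate each RSGroupService RPC to the GroupAdmin implementation and report failures through the coprocessor controller. For orientation, here is a minimal client-side sketch of how the GroupAdmin API introduced by this patch might be used, assuming GroupAdminEndpoint is installed on the master; the group, server, and table names below are illustrative only:

import com.google.common.collect.Sets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.group.GroupAdmin;
import org.apache.hadoop.hbase.group.GroupAdminClient;

public class GroupAdminUsageSketch {
  public static void main(String[] args) throws Exception {
    // GroupAdminClient talks to the GroupAdminEndpoint coprocessor on the master.
    Configuration conf = HBaseConfiguration.create();
    GroupAdmin groupAdmin = new GroupAdminClient(conf);

    // Create a group and move a region server (HOST:PORT) into it.
    groupAdmin.addGroup("my_group");
    groupAdmin.moveServers(Sets.newHashSet("rs1.example.com:60020"), "my_group");

    // Move a table into the group; its regions are unassigned so they can be
    // reassigned onto the group's servers.
    groupAdmin.moveTables(Sets.newHashSet(TableName.valueOf("my_table")), "my_group");

    // Balance only the regions that belong to this group.
    boolean balancerRan = groupAdmin.balanceGroup("my_group");
    System.out.println("balanceGroup ran: " + balancerRan);
  }
}

Note that moveServers blocks in waitForTransitions until the servers leave the transition map, so the call above can take up to hbase.client.operation.timeout before it fails.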
@Override + public void removeGroup(RpcController controller, RSGroupProtos.RemoveGroupRequest request, RpcCallback done) { + RSGroupProtos.RemoveGroupResponse response = null; + try { + requireAdmin("removeGroup"); + + RSGroupProtos.RemoveGroupResponse.Builder builder = + RSGroupProtos.RemoveGroupResponse.newBuilder(); + groupAdmin.removeGroup(request.getGroupName()); + response = builder.build(); + } catch (IOException e) { + ResponseConverter.setControllerException(controller, e); + } + done.run(response); + } + + @Override + public void balanceGroup(RpcController controller, RSGroupProtos.BalanceGroupRequest request, RpcCallback done) { + RSGroupProtos.BalanceGroupResponse response = null; + try { + requireAdmin("balanceGroup"); + + RSGroupProtos.BalanceGroupResponse.Builder builder = + RSGroupProtos.BalanceGroupResponse.newBuilder(); + builder.setBalanceRan(groupAdmin.balanceGroup(request.getGroupName())); + response = builder.build(); + } catch (IOException e) { + ResponseConverter.setControllerException(controller, e); + } + done.run(response); + } + + @Override + public void listGroups(RpcController controller, RSGroupProtos.ListGroupsRequest request, RpcCallback done) { + RSGroupProtos.ListGroupsResponse response = null; + try { + RSGroupProtos.ListGroupsResponse.Builder builder = + RSGroupProtos.ListGroupsResponse.newBuilder(); + for(GroupInfo groupInfo: groupAdmin.listGroups()) { + builder.addGroupInfo(serDe.toProto(groupInfo)); + } + response = builder.build(); + } catch (IOException e) { + ResponseConverter.setControllerException(controller, e); + } + done.run(response); + } + + @Override + public void getGroupOfServer(RpcController controller, RSGroupProtos.GetGroupOfServerRequest request, RpcCallback done) { + RSGroupProtos.GetGroupOfServerResponse response = null; + try { + RSGroupProtos.GetGroupOfServerResponse.Builder builder = + RSGroupProtos.GetGroupOfServerResponse.newBuilder(); + GroupInfo groupInfo = groupAdmin.getGroupOfServer(request.getServer()); + response = builder.setGroupInfo(serDe.toProto(groupInfo)).build(); + } catch (IOException e) { + ResponseConverter.setControllerException(controller, e); + } + done.run(response); + } + + @Override + public void listServersInTransition(RpcController controller, RSGroupProtos.ListServersInTransitionRequest request, RpcCallback done) { + RSGroupProtos.ListServersInTransitionResponse response = null; + try { + RSGroupProtos.ListServersInTransitionResponse.Builder builder = + RSGroupProtos.ListServersInTransitionResponse.newBuilder(); + for(Map.Entry entry: groupAdmin.listServersInTransition().entrySet()) { + builder.addTransitions(HBaseProtos.NameStringPair.newBuilder() + .setName(entry.getKey()) + .setValue(entry.getValue()).build()); + } + response = builder.build(); + } catch (IOException e) { + ResponseConverter.setControllerException(controller, e); + } + done.run(response); + } + + void requireAdmin(String method) throws IOException { + if(getAccessController() != null) { + getAccessController().requirePermission(method, AccessControlLists.ACL_TABLE_NAME, null, null, + Permission.Action.ADMIN); + } + } + + AccessController getAccessController() { + if(accessController == null) { + accessController = (AccessController)menv.getMasterServices() + .getMasterCoprocessorHost().findCoprocessor(AccessController.class.getName()); + } + return accessController; + } + + void setGroupAdmin(GroupAdmin groupAdmin) { + this.groupAdmin = groupAdmin; + } + + GroupAdmin getGroupAdmin() { + return groupAdmin; + } +} diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupAdminImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupAdminImpl.java new file mode 100644 index 0000000..58179ba --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupAdminImpl.java @@ -0,0 +1,379 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.group; + +import com.google.common.collect.Maps; +import org.apache.commons.lang.StringUtils; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.NamespaceDescriptor; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.constraint.ConstraintException; +import org.apache.hadoop.hbase.master.AssignmentManager; +import org.apache.hadoop.hbase.master.LoadBalancer; +import org.apache.hadoop.hbase.master.MasterServices; +import org.apache.hadoop.hbase.master.RegionPlan; +import org.apache.hadoop.hbase.master.RegionState; +import org.apache.hadoop.hbase.master.ServerManager; +import org.apache.hadoop.metrics.util.MBeanUtil; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.NavigableSet; +import java.util.Set; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.LinkedBlockingDeque; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; + +/** + * Service to support Region Server Grouping (HBase-6721) + * This should be installed as a Master CoprocessorEndpoint + */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public class GroupAdminImpl implements GroupAdmin { + private static final Log LOG = LogFactory.getLog(GroupAdminImpl.class); + + private final long threadKeepAliveTimeInMillis = 1000; + private int threadMax = 1; + private BlockingQueue threadQ; + private MasterServices master; + private ExecutorService executorService; + //List of servers that are being moved from one group to another + //Key=host:port,Value=targetGroup + ConcurrentMap serversInTransition = + new ConcurrentHashMap(); + + public GroupAdminImpl(MasterServices master) { + this.master = master; + threadQ = new 
LinkedBlockingDeque(); + threadMax = master.getConfiguration().getInt("hbase.group.executor.threads", 1); + executorService = new ThreadPoolExecutor(threadMax, threadMax, + threadKeepAliveTimeInMillis, TimeUnit.MILLISECONDS, threadQ); + registerMBean(); + } + + @Override + public NavigableSet listTablesOfGroup(String groupName) throws IOException { + return getGroupInfoManager().getGroup(groupName).getTables(); + } + + + @Override + public GroupInfo getGroupInfo(String groupName) throws IOException { + return getGroupInfoManager().getGroup(groupName); + } + + + @Override + public GroupInfo getGroupInfoOfTable(TableName tableName) throws IOException { + String groupName = getGroupInfoManager().getGroupOfTable(tableName); + if (groupName == null) { + if(master.getTableDescriptors().get(tableName) == null) { + throw new ConstraintException("Table "+tableName+" does not exist"); + } + throw new ConstraintException("Table "+tableName+" has no group"); + } + return getGroupInfoManager().getGroup(groupName); + } + + @Override + public void moveServers(Set servers, String targetGroup) + throws IOException { + if (servers == null) { + throw new DoNotRetryIOException( + "The list of servers cannot be null."); + } + if (StringUtils.isEmpty(targetGroup)) { + throw new DoNotRetryIOException("The target group cannot be null."); + } + if(servers.size() < 1) { + return; + } + //check that it's a valid host and port + for(String server: servers) { + String splits[] = server.split(":",2); + if(splits.length < 2) + throw new DoNotRetryIOException("Server list contains an invalid HOST:PORT entry"); + Integer.parseInt(splits[1]); + } + + GroupInfoManager manager = getGroupInfoManager(); + synchronized (manager) { + //we only allow a move from a single source group + //so this should be ok + GroupInfo srcGrp = manager.getGroupOfServer(servers.iterator().next()); + //only move online servers (from default) + //or servers from other groups + //this prevents bogus servers from entering groups + if(GroupInfo.DEFAULT_GROUP.equals(srcGrp.getName())) { + Set onlineServers = new HashSet(); + for(ServerName server: master.getServerManager().getOnlineServers().keySet()) { + onlineServers.add(server.getHostAndPort()); + } + for(String el: servers) { + if(!onlineServers.contains(el)) { + throw new DoNotRetryIOException( + "Server "+el+" is not an online server in default group."); + } + } + } + + if(srcGrp.getServers().size() <= servers.size() && + srcGrp.getTables().size() > 0) { + throw new DoNotRetryIOException("Cannot leave a group "+srcGrp.getName()+ + " that contains tables " +"without servers."); + } + GroupMoveServerWorker.MoveServerPlan plan = + new GroupMoveServerWorker.MoveServerPlan(servers, targetGroup); + GroupMoveServerWorker worker = null; + try { + worker = new GroupMoveServerWorker(master, serversInTransition, getGroupInfoManager(), plan); + executorService.submit(worker); + LOG.info("GroupMoveServerWorkerSubmitted: "+plan.getTargetGroup()); + } catch(Exception e) { + LOG.error("Failed to submit GroupMoveServerWorker", e); + if (worker != null) { + worker.complete(); + } + throw new DoNotRetryIOException("Failed to submit GroupMoveServerWorker",e); + } + } + } + + @Override + public void moveTables(Set tables, String targetGroup) throws IOException { + if (tables == null) { + throw new ConstraintException( + "The list of tables cannot be null."); + } + if(tables.size() < 1) { + LOG.debug("moveTables() passed an empty set.
Ignoring."); + return; + } + GroupInfoManager manager = getGroupInfoManager(); + synchronized (manager) { + if(targetGroup != null) { + GroupInfo destGroup = manager.getGroup(targetGroup); + if(destGroup == null) { + throw new ConstraintException("Target group does not exist: "+targetGroup); + } + if(destGroup.getServers().size() < 1) { + throw new ConstraintException("Target group must have at least one server."); + } + } + + for(TableName table : tables) { + String srcGroup = manager.getGroupOfTable(table); + if(srcGroup != null && srcGroup.equals(targetGroup)) { + throw new ConstraintException("Source group is the same as target group for table "+table+" :"+srcGroup); + } + } + + manager.moveTables(tables, targetGroup); + } + for(TableName table: tables) { + for(HRegionInfo region: + master.getAssignmentManager().getRegionStates().getRegionsOfTable(table)) { + master.getAssignmentManager().unassign(region); + } + } + } + + @Override + public void addGroup(String name) throws IOException { + getGroupInfoManager().addGroup(new GroupInfo(name)); + } + + @Override + public void removeGroup(String name) throws IOException { + GroupInfoManager manager = getGroupInfoManager(); + synchronized (manager) { + GroupInfo groupInfo = getGroupInfoManager().getGroup(name); + if(groupInfo == null) { + throw new DoNotRetryIOException("Group "+name+" does not exist"); + } + int tableCount = groupInfo.getTables().size(); + if (tableCount > 0) { + throw new DoNotRetryIOException("Group "+name+" must have no associated tables: "+tableCount); + } + int serverCount = groupInfo.getServers().size(); + if(serverCount > 0) { + throw new DoNotRetryIOException("Group "+name+" must have no associated servers: "+serverCount); + } + for(NamespaceDescriptor ns: master.listNamespaceDescriptors()) { + String nsGroup = ns.getConfigurationValue(GroupInfo.NAMESPACEDESC_PROP_GROUP); + if(nsGroup != null && nsGroup.equals(name)) { + throw new DoNotRetryIOException("Group "+name+" is referenced by namespace: "+ns.getName()); + } + } + manager.removeGroup(name); + } + } + + @Override + public boolean balanceGroup(String groupName) throws IOException { + ServerManager serverManager = master.getServerManager(); + AssignmentManager assignmentManager = master.getAssignmentManager(); + LoadBalancer balancer = master.getLoadBalancer(); + + boolean balancerRan; + synchronized (balancer) { + // Only allow one balance run at at time. 
+ Map groupRIT = groupGetRegionsInTransition(groupName); + if (groupRIT.size() > 0) { + LOG.debug("Not running balancer because " + + groupRIT.size() + + " region(s) in transition: " + + StringUtils.abbreviate( + master.getAssignmentManager().getRegionStates().getRegionsInTransition().toString(), + 256)); + return false; + } + if (serverManager.areDeadServersInProgress()) { + LOG.debug("Not running balancer because processing dead regionserver(s): " + + serverManager.getDeadServers()); + return false; + } + + //We balance per group instead of per table + List plans = new ArrayList(); + for(Map.Entry>> tableMap: + getGroupAssignmentsByTable(groupName).entrySet()) { + LOG.info("Creating partial plan for table "+tableMap.getKey()+": "+tableMap.getValue()); + List partialPlans = balancer.balanceCluster(tableMap.getValue()); + LOG.info("Partial plan for table "+tableMap.getKey()+": "+partialPlans); + if (partialPlans != null) { + plans.addAll(partialPlans); + } + } + long startTime = System.currentTimeMillis(); + balancerRan = plans != null; + if (plans != null && !plans.isEmpty()) { + LOG.info("Group balance "+groupName+" starting with plan count: "+plans.size()); + for (RegionPlan plan: plans) { + LOG.info("balance " + plan); + assignmentManager.balance(plan); + } + LOG.info("Group balance "+groupName+" completed after "+(System.currentTimeMillis()-startTime)+" milliseconds"); + } + } + return balancerRan; + } + + @Override + public List listGroups() throws IOException { + return getGroupInfoManager().listGroups(); + } + + @Override + public GroupInfo getGroupOfServer(String hostPort) throws IOException { + return getGroupInfoManager().getGroupOfServer(hostPort); + } + + @Override + public Map listServersInTransition() throws IOException { + return Collections.unmodifiableMap(serversInTransition); + } + + @InterfaceAudience.Private + public GroupInfoManager getGroupInfoManager() throws IOException { + return ((GroupBasedLoadBalancer)master.getLoadBalancer()).getGroupInfoManager(); + } + + private Map groupGetRegionsInTransition(String groupName) + throws IOException { + Map rit = Maps.newTreeMap(); + AssignmentManager am = master.getAssignmentManager(); + GroupInfo groupInfo = getGroupInfo(groupName); + for(TableName tableName : groupInfo.getTables()) { + for(HRegionInfo regionInfo: am.getRegionStates().getRegionsOfTable(tableName)) { + RegionState state = + master.getAssignmentManager().getRegionStates().getRegionTransitionState(regionInfo); + if(state != null) { + rit.put(regionInfo.getEncodedName(), state); + } + } + } + return rit; + } + + private Map>> + getGroupAssignmentsByTable(String groupName) throws IOException { + Map>> result = Maps.newHashMap(); + GroupInfo groupInfo = getGroupInfo(groupName); + Map>> assignments = Maps.newHashMap(); + for(Map.Entry entry: + master.getAssignmentManager().getRegionStates().getRegionAssignments().entrySet()) { + TableName currTable = entry.getKey().getTable(); + ServerName currServer = entry.getValue(); + HRegionInfo currRegion = entry.getKey(); + if(groupInfo.getTables().contains(currTable)) { + if(!assignments.containsKey(entry.getKey().getTable())) { + assignments.put(currTable, new HashMap>()); + } + if(!assignments.get(currTable).containsKey(currServer)) { + assignments.get(currTable).put(currServer, new ArrayList()); + } + assignments.get(currTable).get(currServer).add(currRegion); + } + } + + Map> serverMap = Maps.newHashMap(); + for(ServerName serverName: master.getServerManager().getOnlineServers().keySet()) { +
if(groupInfo.getServers().contains(serverName.getHostAndPort())) { + serverMap.put(serverName, Collections.EMPTY_LIST); + } + } + + //add all tables that are members of the group + for(TableName tableName : groupInfo.getTables()) { + if(assignments.containsKey(tableName)) { + result.put(tableName, new HashMap>()); + result.get(tableName).putAll(serverMap); + result.get(tableName).putAll(assignments.get(tableName)); + LOG.debug("Adding assignments for "+tableName+": "+assignments.get(tableName)); + } + } + + return result; + } + + void registerMBean() { + MXBeanImpl mxBeanInfo = + MXBeanImpl.init(this, master); + MBeanUtil.registerMBean("Group", "Group", mxBeanInfo); + LOG.info("Registered Group MXBean"); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupBasedLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupBasedLoadBalancer.java new file mode 100644 index 0000000..f22c55d --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupBasedLoadBalancer.java @@ -0,0 +1,437 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.group; + +import com.google.common.collect.ArrayListMultimap; +import com.google.common.collect.LinkedListMultimap; +import com.google.common.collect.ListMultimap; +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.ClusterStatus; +import org.apache.hadoop.hbase.HBaseIOException; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.master.LoadBalancer; +import org.apache.hadoop.hbase.master.MasterServices; +import org.apache.hadoop.hbase.master.RegionPlan; +import org.apache.hadoop.hbase.master.balancer.StochasticLoadBalancer; +import org.apache.hadoop.hbase.security.access.AccessControlLists; +import org.apache.hadoop.util.ReflectionUtils; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.TreeMap; + +/** + * GroupBasedLoadBalancer, used when Region Server Grouping is configured (HBase-6721) + * It does region balance based on a table's group membership. 
+ * + * Most assignment methods contain two exclusive code paths: Online - when the group + * table is online and Offline - when it is unavailable. + * + * During Offline, assignments are assigned based on cached information in zookeeper. + * If unavailable (ie bootstrap) then regions are assigned randombly. + * + * Once the GROUP table has been assigned, the balancer switches to Online and will then + * start providing appropriate assignments for user tables. + * + */ +@InterfaceAudience.Public +public class GroupBasedLoadBalancer implements LoadBalancer { + /** Config for pluggable load balancers */ + public static final String HBASE_GROUP_LOADBALANCER_CLASS = "hbase.group.grouploadbalancer.class"; + + private static final Log LOG = LogFactory.getLog(GroupBasedLoadBalancer.class); + + public static final Set SPECIAL_TABLES = new HashSet(); + static { + SPECIAL_TABLES.add(AccessControlLists.ACL_TABLE_NAME); + SPECIAL_TABLES.add(TableName.META_TABLE_NAME); + SPECIAL_TABLES.add(TableName.NAMESPACE_TABLE_NAME); + SPECIAL_TABLES.add(GroupInfoManager.GROUP_TABLE_NAME); + } + + private Configuration config; + private ClusterStatus clusterStatus; + private MasterServices masterServices; + private GroupInfoManager groupManager; + private LoadBalancer internalBalancer; + + //used during reflection by LoadBalancerFactory + @InterfaceAudience.Private + public GroupBasedLoadBalancer() { + } + + //This constructor should only be used for unit testing + @InterfaceAudience.Private + public GroupBasedLoadBalancer(GroupInfoManager groupManager) { + this.groupManager = groupManager; + } + + @Override + public Configuration getConf() { + return config; + } + + @Override + public void setConf(Configuration conf) { + this.config = conf; + } + + @Override + public void setClusterStatus(ClusterStatus st) { + this.clusterStatus = st; + } + + @Override + public void setMasterServices(MasterServices masterServices) { + this.masterServices = masterServices; + } + + @Override + public List balanceCluster(Map> clusterState) + throws HBaseIOException { + + if (!isOnline()) { + throw new IllegalStateException(GroupInfoManager.GROUP_TABLE_NAME+ + " is not online, unable to perform balance"); + } + + Map> correctedState = correctAssignments(clusterState); + List regionPlans = new ArrayList(); + try { + for (GroupInfo info : groupManager.listGroups()) { + Map> groupClusterState = new HashMap>(); + for (String sName : info.getServers()) { + for(ServerName curr: clusterState.keySet()) { + if(ServerName.isSameHostnameAndPort(curr, ServerName.parseServerName(sName))) { + groupClusterState.put(curr, correctedState.get(curr)); + } + } + } + List groupPlans = this.internalBalancer + .balanceCluster(groupClusterState); + if (groupPlans != null) { + regionPlans.addAll(groupPlans); + } + } + } catch (IOException exp) { + LOG.warn("Exception while balancing cluster.", exp); + regionPlans.clear(); + } + return regionPlans; + } + + @Override + public Map> roundRobinAssignment ( + List regions, List servers) throws HBaseIOException { + Map> assignments = Maps.newHashMap(); + ListMultimap regionMap = LinkedListMultimap.create(); + ListMultimap serverMap = LinkedListMultimap.create(); + generateGroupMaps(regions, servers, regionMap, serverMap); + for(String groupKey : regionMap.keySet()) { + if (regionMap.get(groupKey).size() > 0) { + Map> result = + this.internalBalancer.roundRobinAssignment( + regionMap.get(groupKey), + serverMap.get(groupKey)); + if(result != null) { + assignments.putAll(result); + } + } + } + return assignments; + } 
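/*
 * Illustrative wiring sketch only, not part of this change: the balancer above is
 * presumably installed as the master's balancer (it is loaded reflectively by the
 * LoadBalancerFactory) and then delegates each group's regions to an internal
 * balancer, which defaults to StochasticLoadBalancer. Property names used here are
 * the stock "hbase.master.loadbalancer.class" key and the
 * HBASE_GROUP_LOADBALANCER_CLASS key defined in this class.
 *
 *   Configuration conf = HBaseConfiguration.create();
 *   conf.set("hbase.master.loadbalancer.class",
 *       GroupBasedLoadBalancer.class.getName());
 *   conf.set(GroupBasedLoadBalancer.HBASE_GROUP_LOADBALANCER_CLASS,
 *       StochasticLoadBalancer.class.getName());
 */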
+ + @Override + public Map> retainAssignment( + Map regions, List servers) throws HBaseIOException { + if (!isOnline()) { + return offlineRetainAssignment(regions, servers); + } + return onlineRetainAssignment(regions, servers); + } + + public Map> offlineRetainAssignment( + Map regions, List servers) throws HBaseIOException { + //We will just keep assignments even if they are incorrect. + //Chances are most will be assigned correctly. + //Then we just use balance to correct the misplaced few. + //we need to correct catalog and group table assignment anyway. + return internalBalancer.retainAssignment(regions, servers); + } + + public Map> onlineRetainAssignment( + Map regions, List servers) throws HBaseIOException { + try { + Map> assignments = new TreeMap>(); + ListMultimap groupToRegion = ArrayListMultimap.create(); + List misplacedRegions = getMisplacedRegions(regions); + for (HRegionInfo region : regions.keySet()) { + if (!misplacedRegions.contains(region)) { + String groupName = groupManager.getGroupOfTable(region.getTable()); + groupToRegion.put(groupName, region); + } + } + // Now the "groupToRegion" map has only the regions which have correct + // assignments. + for (String key : groupToRegion.keySet()) { + Map currentAssignmentMap = new TreeMap(); + List regionList = groupToRegion.get(key); + GroupInfo info = groupManager.getGroup(key); + List candidateList = filterOfflineServers(info, servers); + for (HRegionInfo region : regionList) { + currentAssignmentMap.put(region, regions.get(region)); + } + assignments.putAll(this.internalBalancer.retainAssignment( + currentAssignmentMap, candidateList)); + } + + for (HRegionInfo region : misplacedRegions) { + String groupName = groupManager.getGroupOfTable( + region.getTable()); + GroupInfo info = groupManager.getGroup(groupName); + List candidateList = filterOfflineServers(info, servers); + ServerName server = this.internalBalancer.randomAssignment(region, + candidateList); + if (server != null && !assignments.containsKey(server)) { + assignments.put(server, new ArrayList()); + } else if (server != null) { + assignments.get(server).add(region); + } else { + //if not server is available assign to bogus so it ends up in RIT + if(!assignments.containsKey(BOGUS_SERVER_NAME)) { + assignments.put(BOGUS_SERVER_NAME, new ArrayList()); + } + assignments.get(BOGUS_SERVER_NAME).add(region); + } + } + return assignments; + } catch (IOException e) { + throw new HBaseIOException("Failed to do online retain assignment", e); + } + } + + @Override + public Map immediateAssignment( + List regions, List servers) throws HBaseIOException { + Map assignments = Maps.newHashMap(); + ListMultimap regionMap = LinkedListMultimap.create(); + ListMultimap serverMap = LinkedListMultimap.create(); + generateGroupMaps(regions, servers, regionMap, serverMap); + for(String groupKey : regionMap.keySet()) { + if (regionMap.get(groupKey).size() > 0) { + assignments.putAll( + this.internalBalancer.immediateAssignment( + regionMap.get(groupKey), + serverMap.get(groupKey))); + } + } + return assignments; + } + + @Override + public ServerName randomAssignment(HRegionInfo region, + List servers) throws HBaseIOException { + ListMultimap regionMap = LinkedListMultimap.create(); + ListMultimap serverMap = LinkedListMultimap.create(); + generateGroupMaps(Lists.newArrayList(region), servers, regionMap, serverMap); + List filteredServers = serverMap.get(regionMap.keySet().iterator().next()); + return this.internalBalancer.randomAssignment(region, filteredServers); + } + + private void 
generateGroupMaps( + List regions, + List servers, + ListMultimap regionMap, + ListMultimap serverMap) throws HBaseIOException { + try { + for (HRegionInfo region : regions) { + String groupName = groupManager.getGroupOfTable(region.getTable()); + if(groupName == null) { + LOG.warn("Group for table "+region.getTable()+" is null"); + } + regionMap.put(groupName, region); + } + for (String groupKey : regionMap.keySet()) { + GroupInfo info = groupManager.getGroup(groupKey); + serverMap.putAll(groupKey, filterOfflineServers(info, servers)); + if(serverMap.get(groupKey).size() < 1) { + serverMap.put(groupKey, BOGUS_SERVER_NAME); + } + } + } catch(IOException e) { + throw new HBaseIOException("Failed to generate group maps", e); + } + } + + private List filterOfflineServers(GroupInfo groupInfo, + List onlineServers) { + if (groupInfo != null) { + return filterServers(groupInfo.getServers(), onlineServers); + } else { + LOG.debug("Group Information found to be null. Some regions might be unassigned."); + return Collections.EMPTY_LIST; + } + } + + /** + * Filter servers based on the online servers. + * + * @param servers + * the servers + * @param onlineServers + * List of servers which are online. + * @return the list + */ + private List filterServers(Collection servers, + Collection onlineServers) { + ArrayList finalList = new ArrayList(); + for (String server : servers) { + for(ServerName curr: onlineServers) { + if(ServerName.isSameHostnameAndPort(curr, ServerName.parseServerName(server))) { + finalList.add(curr); + } + } + } + return finalList; + } + + private ListMultimap groupRegions( + List regionList) throws IOException { + ListMultimap regionGroup = ArrayListMultimap + .create(); + for (HRegionInfo region : regionList) { + String groupName = groupManager.getGroupOfTable(region.getTable()); + regionGroup.put(groupName, region); + } + return regionGroup; + } + + private List getMisplacedRegions( + Map regions) throws IOException { + List misplacedRegions = new ArrayList(); + for (HRegionInfo region : regions.keySet()) { + ServerName assignedServer = regions.get(region); + GroupInfo info = groupManager.getGroup(groupManager.getGroupOfTable(region.getTable())); + if (assignedServer != null && + (info == null || !info.containsServer(assignedServer.getHostAndPort()))) { + LOG.warn("Found misplaced region: "+region.getRegionNameAsString()+ + " on server: "+assignedServer+ + " found in group: "+groupManager.getGroupOfServer(assignedServer.getHostAndPort())+ + " outside of group: "+info.getName()); + misplacedRegions.add(region); + } + } + return misplacedRegions; + } + + private Map> correctAssignments( + Map> existingAssignments){ + Map> correctAssignments = new TreeMap>(); + List misplacedRegions = new LinkedList(); + for (ServerName sName : existingAssignments.keySet()) { + correctAssignments.put(sName, new LinkedList()); + List regions = existingAssignments.get(sName); + for (HRegionInfo region : regions) { + GroupInfo info = null; + try { + info = groupManager.getGroup(groupManager.getGroupOfTable(region.getTable())); + }catch(IOException exp){ + LOG.debug("Group information null for region of table " + region.getTable(), + exp); + } + if ((info == null) || (!info.containsServer(sName.getHostAndPort()))) { + // Misplaced region. + misplacedRegions.add(region); + } else { + correctAssignments.get(sName).add(region); + } + } + } + + //TODO bulk unassign? + //unassign misplaced regions, so that they are assigned to correct groups. 
+ for(HRegionInfo info: misplacedRegions) { + this.masterServices.getAssignmentManager().unassign(info); + } + return correctAssignments; + } + + @Override + public void initialize() throws HBaseIOException { + // Create the balancer + Class balancerKlass = config.getClass( + HBASE_GROUP_LOADBALANCER_CLASS, + StochasticLoadBalancer.class, LoadBalancer.class); + internalBalancer = ReflectionUtils.newInstance(balancerKlass, config); + internalBalancer.setClusterStatus(clusterStatus); + internalBalancer.setMasterServices(masterServices); + internalBalancer.setConf(config); + internalBalancer.initialize(); + if (groupManager == null) { + try { + groupManager = new GroupInfoManagerImpl(masterServices); + } catch (IOException e) { + throw new HBaseIOException("Failed to load group manager", e); + } + } + } + + public boolean isOnline() { + return groupManager != null && groupManager.isOnline(); + } + + @InterfaceAudience.Private + public GroupInfoManager getGroupInfoManager() throws IOException { + return groupManager; + } + + @Override + public void regionOnline(HRegionInfo regionInfo, ServerName sn) { + } + + @Override + public void regionOffline(HRegionInfo regionInfo) { + } + + @Override + public void stop(String why) { + } + + @Override + public boolean isStopped() { + return false; + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupInfo.java new file mode 100644 index 0000000..fcbf9dc --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupInfo.java @@ -0,0 +1,177 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.group; + +import com.google.common.collect.Sets; +import org.apache.hadoop.hbase.TableName; +import org.codehaus.jackson.annotate.JsonCreator; +import org.codehaus.jackson.annotate.JsonProperty; + +import java.io.Serializable; +import java.util.Collection; +import java.util.NavigableSet; + +/** + * Stores the group information of region server groups. 
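 *
 * A minimal usage sketch (the group, server, and table names below are
 * illustrative only, not taken from this patch):
 * <pre>
 *   GroupInfo group = new GroupInfo("batch_group");
 *   group.addServer("rs1.example.com:16020");
 *   group.addTable(TableName.valueOf("ns:events"));
 * </pre>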
+ */ +public class GroupInfo implements Serializable { + + public static final String DEFAULT_GROUP = "default"; + public static final String NAMESPACEDESC_PROP_GROUP = "hbase.rsgroup.name"; + public static final String TABLEDESC_PROP_GROUP = "hbase.rsgroup.name"; + public static final String TRANSITION_GROUP_PREFIX = "_transition_"; + + private String name; + private NavigableSet servers; + private NavigableSet tables; + + public GroupInfo(String name) { + this(name, Sets.newTreeSet(), Sets.newTreeSet()); + } + + //constructor for jackson + @JsonCreator + GroupInfo(@JsonProperty("name") String name, + @JsonProperty("servers") NavigableSet servers, + @JsonProperty("tables") NavigableSet tables) { + this.name = name; + this.servers = servers; + this.tables = tables; + } + + public GroupInfo(GroupInfo src) { + name = src.getName(); + servers = Sets.newTreeSet(src.getServers()); + tables = Sets.newTreeSet(src.getTables()); + } + + /** + * Get group name. + * + * @return + */ + public String getName() { + return name; + } + + /** + * Adds the server to the group. + * + * @param hostPort the server + */ + public void addServer(String hostPort){ + servers.add(hostPort); + } + + /** + * Adds a group of servers. + * + * @param hostPort the servers + */ + public void addAllServers(Collection hostPort){ + servers.addAll(hostPort); + } + + /** + * @param hostPort + * @return true, if a server with hostPort is found + */ + public boolean containsServer(String hostPort) { + return servers.contains(hostPort); + } + + /** + * Get list of servers. + * + * @return + */ + public NavigableSet getServers() { + return servers; + } + + /** + * Remove a server from this group. + * + * @param hostPort + */ + public boolean removeServer(String hostPort) { + return servers.remove(hostPort); + } + + /** + * Set of tables that are members of this group + * @return + */ + public NavigableSet getTables() { + return tables; + } + + public void addTable(TableName table) { + tables.add(table); + } + + public void addAllTables(Collection arg) { + tables.addAll(arg); + } + + public boolean containsTable(TableName table) { + return tables.contains(table); + } + + public boolean removeTable(TableName table) { + return tables.remove(table); + } + + @Override + public String toString() { + StringBuffer sb = new StringBuffer(); + sb.append("GroupName:"); + sb.append(this.name); + sb.append(", "); + sb.append(" Servers:"); + sb.append(this.servers); + return sb.toString(); + + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + GroupInfo groupInfo = (GroupInfo) o; + + if (!name.equals(groupInfo.name)) return false; + if (!servers.equals(groupInfo.servers)) return false; + if (!tables.equals(groupInfo.tables)) return false; + + return true; + } + + @Override + public int hashCode() { + int result = servers.hashCode(); + result = 31 * result + tables.hashCode(); + result = 31 * result + name.hashCode(); + return result; + } + +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupInfoManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupInfoManager.java new file mode 100644 index 0000000..86eaa0e --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupInfoManager.java @@ -0,0 +1,129 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.group; + +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.NamespaceDescriptor; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.util.Bytes; + +import java.io.IOException; +import java.util.List; +import java.util.Set; + +/** + * Interface used to manage GroupInfo storage. An implementation + * has the option to support offline mode. + * See {@link GroupBasedLoadBalancer} + */ +public interface GroupInfoManager { + //Assigned before user tables + public static final TableName GROUP_TABLE_NAME = + TableName.valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR,"rsgroup"); + public static final byte[] GROUP_TABLE_NAME_BYTES = GROUP_TABLE_NAME.toBytes(); + public static final String groupZNode = "groupInfo"; + public static final byte[] META_FAMILY_BYTES = Bytes.toBytes("m"); + public static final byte[] ROW_KEY = {0}; + + + /** + * Adds the group. + * + * @param groupInfo the group name + * @throws java.io.IOException Signals that an I/O exception has occurred. + */ + void addGroup(GroupInfo groupInfo) throws IOException; + + /** + * Remove a region server group. + * + * @param groupName the group name + * @throws java.io.IOException Signals that an I/O exception has occurred. + */ + void removeGroup(String groupName) throws IOException; + + /** + * move servers to a new group. + * @param hostPorts list of servers, must be part of the same group + * @param srcGroup + * @param dstGroup + * @return true if move was successful + * @throws java.io.IOException + */ + boolean moveServers(Set hostPorts, String srcGroup, String dstGroup) throws IOException; + + /** + * Gets the group info of server. + * + * @param hostPort the server + * @return An instance of GroupInfo + */ + GroupInfo getGroupOfServer(String hostPort) throws IOException; + + /** + * Gets the group information. 
+ * + * @param groupName the group name + * @return An instance of GroupInfo + */ + GroupInfo getGroup(String groupName) throws IOException; + + /** + * Get the group membership of a table + * @param tableName + * @return Group name of table + * @throws java.io.IOException + */ + String getGroupOfTable(TableName tableName) throws IOException; + + /** + * Set the group membership of a set of tables + * + * @param tableNames + * @param groupName + * @throws java.io.IOException + */ + void moveTables(Set tableNames, String groupName) throws IOException; + + /** + * List the groups + * + * @return list of GroupInfo + * @throws java.io.IOException + */ + List listGroups() throws IOException; + + /** + * Refresh/reload the group information from + * the persistent store + * + * @throws java.io.IOException + */ + void refresh() throws IOException; + + /** + * Whether the manager is able to fully + * return group metadata + * + * @return + */ + boolean isOnline(); +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupInfoManagerImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupInfoManagerImpl.java new file mode 100644 index 0000000..bc36ed4 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupInfoManagerImpl.java @@ -0,0 +1,683 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.group; + +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; +import com.google.common.collect.Sets; + +import com.google.protobuf.ByteString; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HRegionLocation; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.NamespaceDescriptor; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Delete; +import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.client.HConnection; +import org.apache.hadoop.hbase.client.HConnectionManager; +import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.client.MetaScanner; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.RowMutations; +import org.apache.hadoop.hbase.client.TableState; +import org.apache.hadoop.hbase.master.MasterServices; +import org.apache.hadoop.hbase.master.ServerListener; +import org.apache.hadoop.hbase.master.TableStateManager; +import org.apache.hadoop.hbase.master.handler.CreateTableHandler; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.ResponseConverter; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; +import org.apache.hadoop.hbase.zookeeper.ZKUtil; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.zookeeper.KeeperException; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.NavigableSet; +import java.util.Set; +import java.util.TreeSet; +import java.util.concurrent.atomic.AtomicBoolean; + +/** + * This is an implementation of {@link GroupInfoManager}. Which makes + * use of an HBase table as the persistence store for the group information. + * It also makes use of zookeeper to store group information needed + * for bootstrapping during offline mode. + */ +public class GroupInfoManagerImpl implements GroupInfoManager, ServerListener { + private static final Log LOG = LogFactory.getLog(GroupInfoManagerImpl.class); + + /** Table descriptor for hbase:rsgroup catalog table */ + private final static HTableDescriptor GROUP_TABLE_DESC; + static { + GROUP_TABLE_DESC = new HTableDescriptor(GROUP_TABLE_NAME_BYTES); + GROUP_TABLE_DESC.addFamily(new HColumnDescriptor(META_FAMILY_BYTES)); + GROUP_TABLE_DESC.setMaxFileSize(1l << 32); + } + + //Access to this map should always be synchronized. 
+ private volatile Map groupMap; + private volatile Map tableMap; + private MasterServices master; + private HTable groupTable; + private ZooKeeperWatcher watcher; + private GroupStartupWorker groupStartupWorker; + //contains list of groups that were last flushed to persistent store + private volatile Set prevGroups; + private GroupSerDe groupSerDe; + private DefaultServerUpdater defaultServerUpdater; + + + public GroupInfoManagerImpl(MasterServices master) throws IOException { + this.groupMap = Collections.EMPTY_MAP; + this.tableMap = Collections.EMPTY_MAP; + groupSerDe = new GroupSerDe(); + this.master = master; + this.watcher = master.getZooKeeper(); + groupStartupWorker = new GroupStartupWorker(this, master); + prevGroups = new HashSet(); + refresh(); + groupStartupWorker.start(); + defaultServerUpdater = new DefaultServerUpdater(this); + master.getServerManager().registerListener(this); + defaultServerUpdater.start(); + } + + /** + * Adds the group. + * + * @param groupInfo the group name + */ + @Override + public synchronized void addGroup(GroupInfo groupInfo) throws IOException { + if (groupMap.get(groupInfo.getName()) != null || + groupInfo.getName().equals(GroupInfo.DEFAULT_GROUP)) { + throw new DoNotRetryIOException("Group already exists: "+groupInfo.getName()); + } + Map newGroupMap = Maps.newHashMap(groupMap); + newGroupMap.put(groupInfo.getName(), groupInfo); + flushConfig(newGroupMap); + } + + @Override + public synchronized boolean moveServers(Set hostPorts, String srcGroup, String dstGroup) throws IOException { + GroupInfo src = new GroupInfo(getGroup(srcGroup)); + GroupInfo dst = new GroupInfo(getGroup(dstGroup)); + boolean foundOne = false; + for(String el: hostPorts) { + foundOne = src.removeServer(el) || foundOne; + dst.addServer(el); + } + + Map newGroupMap = Maps.newHashMap(groupMap); + newGroupMap.put(src.getName(), src); + newGroupMap.put(dst.getName(), dst); + + flushConfig(newGroupMap); + return foundOne; + } + + /** + * Gets the group info of server. + * + * @param hostPort the server + * @return An instance of GroupInfo. + */ + @Override + public GroupInfo getGroupOfServer(String hostPort) throws IOException { + for (GroupInfo info : groupMap.values()) { + if (info.containsServer(hostPort)){ + return info; + } + } + return getGroup(GroupInfo.DEFAULT_GROUP); + } + + /** + * Gets the group information. + * + * @param groupName + * the group name + * @return An instance of GroupInfo + */ + @Override + public GroupInfo getGroup(String groupName) throws IOException { + GroupInfo groupInfo = groupMap.get(groupName); + return groupInfo; + } + + + + @Override + public String getGroupOfTable(TableName tableName) throws IOException { + return tableMap.get(tableName); + } + + @Override + public synchronized void moveTables(Set tableNames, String groupName) throws IOException { + if (groupName != null && !groupMap.containsKey(groupName)) { + throw new DoNotRetryIOException("Group "+groupName+" does not exist or is a special group"); + } + Map newGroupMap = Maps.newHashMap(groupMap); + for(TableName tableName: tableNames) { + if (tableMap.containsKey(tableName)) { + GroupInfo src = new GroupInfo(groupMap.get(tableMap.get(tableName))); + src.removeTable(tableName); + newGroupMap.put(src.getName(), src); + } + if(groupName != null) { + GroupInfo dst = new GroupInfo(newGroupMap.get(groupName)); + dst.addTable(tableName); + newGroupMap.put(dst.getName(), dst); + } + } + + flushConfig(newGroupMap); + } + + + /** + * Delete a region server group. 
+ * + * @param groupName the group name + * @throws java.io.IOException Signals that an I/O exception has occurred. + */ + @Override + public synchronized void removeGroup(String groupName) throws IOException { + if (!groupMap.containsKey(groupName) || groupName.equals(GroupInfo.DEFAULT_GROUP)) { + throw new DoNotRetryIOException("Group "+groupName+" does not exist or is a reserved group"); + } + Map newGroupMap = Maps.newHashMap(groupMap); + newGroupMap.remove(groupName); + flushConfig(newGroupMap); + } + + @Override + public List listGroups() throws IOException { + List list = Lists.newLinkedList(groupMap.values()); + return list; + } + + @Override + public boolean isOnline() { + return groupStartupWorker.isOnline(); + } + + @Override + public synchronized void refresh() throws IOException { + refresh(false); + } + + private synchronized void refresh(boolean forceOnline) throws IOException { + List groupList = new LinkedList(); + + //overwrite anything read from zk, group table is source of truth + //if online read from GROUP table + if (forceOnline || isOnline()) { + LOG.debug("Refershing in Online mode."); + if (groupTable == null) { + groupTable = new HTable(master.getConfiguration(), GROUP_TABLE_NAME_BYTES); + } + groupList.addAll(groupSerDe.retrieveGroupList(groupTable)); + } else { + LOG.debug("Refershing in Offline mode."); + String groupBasePath = ZKUtil.joinZNode(watcher.baseZNode, groupZNode); + groupList.addAll(groupSerDe.retrieveGroupList(watcher, groupBasePath)); + } + + //refresh default group, prune + NavigableSet orphanTables = new TreeSet(); + for(String entry: master.getTableDescriptors().getAll().keySet()) { + orphanTables.add(TableName.valueOf(entry)); + } + for(TableName table : GroupBasedLoadBalancer.SPECIAL_TABLES) { + orphanTables.add(table); + } + for(GroupInfo group: groupList) { + if(!group.getName().equals(GroupInfo.DEFAULT_GROUP)) { + orphanTables.removeAll(group.getTables()); + } + } + + //This is added to the last of the list + //so it overwrites the default group loaded + //from region group table or zk + groupList.add(new GroupInfo(GroupInfo.DEFAULT_GROUP, + new TreeSet(getDefaultServers()), + orphanTables)); + + + //populate the data + HashMap newGroupMap = Maps.newHashMap(); + HashMap newTableMap = Maps.newHashMap(); + for (GroupInfo group : groupList) { + newGroupMap.put(group.getName(), group); + for(TableName table: group.getTables()) { + newTableMap.put(table, group.getName()); + } + } + groupMap = Collections.unmodifiableMap(newGroupMap); + tableMap = Collections.unmodifiableMap(newTableMap); + + prevGroups.clear(); + prevGroups.addAll(groupMap.keySet()); + } + + private synchronized Map flushConfigTable(Map newGroupMap) throws IOException { + Map newTableMap = Maps.newHashMap(); + Put put = new Put(ROW_KEY); + Delete delete = new Delete(ROW_KEY); + + //populate deletes + for(String groupName : prevGroups) { + if(!newGroupMap.containsKey(groupName)) { + delete.deleteColumns(META_FAMILY_BYTES, Bytes.toBytes(groupName)); + } + } + + //populate puts + for(GroupInfo groupInfo : newGroupMap.values()) { + RSGroupProtos.GroupInfo proto = groupSerDe.toProto(groupInfo); + put.add(META_FAMILY_BYTES, + Bytes.toBytes(groupInfo.getName()), + proto.toByteArray()); + for(TableName entry: groupInfo.getTables()) { + newTableMap.put(entry, groupInfo.getName()); + } + } + + RowMutations rowMutations = new RowMutations(ROW_KEY); + if(put.size() > 0) { + rowMutations.add(put); + } + if(delete.size() > 0) { + rowMutations.add(delete); + } + 
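      //All group definitions live in the single ROW_KEY row of hbase:rsgroup,
      //so the accumulated puts and deletes above can be applied atomically by
      //the single mutateRow call below.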
if(rowMutations.getMutations().size() > 0) { + groupTable.mutateRow(rowMutations); + } + return newTableMap; + } + + private synchronized void flushConfig(Map newGroupMap) throws IOException { + Map newTableMap; + //this should only not enter during startup + if(!isOnline()) { + LOG.error("Still in Offline mode."); + throw new IOException("Still in Offline mode."); + } + + newTableMap = flushConfigTable(newGroupMap); + + //make changes visible since it has been + //persisted in the source of truth + groupMap = Collections.unmodifiableMap(newGroupMap); + tableMap = Collections.unmodifiableMap(newTableMap); + + + try { + //Write zk data first since that's what we'll read first + String groupBasePath = ZKUtil.joinZNode(watcher.baseZNode, groupZNode); + ZKUtil.createAndFailSilent(watcher, groupBasePath, ProtobufUtil.PB_MAGIC); + + List zkOps = new ArrayList(newGroupMap.size()); + for(String groupName : prevGroups) { + if(!newGroupMap.containsKey(groupName)) { + String znode = ZKUtil.joinZNode(groupBasePath, groupName); + zkOps.add(ZKUtil.ZKUtilOp.deleteNodeFailSilent(znode)); + } + } + + + for(GroupInfo groupInfo : newGroupMap.values()) { + String znode = ZKUtil.joinZNode(groupBasePath, groupInfo.getName()); + RSGroupProtos.GroupInfo proto = groupSerDe.toProto(groupInfo); + LOG.debug("Updating znode: "+znode); + ZKUtil.createAndFailSilent(watcher, znode); + zkOps.add(ZKUtil.ZKUtilOp.deleteNodeFailSilent(znode)); + zkOps.add(ZKUtil.ZKUtilOp.createAndFailSilent(znode, + ProtobufUtil.prependPBMagic(proto.toByteArray()))); + } + LOG.debug("Writing ZK GroupInfo count: " + zkOps.size()); + + ZKUtil.multiOrSequential(watcher, zkOps, false); + } catch (KeeperException e) { + LOG.error("Failed to write to groupZNode", e); + master.abort("Failed to write to groupZNode", e); + throw new IOException("Failed to write to groupZNode",e); + } + + prevGroups.clear(); + prevGroups.addAll(newGroupMap.keySet()); + } + + private List getOnlineRS() throws IOException { + if (master != null) { + return master.getServerManager().getOnlineServersList(); + } + try { + LOG.debug("Reading online RS from zookeeper"); + List servers = new LinkedList(); + for (String el: ZKUtil.listChildrenNoWatch(watcher, watcher.rsZNode)) { + servers.add(ServerName.parseServerName(el)); + } + return servers; + } catch (KeeperException e) { + throw new IOException("Failed to retrieve server list from zookeeper", e); + } + } + + private List getDefaultServers() throws IOException { + List defaultServers = new LinkedList(); + for(ServerName server : getOnlineRS()) { + String hostPort = server.getHostAndPort(); + boolean found = false; + for(GroupInfo groupInfo : groupMap.values()) { + if(!GroupInfo.DEFAULT_GROUP.equals(groupInfo.getName()) && + groupInfo.containsServer(hostPort)) { + found = true; + break; + } + } + if(!found) { + defaultServers.add(hostPort); + } + } + return defaultServers; + } + + private synchronized void updateDefaultServers(NavigableSet hostPort) throws IOException { + if(!isOnline()) { + LOG.info("Offline mode. 
Skipping update of default servers"); + return; + } + GroupInfo info = groupMap.get(GroupInfo.DEFAULT_GROUP); + GroupInfo newInfo = new GroupInfo(info.getName(), hostPort, info.getTables()); + HashMap newGroupMap = Maps.newHashMap(groupMap); + newGroupMap.put(newInfo.getName(), newInfo); + flushConfig(newGroupMap); + } + + @Override + public void serverAdded(ServerName serverName) { + defaultServerUpdater.serverChanged(); + } + + @Override + public void serverRemoved(ServerName serverName) { + defaultServerUpdater.serverChanged(); + } + + private static class DefaultServerUpdater extends Thread { + private static final Log LOG = LogFactory.getLog(DefaultServerUpdater.class); + private GroupInfoManagerImpl mgr; + private boolean hasChanged = false; + + public DefaultServerUpdater(GroupInfoManagerImpl mgr) { + this.mgr = mgr; + } + + public void run() { + List prevDefaultServers = new LinkedList(); + while(!mgr.master.isAborted() || !mgr.master.isStopped()) { + try { + LOG.info("Updating default servers."); + List servers = mgr.getDefaultServers(); + Collections.sort(servers); + if(!servers.equals(prevDefaultServers)) { + mgr.updateDefaultServers(new TreeSet(servers)); + prevDefaultServers = servers; + LOG.info("Updated with servers: "+servers.size()); + } + try { + synchronized (this) { + if(!hasChanged) { + wait(); + } + hasChanged = false; + } + } catch (InterruptedException e) { + } + } catch (IOException e) { + LOG.warn("Failed to update default servers", e); + } + } + } + + public void serverChanged() { + synchronized (this) { + hasChanged = true; + this.notify(); + } + } + } + + + private static class GroupStartupWorker extends Thread { + private static final Log LOG = LogFactory.getLog(GroupStartupWorker.class); + + private Configuration conf; + private volatile boolean isOnline = false; + private MasterServices masterServices; + private GroupInfoManagerImpl groupInfoManager; + + public GroupStartupWorker(GroupInfoManagerImpl groupInfoManager, + MasterServices masterServices) { + this.conf = masterServices.getConfiguration(); + this.masterServices = masterServices; + this.groupInfoManager = groupInfoManager; + setName(GroupStartupWorker.class.getName()+"-"+masterServices.getServerName()); + setDaemon(true); + } + + @Override + public void run() { + if(waitForGroupTableOnline()) { + LOG.info("GroupBasedLoadBalancer is now online"); + } + } + + public boolean waitForGroupTableOnline() { + final List foundRegions = new LinkedList(); + final List assignedRegions = new LinkedList(); + final AtomicBoolean found = new AtomicBoolean(false); + final TableStateManager tsm = masterServices.getTableStateManager(); + boolean createSent = false; + while (!found.get() && isMasterRunning()) { + foundRegions.clear(); + assignedRegions.clear(); + found.set(true); + try { + + final HConnection conn = HConnectionManager.getConnection(conf); + final HTable nsTable =new HTable(TableName.NAMESPACE_TABLE_NAME, conn); + final HTable groupTable = new HTable(GroupInfoManager.GROUP_TABLE_NAME, conn); + boolean rootMetaFound = + masterServices.getMetaTableLocator().verifyMetaRegionLocation( + conn, + masterServices.getZooKeeper(), + 1); + final AtomicBoolean nsFound = new AtomicBoolean(false); + if (rootMetaFound) { + + MetaScanner.MetaScannerVisitor visitor = new MetaScanner.MetaScannerVisitorBase() { + @Override + public boolean processRow(Result row) throws IOException { + HRegionInfo info = HRegionInfo.getHRegionInfo(row); + if (info != null) { + Cell serverCell = + 
row.getColumnLatestCell(HConstants.CATALOG_FAMILY, + HConstants.SERVER_QUALIFIER); + if (Bytes.equals(GROUP_TABLE_NAME_BYTES, info.getTableName()) && + serverCell != null) { + ServerName sn = + ServerName.parseVersionedServerName(CellUtil.cloneValue(serverCell)); + if (sn == null) { + found.set(false); + } else if (tsm.isTableState(GROUP_TABLE_NAME, TableState.State.ENABLED)) { + try { + HBaseProtos.RegionSpecifier regionSpecifier = + HBaseProtos.RegionSpecifier.newBuilder() + .setValue(ByteString.copyFrom(row.getRow())) + .setType( + HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME) + .build(); + ClientProtos.ClientService.BlockingInterface rs = conn.getClient(sn); + ClientProtos.GetRequest req = + ClientProtos.GetRequest.newBuilder() + .setRegion(regionSpecifier) + .setGet(ProtobufUtil.toGet(new Get(ROW_KEY))).build(); + rs.get(null, req); + assignedRegions.add(info); + } catch(Exception ex) { + LOG.debug("Caught exception while verifying group region", ex); + } + } + foundRegions.add(info); + } + if (TableName.NAMESPACE_TABLE_NAME.equals(info.getTable())) { + Cell cell = row.getColumnLatestCell(HConstants.CATALOG_FAMILY, + HConstants.SERVER_QUALIFIER); + ServerName sn = null; + if(cell != null) { + sn = ServerName.parseVersionedServerName(CellUtil.cloneValue(cell)); + } + if (tsm.isTableState(TableName.NAMESPACE_TABLE_NAME, + TableState.State.ENABLED)) { + try { + HBaseProtos.RegionSpecifier regionSpecifier = + HBaseProtos.RegionSpecifier.newBuilder() + .setValue(ByteString.copyFrom(row.getRow())) + .setType( + HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME) + .build(); + ClientProtos.ClientService.BlockingInterface rs = conn.getClient(sn); + ClientProtos.GetRequest req = + ClientProtos.GetRequest.newBuilder() + .setRegion(regionSpecifier) + .setGet(ProtobufUtil.toGet(new Get(ROW_KEY))).build(); + rs.get(null, req); + nsFound.set(true); + } catch(Exception ex) { + LOG.debug("Caught exception while verifying group region", ex); + } + } + } + } + return true; + } + }; + MetaScanner.metaScan(conf, visitor); + // if no regions in meta then we have to create the table + if (foundRegions.size() < 1 && rootMetaFound && !createSent && nsFound.get()) { + groupInfoManager.createGroupTable(masterServices); + createSent = true; + } + LOG.info("Group table: " + GROUP_TABLE_NAME + " isOnline: " + found.get() + + ", regionCount: " + foundRegions.size() + ", assignCount: " + + assignedRegions.size() + ", rootMetaFound: "+rootMetaFound); + found.set(found.get() && assignedRegions.size() == foundRegions.size() + && foundRegions.size() > 0); + } else { + LOG.info("Waiting for catalog tables to come online"); + found.set(false); + } + if (found.get()) { + LOG.debug("With group table online, refreshing cached information."); + groupInfoManager.refresh(true); + isOnline = true; + //flush any inconsistencies between ZK and HTable + groupInfoManager.flushConfig(groupInfoManager.groupMap); + } + } catch(Exception e) { + found.set(false); + LOG.warn("Failed to perform check", e); + } + try { + Thread.sleep(100); + } catch (InterruptedException e) { + LOG.info("Sleep interrupted", e); + } + } + return found.get(); + } + + public boolean isOnline() { + return isOnline; + } + + private boolean isMasterRunning() { + return !masterServices.isAborted() && !masterServices.isStopped(); + } + } + + private void createGroupTable(MasterServices masterServices) throws IOException { + HRegionInfo newRegions[] = new HRegionInfo[]{ + new HRegionInfo(GROUP_TABLE_DESC.getTableName(), null, null)}; + //we 
need to create the table this way to bypass + //checkInitialized + masterServices.getExecutorService() + .submit(new CreateTableHandler( + masterServices, + masterServices.getMasterFileSystem(), + GROUP_TABLE_DESC, + masterServices.getConfiguration(), + newRegions, + masterServices).prepare()); + //wait for region to be online + int tries = 600; + while(masterServices.getAssignmentManager().getRegionStates() + .getRegionServerOfRegion(newRegions[0]) == null && tries > 0) { + try { + Thread.sleep(100); + } catch (InterruptedException e) { + throw new IOException("Wait interrupted", e); + } + tries--; + } + if(tries <= 0) { + throw new IOException("Failed to create group table."); + } + } + +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupMasterObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupMasterObserver.java new file mode 100644 index 0000000..b909148 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupMasterObserver.java @@ -0,0 +1,148 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.group; + +import com.google.common.collect.Sets; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.CoprocessorEnvironment; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.NamespaceDescriptor; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.constraint.ConstraintException; +import org.apache.hadoop.hbase.coprocessor.BaseMasterObserver; +import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment; +import org.apache.hadoop.hbase.coprocessor.ObserverContext; +import org.apache.hadoop.hbase.master.MasterServices; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; + +import java.io.IOException; + +/** + * This class is a required component to enable Region Server Groups. + * It must be installed as a system coprocessor on the master. 
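 *
 * A possible installation sketch (the property name is the stock HBase master
 * coprocessor key; whether the admin endpoint must be listed alongside the
 * observer is an assumption here):
 * <pre>
 *   hbase.coprocessor.master.classes =
 *       org.apache.hadoop.hbase.group.GroupMasterObserver,
 *       org.apache.hadoop.hbase.group.GroupAdminEndpoint
 * </pre>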
+ */ +public class GroupMasterObserver extends BaseMasterObserver { + private static final org.apache.commons.logging.Log LOG = LogFactory.getLog(GroupMasterObserver.class); + + private MasterCoprocessorEnvironment menv; + private GroupAdminEndpoint groupAdminEndpoint; + private GroupAdmin groupAdmin; + private MasterServices master; + + @Override + public void start(CoprocessorEnvironment ctx) throws IOException { + menv = (MasterCoprocessorEnvironment)ctx; + master = menv.getMasterServices(); + } + + @Override + public void preCreateTable(ObserverContext ctx, HTableDescriptor desc, HRegionInfo[] regions) throws IOException { + boolean bypassSecurity = false; + String groupName = desc.getValue(GroupInfo.TABLEDESC_PROP_GROUP); + if(groupName == null) { + groupName = + master.getNamespaceDescriptor(desc.getTableName().getNamespaceAsString()) + .getConfigurationValue(GroupInfo.NAMESPACEDESC_PROP_GROUP); + bypassSecurity = true; + } else { + //we remove the property since it is ephemeral + desc.remove(GroupInfo.TABLEDESC_PROP_GROUP); + } + if(groupName == null) { + bypassSecurity = true; + groupName = GroupInfo.DEFAULT_GROUP; + } + GroupInfo groupInfo = getGroupAdmin().getGroupInfo(groupName); + if(groupInfo == null) { + throw new ConstraintException("Group "+groupName+" does not exist."); + } + if(!groupInfo.containsTable(desc.getTableName())) { + //Bypass security check if group assignment is taken from namespace + if(!bypassSecurity) { + groupAdminEndpoint.requireAdmin("moveTables(preCreateTable)"); + } + LOG.debug("Pre-moving table "+desc.getTableName()+" to group "+groupName); + getGroupAdmin().moveTables(Sets.newHashSet(desc.getTableName()), groupName); + } + } + + @Override + public void postDeleteTable(ObserverContext ctx, + TableName tableName) throws IOException { + try { + GroupInfo group = getGroupAdmin().getGroupInfoOfTable(tableName); + if(group != null) { + LOG.debug("Removing deleted table from table group "+group.getName()); + getGroupAdmin().moveTables(Sets.newHashSet(tableName), null); + } + } catch (ConstraintException ex) { + LOG.debug("Failed to perform group information cleanup for table: "+tableName, ex); + } catch (IOException ex) { + LOG.debug("Failed to perform group information cleanup for table: "+tableName, ex); + } + } + + @Override + public void preCreateNamespace(ObserverContext ctx, NamespaceDescriptor ns) throws IOException { + if(!ns.getName().equals(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR) && + !ns.getName().equals(NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR) && + ns.getConfigurationValue(GroupInfo.NAMESPACEDESC_PROP_GROUP) == null) { + throw new ConstraintException("Non-reserved namespaces must associate with a group"); + } + String group = ns.getConfigurationValue(GroupInfo.NAMESPACEDESC_PROP_GROUP); + if(group != null && getGroupAdmin().getGroupInfo(group) == null) { + throw new ConstraintException("Region server group "+group+" does not exit"); + } + } + + @Override + public void preModifyNamespace(ObserverContext ctx, NamespaceDescriptor ns) throws IOException { + preCreateNamespace(ctx, ns); + NamespaceDescriptor curr = master.getNamespaceDescriptor(ns.getName()); + if(curr.getConfigurationValue(GroupInfo.NAMESPACEDESC_PROP_GROUP) != null && + !curr.getConfigurationValue(GroupInfo.NAMESPACEDESC_PROP_GROUP) + .equals(ns.getConfigurationValue(GroupInfo.NAMESPACEDESC_PROP_GROUP))) { + throw new ConstraintException("Region server group affiliation can only be set once."); + } + } + + @Override + public void preCloneSnapshot(ObserverContext ctx, 
HBaseProtos.SnapshotDescription snapshot, HTableDescriptor hTableDescriptor) throws IOException { + preCreateTable(ctx, hTableDescriptor, null); + } + + private GroupAdminEndpoint getGroupAdminEndpoint() { + if(groupAdminEndpoint == null) { + groupAdminEndpoint = (GroupAdminEndpoint) + menv.getMasterServices().getMasterCoprocessorHost() + .findCoprocessor(GroupAdminEndpoint.class.getName()); + } + return groupAdminEndpoint; + } + + private GroupAdmin getGroupAdmin() { + if(groupAdmin == null) { + groupAdmin = getGroupAdminEndpoint().getGroupAdmin(); + } + return groupAdmin; + } + +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupMoveServerWorker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupMoveServerWorker.java new file mode 100644 index 0000000..f2eab9a --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupMoveServerWorker.java @@ -0,0 +1,206 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.group; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.Server; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.constraint.ConstraintException; +import org.apache.hadoop.hbase.master.MasterServices; + +import java.io.IOException; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Set; + +/** + * This is a worker class responsible for moving a set of servers + * from source group to target group. Supplied servers must be part + * of the source group. + * + * Servers are then moved to a temporary transition group. Any + * regions are then unassigned from the server. Once the servers + * are drained of any regions the servers are then moved to the + * destination group. 
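 *
 * A rough usage sketch (illustrative; the master, the servers-in-transition
 * map, and the group manager are assumed to be supplied by the calling admin
 * service, and the server/group names are made up):
 * <pre>
 *   MoveServerPlan plan = new MoveServerPlan(
 *       Sets.newHashSet("rs1.example.com:16020"), "batch_group");
 *   new Thread(new GroupMoveServerWorker(master, serversInTransition,
 *       groupManager, plan)).start();
 * </pre>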
+ */ +public class GroupMoveServerWorker implements Runnable { + private static final Log LOG = LogFactory.getLog(GroupMoveServerWorker.class); + + private MasterServices master; + private MoveServerPlan plan; + private String transGroup; + private String sourceGroup; + private GroupInfoManager groupManager; + private Map serversInTransition; + private volatile boolean succeeded; + + public GroupMoveServerWorker(Server master, Map serversInTransition, + GroupInfoManager groupManager, + MoveServerPlan plan) throws IOException { + this.serversInTransition = serversInTransition; + this.groupManager = groupManager; + this.master = (MasterServices)master; + this.plan = plan; + + synchronized (serversInTransition) { + //check server list + sourceGroup = groupManager.getGroupOfServer(plan.getServers().iterator().next()).getName(); + if(groupManager.getGroup(plan.getTargetGroup()) == null) { + throw new ConstraintException("Target group does not exist: "+plan.getTargetGroup()); + } + for(String server: plan.getServers()) { + if (serversInTransition.containsKey(server)) { + throw new DoNotRetryIOException( + "Server list contains a server that is already being moved: "+server); + } + String tmpGroup = groupManager.getGroupOfServer(server).getName(); + if (sourceGroup != null && !tmpGroup.equals(sourceGroup)) { + throw new DoNotRetryIOException( + "Move server request should only come from one source group. "+ + "Expecting only "+sourceGroup+" but contains "+tmpGroup); + } + } + if(sourceGroup.equals(plan.getTargetGroup())) { + throw new ConstraintException( + "Target group is the same as source group: "+plan.getTargetGroup()); + } + //update the servers as in transition + for(String server: plan.getServers()) { + serversInTransition.put(server, plan.getTargetGroup()); + } + if (!sourceGroup.startsWith(GroupInfo.TRANSITION_GROUP_PREFIX)) { + transGroup = GroupInfo.TRANSITION_GROUP_PREFIX+ + System.currentTimeMillis()+"_"+sourceGroup+"-"+plan.getTargetGroup(); + groupManager.addGroup(new GroupInfo(transGroup)); + } + groupManager.moveServers(plan.getServers(), sourceGroup, + transGroup!=null?transGroup:plan.getTargetGroup()); + } + } + + @Override + public void run() { + String name = "GroupMoveServer-"+transGroup+"-"+plan.getTargetGroup(); + Thread.currentThread().setName(name); + try { + boolean found; + do { + LOG.debug(name+" is awake"); + found = false; + for(String rs: plan.getServers()) { + List regions = getOnlineRegions(rs); + LOG.info("Unassigining "+regions.size()+" regions from server "+rs); + if(regions.size() > 0) { + //TODO bulk unassign + for(HRegionInfo region: regions) { + master.getAssignmentManager().unassign(region); + } + found = true; + } + } + try { + Thread.sleep(1000); + } catch (InterruptedException e) { + LOG.warn("Sleep interrupted", e); + } + } while(found); + succeeded = true; + LOG.info("Move server done: "+sourceGroup+"->"+plan.getTargetGroup()); + } catch(Exception e) { + succeeded = false; + LOG.error("Caught exception while running", e); + } + try { + complete(); + } catch (IOException e) { + succeeded = false; + LOG.error("Failed to complete move", e); + } + } + + private List getOnlineRegions(String hostPort) throws IOException { + List regions = new LinkedList(); + for(Map.Entry el: + master.getAssignmentManager().getRegionStates().getRegionAssignments().entrySet()) { + if (el.getValue().getHostAndPort().equals(hostPort)) { + regions.add(el.getKey()); + } + } + return regions; + } + + static class MoveServerPlan { + private Set servers; + private String 
targetGroup; + + public MoveServerPlan(Set servers, String targetGroup) { + this.servers = servers; + this.targetGroup = targetGroup; + } + + public Set getServers() { + return servers; + } + + public String getTargetGroup() { + return targetGroup; + } + } + + public void complete() throws IOException { + try { + String tmpSourceGroup = sourceGroup; + if (transGroup != null) { + tmpSourceGroup = transGroup; + LOG.debug("Moving "+plan.getServers().size()+ + " servers from transition group: "+transGroup+" to final group: "+plan.getTargetGroup()); + } + if (succeeded) { + groupManager.moveServers(plan.getServers(), tmpSourceGroup, plan.getTargetGroup()); + if (transGroup != null) { + groupManager.removeGroup(transGroup); + LOG.debug("Move done "+plan.getServers().size()+ + " servers from transition group: "+transGroup+" to final group: "+plan.getTargetGroup()); + } + LOG.debug("Move done "+plan.getServers().size()+ + " servers from source group: "+sourceGroup+" to final group: "+plan.getTargetGroup()); + } else { + //rollback + groupManager.moveServers(plan.getServers(), tmpSourceGroup, sourceGroup); + if (transGroup != null) { + groupManager.removeGroup(transGroup); + LOG.debug("Rollback done "+plan.getServers().size()+ + " servers from transition group: "+transGroup+" to old group: "+sourceGroup); + } + } + } finally { + //remove servers in transition + synchronized(serversInTransition) { + for(String server: plan.getServers()) { + serversInTransition.remove(server); + } + } + } + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupSerDe.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupSerDe.java new file mode 100644 index 0000000..adecdb6 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupSerDe.java @@ -0,0 +1,116 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.group; + +import com.google.common.collect.Lists; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.exceptions.DeserializationException; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos; +import org.apache.hadoop.hbase.zookeeper.ZKUtil; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.zookeeper.KeeperException; +import org.codehaus.jackson.map.ObjectMapper; +import org.codehaus.jackson.type.TypeReference; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.NavigableMap; + +//TODO do better encapsulation of SerDe logic from GroupInfoManager and GroupTracker +class GroupSerDe { + private static final Log LOG = LogFactory.getLog(GroupSerDe.class); + + public GroupSerDe() { + + } + + public List retrieveGroupList(HTable groupTable) throws IOException { + List groupInfoList = Lists.newArrayList(); + Result result = groupTable.get(new Get(GroupInfoManager.ROW_KEY)); + if(!result.isEmpty()) { + NavigableMap> dataMap = result.getNoVersionMap(); + for(byte[] groupName: dataMap.get(GroupInfoManager.META_FAMILY_BYTES).keySet()) { + RSGroupProtos.GroupInfo proto = + RSGroupProtos.GroupInfo.parseFrom( + dataMap.get(GroupInfoManager.META_FAMILY_BYTES).get(groupName)); + groupInfoList.add(toPojo(proto)); + } + } + return groupInfoList; + } + + public List retrieveGroupList(ZooKeeperWatcher watcher, + String groupBasePath) throws IOException { + List groupInfoList = Lists.newArrayList(); + //Overwrite any info stored by table, this takes precedence + try { + if(ZKUtil.checkExists(watcher, groupBasePath) != -1) { + for(String znode: ZKUtil.listChildrenAndWatchForNewChildren(watcher, groupBasePath)) { + byte[] data = ZKUtil.getData(watcher, ZKUtil.joinZNode(groupBasePath, znode)); + if(data.length > 0) { + ProtobufUtil.expectPBMagicPrefix(data); + ByteArrayInputStream bis = new ByteArrayInputStream( + data, ProtobufUtil.lengthOfPBMagic(), data.length); + groupInfoList.add(toPojo(RSGroupProtos.GroupInfo.parseFrom(bis))); + } + } + LOG.debug("Read ZK GroupInfo count:" + groupInfoList.size()); + } + } catch (KeeperException e) { + throw new IOException("Failed to read groupZNode",e); + } catch (DeserializationException e) { + throw new IOException("Failed to read groupZNode",e); + } catch (InterruptedException e) { + throw new IOException("Failed to read groupZNode",e); + } + return groupInfoList; + } + + public GroupInfo toPojo(RSGroupProtos.GroupInfo proto) { + GroupInfo groupInfo = new GroupInfo(proto.getName()); + groupInfo.addAllServers(proto.getServersList()); + for(HBaseProtos.TableName pTableName: proto.getTablesList()) { + groupInfo.addTable(ProtobufUtil.toTableName(pTableName)); + } + return groupInfo; + } + + public RSGroupProtos.GroupInfo toProto(GroupInfo pojo) { + List tables = + new ArrayList(pojo.getTables().size()); + for(TableName arg: pojo.getTables()) { + tables.add(ProtobufUtil.toProtoTableName(arg)); + } + return RSGroupProtos.GroupInfo.newBuilder().setName(pojo.getName()) + .addAllServers(pojo.getServers()) + 
.addAllTables(tables).build(); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupTracker.java new file mode 100644 index 0000000..0423b4c --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupTracker.java @@ -0,0 +1,340 @@ +/* + * Copyright 2010 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.group; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.Abortable; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.zookeeper.ZKUtil; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperListener; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperNodeTracker; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.zookeeper.KeeperException; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +public class GroupTracker extends ZooKeeperNodeTracker { + private static final Log LOG = LogFactory.getLog(GroupTracker.class); + + private List listeners = Collections.synchronizedList(new ArrayList()); + private GroupSerDe groupSerDe = new GroupSerDe(); + private volatile Map groupMap = new HashMap(); + private volatile Map serverMap = new HashMap(); + private RegionServerTracker rsTracker; + private volatile boolean started = false; + + /** + * Constructs a new ZK node tracker. + *
+ *
After construction, use {@link #start} to kick off tracking. + * + * @param watcher + * @param abortable + */ + public GroupTracker(ZooKeeperWatcher watcher, Abortable abortable) throws IOException { + //TODO make period configurable + super(watcher, + ZKUtil.joinZNode(watcher.baseZNode, GroupInfoManager.groupZNode), + abortable!=null?abortable:new PersistentAbortable(10000)); + if(abortable == null) { + ((PersistentAbortable)this.abortable).setGroupTracker(this); + } + rsTracker = new RegionServerTracker(watcher, abortable, this); + try { + ZKUtil.listChildrenAndWatchThem(watcher, node); + rsTracker.start(); + } catch (KeeperException e) { + throw new IOException("Failed to start RS tracker", e); + } + } + + public void addListener(Listener listener) { + listeners.add(listener); + } + + public void removeListener(Listener listener) { + listeners.remove(listener); + } + + @Override + public synchronized void start() { + super.start(); + started = true; + } + + @Override + public void nodeCreated(String path) { + if (path.equals(node)) { + refresh(); + } + } + + @Override + public void nodeDataChanged(String path) { + if (path.equals(node)) { + nodeCreated(path); + } + } + + @Override + public void nodeChildrenChanged(String path) { + if (path.startsWith(node)) { + refresh(); + } + } + + public void blockUntilReady(int timeout) throws InterruptedException, IOException { + blockUntilAvailable(timeout, false); + if(getData(false) != null) { + refresh(false); + } + } + + private void refresh() { + try { + refresh(false); + } catch (IOException e) { + this.abortable.abort("Failed to read group znode", e); + } + } + + private synchronized void refresh(boolean force) throws IOException { + List onlineRS = rsTracker.getOnlineServers(); + Set hostPorts = new HashSet(); + for(ServerName entry: onlineRS) { + hostPorts.add(entry.getHostAndPort()); + } + Map tmpGroupMap = new HashMap(); + Map tmpServerMap = new HashMap(); + for(GroupInfo groupInfo: listGroups()) { + tmpGroupMap.put(groupInfo.getName(), groupInfo); + for(String server: groupInfo.getServers()) { + tmpServerMap.put(server, groupInfo); + hostPorts.remove(server); + } + } + GroupInfo groupInfo = tmpGroupMap.get(GroupInfo.DEFAULT_GROUP); + groupInfo.addAllServers(hostPorts); + for(String entry: hostPorts) { + tmpServerMap.put(entry, groupInfo); + } + + //when reading sync on "this" if groupMap<->serverMap + //invariant needs to be guaranteed + groupMap = tmpGroupMap; + serverMap = tmpServerMap; + + Map map = getGroupMap(); + for(Listener listener : listeners) { + listener.groupMapChanged(map); + } + } + + private List listGroups() throws IOException { + return groupSerDe.retrieveGroupList(watcher, node); + } + + public GroupInfo getGroup(String name) { + GroupInfo groupInfo = groupMap.get(name); + return groupInfo; + } + + public GroupInfo getGroupOfServer(String hostPort) { + GroupInfo groupInfo = serverMap.get(hostPort); + return groupInfo; + } + + public Map getGroupMap() { + return Collections.unmodifiableMap(groupMap); + } + + public interface Listener { + public void groupMapChanged(Map groupMap); + } + + + /** + * This class is copied for RegionServerTracker + * We need our own since the other one was tied to ServerManager + * and thus the master + */ + private static class RegionServerTracker extends ZooKeeperListener { + private static final Log LOG = LogFactory.getLog(RegionServerTracker.class); + private volatile List regionServers = new ArrayList(); + private Abortable abortable; + private GroupTracker groupTracker; + + public 
RegionServerTracker(ZooKeeperWatcher watcher, + Abortable abortable, GroupTracker groupTracker) { + super(watcher); + this.abortable = abortable; + this.groupTracker = groupTracker; + } + + public void start() throws KeeperException, IOException { + watcher.registerListener(this); + refresh(); + } + + private void add(final List servers) throws IOException { + List temp = new ArrayList(); + for (String n: servers) { + ServerName sn = ServerName.parseServerName(ZKUtil.getNodeName(n)); + temp.add(sn); + } + regionServers = temp; + //we're refreshing groups, since default membership + //is dynamic and new servers may end up as new default group members + refreshGroups(); + } + + private void remove(final ServerName sn) { + List temp = new ArrayList(); + for(ServerName el: regionServers) { + if(!sn.equals(el)) { + temp.add(el); + } + } + regionServers = temp; + refreshGroups(); + } + + private void refreshGroups() { + if(groupTracker.started && groupTracker.getData(false) != null) { + groupTracker.refresh(); + } + } + + public void refresh() throws KeeperException, IOException { + List servers = + ZKUtil.listChildrenAndWatchThem(watcher, watcher.rsZNode); + add(servers); + } + + @Override + public void nodeDeleted(String path) { + if (path.startsWith(watcher.rsZNode)) { + String serverName = ZKUtil.getNodeName(path); + LOG.info("RegionServer ephemeral node deleted, processing expiration [" + + serverName + "]"); + ServerName sn = ServerName.parseServerName(serverName); + remove(sn); + } + } + + @Override + public void nodeChildrenChanged(String path) { + if (path.equals(watcher.rsZNode)) { + try { + List servers = + ZKUtil.listChildrenAndWatchThem(watcher, watcher.rsZNode); + add(servers); + } catch (IOException e) { + abortable.abort("Unexpected zk exception getting RS nodes", e); + } catch (KeeperException e) { + abortable.abort("Unexpected zk exception getting RS nodes", e); + } + } + } + + /** + * Gets the online servers. 
+ * @return list of online servers + */ + public List getOnlineServers() { + return regionServers; + } + } + + private static class Refresher extends Thread { + private final static Log LOG = LogFactory.getLog(Refresher.class); + private GroupTracker groupTracker; + private volatile boolean isRunning = true; + private int period; + + public Refresher(GroupTracker groupTracker, int period) { + this.groupTracker = groupTracker; + this.period = period; + this.setDaemon(true); + } + + public boolean isRunning() { + return isRunning; + } + + @Override + public void run() { + while(true) { + try { + groupTracker.rsTracker.refresh(); + groupTracker.refresh(true); + LOG.info("Recovery refresh successful"); + isRunning = false; + return; + } catch (IOException e) { + LOG.warn("Failed to refresh", e); + } catch (KeeperException e) { + LOG.warn("Failed to refresh", e); + } + try { + Thread.sleep(period); + } catch (InterruptedException e) { + } + } + } + } + + private static class PersistentAbortable implements Abortable { + private final Log LOG = LogFactory.getLog(Abortable.class); + private Refresher refresher; + private GroupTracker groupTracker; + private int period; + + + public PersistentAbortable(int period) { + this.period = period; + } + + public void setGroupTracker(GroupTracker groupTracker) { + this.groupTracker = groupTracker; + } + + @Override + public void abort(String why, Throwable e) { + LOG.warn("Launching referesher because of abort: "+why, e); + if(refresher == null || !refresher.isRunning()) { + refresher = new Refresher(groupTracker, period); + } + } + + @Override + public boolean isAborted() { + return false; + } + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/group/MXBean.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/group/MXBean.java new file mode 100644 index 0000000..a9acbc5 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/group/MXBean.java @@ -0,0 +1,68 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.group; + +import org.apache.hadoop.hbase.TableName; + +import java.io.IOException; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; + +public interface MXBean { + + public Map> getServersByGroup() throws IOException; + + public List getGroups() throws IOException; + + public Map getServersInTransition() throws IOException; + + public static class GroupInfoBean { + + private String name; + private List servers; + private List tables; + + //Need this to convert NavigableSet to List + public GroupInfoBean(GroupInfo groupInfo) { + this.name = groupInfo.getName(); + this.servers = new LinkedList(); + this.servers.addAll(groupInfo.getServers()); + this.tables = new LinkedList(); + for(TableName tableName: groupInfo.getTables()) { + this.tables.add(tableName.getNameAsString()); + } + } + + public String getName() { + return name; + } + + public List getServers() { + return servers; + } + + public List getTables() { + return tables; + } + } + +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/group/MXBeanImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/group/MXBeanImpl.java new file mode 100644 index 0000000..efe6b9f --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/group/MXBeanImpl.java @@ -0,0 +1,85 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.group; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.master.MasterServices; + +import java.io.IOException; +import java.util.HashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; + +public class MXBeanImpl implements MXBean { + private static final Log LOG = LogFactory.getLog(MXBeanImpl.class); + + private static MXBeanImpl instance = null; + + private GroupAdmin groupAdmin; + private MasterServices master; + + public synchronized static MXBeanImpl init( + final GroupAdmin groupAdmin, + MasterServices master) { + if (instance == null) { + instance = new MXBeanImpl(groupAdmin, master); + } + return instance; + } + + protected MXBeanImpl(final GroupAdmin groupAdmin, + MasterServices master) { + this.groupAdmin = groupAdmin; + this.master = master; + } + + @Override + public Map> getServersByGroup() throws IOException { + Map> data = new HashMap>(); + for (final ServerName entry : + master.getServerManager().getOnlineServersList()) { + GroupInfo groupInfo = groupAdmin.getGroupOfServer(entry.getHostAndPort()); + if(!data.containsKey(groupInfo.getName())) { + data.put(groupInfo.getName(), new LinkedList()); + } + data.get(groupInfo.getName()).add(entry.getHostAndPort()); + } + return data; + } + + @Override + public List getGroups() throws IOException { + LinkedList list = new LinkedList(); + for(GroupInfo group: groupAdmin.listGroups()) { + list.add(new GroupInfoBean(group)); + } + return list; + } + + @Override + public Map getServersInTransition() throws IOException { + return groupAdmin.listServersInTransition(); + } + +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java index 7c7f0b6..5dcf57a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java @@ -1009,7 +1009,7 @@ public class AssignmentManager { setEnabledTable(tableName); } LOG.info("Assigning " + region.getRegionNameAsString() + - " to " + plan.getDestination().toString()); + " to " + plan.getDestination()); // Transition RegionState to PENDING_OPEN regionStates.updateRegionState(region, State.PENDING_OPEN, plan.getDestination()); @@ -1188,8 +1188,7 @@ public class AssignmentManager { || existingPlan.getDestination() == null || !destServers.contains(existingPlan.getDestination())) { newPlan = true; - randomPlan = new RegionPlan(region, null, - balancer.randomAssignment(region, destServers)); + randomPlan = new RegionPlan(region, null, balancer.randomAssignment(region, destServers)); if (!region.isMetaTable() && shouldAssignRegionsWithFavoredNodes) { List regions = new ArrayList(1); regions.add(region); @@ -1381,6 +1380,14 @@ public class AssignmentManager { throw new IOException("Unable to determine a plan to assign region(s)"); } + if (bulkPlan.containsKey(LoadBalancer.BOGUS_SERVER_NAME)) { + // Found no plan for some regions, put those regions in RIT + for (HRegionInfo hri : bulkPlan.get(LoadBalancer.BOGUS_SERVER_NAME)) { + regionStates.updateRegionState(hri, State.FAILED_OPEN); + } + bulkPlan.remove(LoadBalancer.BOGUS_SERVER_NAME); + } + assign(regions.size(), servers.size(), "retainAssignment=true", bulkPlan); } @@ -1411,6 +1418,14 @@ public class AssignmentManager { throw new 
IOException("Unable to determine a plan to assign region(s)"); } + if (bulkPlan.containsKey(LoadBalancer.BOGUS_SERVER_NAME)) { + // Found no plan for some regions, put those regions in RIT + for (HRegionInfo hri : bulkPlan.get(LoadBalancer.BOGUS_SERVER_NAME)) { + regionStates.updateRegionState(hri, State.FAILED_OPEN); + } + bulkPlan.remove(LoadBalancer.BOGUS_SERVER_NAME); + } + processFavoredNodes(regions); assign(regions.size(), servers.size(), "round-robin=true", bulkPlan); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index 8c3027a..42b1155 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -1093,11 +1093,17 @@ public class HMaster extends HRegionServer implements MasterServices, Server { final byte[] destServerName) throws HBaseIOException { RegionState regionState = assignmentManager.getRegionStates(). getRegionState(Bytes.toString(encodedRegionName)); - if (regionState == null) { + + HRegionInfo hri; + if (Bytes.toString(encodedRegionName) + .equals(HRegionInfo.FIRST_META_REGIONINFO.getEncodedName())) { + hri = HRegionInfo.FIRST_META_REGIONINFO; + } else if (regionState != null) { + hri = regionState.getRegion(); + } else { throw new UnknownRegionException(Bytes.toStringBinary(encodedRegionName)); } - HRegionInfo hri = regionState.getRegion(); ServerName dest; if (destServerName == null || destServerName.length == 0) { LOG.info("Passed destination servername is null/empty so " + @@ -1110,7 +1116,12 @@ public class HMaster extends HRegionServer implements MasterServices, Server { return; } } else { - dest = ServerName.valueOf(Bytes.toString(destServerName)); + ServerName candidate = ServerName.valueOf(Bytes.toString(destServerName)); + dest = balancer.randomAssignment(hri, Lists.newArrayList(candidate)); + if (dest == null) { + LOG.debug("Unable to determine a plan to assign " + hri); + return; + } if (dest.equals(serverName) && balancer instanceof BaseLoadBalancer && !((BaseLoadBalancer)balancer).shouldBeOnMaster(hri)) { // To avoid unnecessary region moving later by balancer. Don't put user @@ -1956,4 +1967,9 @@ public class HMaster extends HRegionServer implements MasterServices, Server { } return tableNames; } + + @Override + public LoadBalancer getLoadBalancer() { + return balancer; + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java index e24d745..abd0268 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java @@ -48,6 +48,9 @@ import org.apache.hadoop.hbase.Stoppable; @InterfaceAudience.Private public interface LoadBalancer extends Configurable, Stoppable { + //used to signal to the caller that the region(s) cannot be assigned + ServerName BOGUS_SERVER_NAME = ServerName.parseServerName("127.0.0.1,1,1"); + /** * Set the current cluster status. 
This allows a LoadBalancer to map host name to a server * @param st diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java index 7733256..1f07b18 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java @@ -261,4 +261,9 @@ public interface MasterServices extends Server { * @throws IOException */ public List listTableNamesByNamespace(String name) throws IOException; + + /** + * @return load balancer + */ + public LoadBalancer getLoadBalancer(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java index 733f9dc..9584659 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java @@ -423,8 +423,8 @@ public class AccessController extends BaseMasterAndRegionObserver * @throws IOException if obtaining the current user fails * @throws AccessDeniedException if user has no authorization */ - private void requirePermission(String request, TableName tableName, byte[] family, byte[] qualifier, - Action... permissions) throws IOException { + public void requirePermission(String request, TableName tableName, byte[] family, + byte[] qualifier, Action... permissions) throws IOException { User user = getActiveUser(); AuthResult result = null; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/group/TestGroups.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/group/TestGroups.java new file mode 100644 index 0000000..4670465 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/group/TestGroups.java @@ -0,0 +1,422 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.group; + +import com.google.common.collect.Sets; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.MiniHBaseCluster; +import org.apache.hadoop.hbase.NamespaceDescriptor; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.Waiter; +import org.apache.hadoop.hbase.master.HMaster; +import org.apache.hadoop.hbase.master.MasterServices; +import org.apache.hadoop.hbase.master.ServerManager; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.mockito.Mockito; + +import javax.management.MBeanServer; +import javax.management.ObjectName; +import java.io.IOException; +import java.lang.management.ManagementFactory; +import java.security.PrivilegedExceptionAction; +import java.util.HashMap; +import java.util.Iterator; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.atomic.AtomicBoolean; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNotSame; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import static org.mockito.Mockito.atLeastOnce; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.verify; + +@Category({MediumTests.class}) +public class TestGroups extends TestGroupsBase { + protected static final Log LOG = LogFactory.getLog(TestGroups.class); + private static HMaster master; + private static GroupAdminEndpoint groupEndpoint; + private static boolean init = false; + + + @BeforeClass + public static void setUp() throws Exception { + TEST_UTIL = new HBaseTestingUtility(); + TEST_UTIL.getConfiguration().set( + HConstants.HBASE_MASTER_LOADBALANCER_CLASS, + GroupBasedLoadBalancer.class.getName()); + TEST_UTIL.getConfiguration().setBoolean( + HConstants.ZOOKEEPER_USEMULTI, + true); + TEST_UTIL.getConfiguration().set("hbase.coprocessor.master.classes", + GroupMasterObserver.class.getName() + "," + + GroupAdminEndpoint.class.getName()); + TEST_UTIL.startMiniCluster(NUM_SLAVES_BASE); + TEST_UTIL.getConfiguration().set( + ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, + ""+NUM_SLAVES_BASE); + + admin = TEST_UTIL.getHBaseAdmin(); + cluster = TEST_UTIL.getHBaseCluster(); + master = ((MiniHBaseCluster)cluster).getMaster(); + groupEndpoint = + (GroupAdminEndpoint)master.getMasterCoprocessorHost().findCoprocessor(GroupAdminEndpoint + .class.getName()); + + //wait for balancer to come online + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + return master.isInitialized() && + ((GroupBasedLoadBalancer) master.getLoadBalancer()).isOnline(); + } + }); + admin.setBalancerRunning(false,true); + groupAdmin = new 
VerifyingGroupAdminClient(TEST_UTIL.getConfiguration()); + } + + @AfterClass + public static void tearDown() throws Exception { + TEST_UTIL.shutdownMiniCluster(); + } + + @Before + public void beforeMethod() throws Exception { + if(!init) { + init = true; + afterMethod(); + } + + } + + @After + public void afterMethod() throws Exception { + deleteTableIfNecessary(); + deleteNamespaceIfNecessary(); + deleteGroups(); + + int missing = NUM_SLAVES_BASE + 1 - cluster.getClusterStatus().getServers().size(); + LOG.info("Restoring servers: "+missing); + for(int i=0; i() { + @Override + public boolean evaluate() throws Exception { + LOG.info("Waiting for cleanup to finish "+groupAdmin.listGroups()); + //Might be greater since moving servers back to default + //is after starting a server + + return groupAdmin.getGroupInfo(GroupInfo.DEFAULT_GROUP).getServers().size() + == NUM_SLAVES_BASE; + } + }); + } + + @Test + public void testJmx() throws Exception { + MBeanServer mBeanServer = ManagementFactory.getPlatformMBeanServer(); + Iterator it = mBeanServer.queryNames(new ObjectName("hadoop:name=Group,service=Group"), null).iterator(); + //verify it was loaded properly + assertEquals("hadoop:name=Group,service=Group", it.next().getCanonicalName()); + + final MXBeanImpl info = MXBeanImpl.init(groupAdmin, master); + GroupInfo defaultGroup = groupAdmin.getGroupInfo(GroupInfo.DEFAULT_GROUP); + assertEquals(2, info.getGroups().size()); + assertEquals(defaultGroup.getName(), info.getGroups().get(0).getName()); + assertEquals(defaultGroup.getServers(), Sets.newTreeSet(info.getGroups().get(0).getServers())); + assertEquals(defaultGroup.getServers(), Sets.newTreeSet(info.getServersByGroup().get(GroupInfo.DEFAULT_GROUP))); + assertEquals(0, info.getServersInTransition().size()); + + GroupInfo barGroup = addGroup(groupAdmin, "bar", 3); + TableName tableName1 = TableName.valueOf(tablePrefix+"_testJmx1"); + TableName tableName2 = TableName.valueOf(tablePrefix+"_testJmx2"); + TEST_UTIL.createTable(tableName1, Bytes.toBytes("f")); + TEST_UTIL.createTable(tableName2, Bytes.toBytes("f")); + groupAdmin.moveTables(Sets.newHashSet(tableName2), barGroup.getName()); + assertEquals(3, info.getGroups().size()); + + int defaultIndex = -1; + int barIndex = -1; + + for(int i=0; i serversInTransition = new HashMap(); + + Runnable failedRunnable = + new GroupMoveServerWorker(mockedMaster, + serversInTransition, + ((GroupAdminImpl)groupEndpoint.getGroupAdmin()).getGroupInfoManager(), + new GroupMoveServerWorker.MoveServerPlan(barGroup.getServers(), fooGroup.getName())); + Thread failedMoveServerThread = new Thread(failedRunnable); + failedMoveServerThread.start(); + failedMoveServerThread.join(); + verify(mockedMaster,atLeastOnce()).getAssignmentManager(); + barGroup = groupAdmin.getGroupInfo("bar"); + assertEquals(0, serversInTransition.size()); + assertEquals(3, barGroup.getServers().size()); + assertEquals(4, groupAdmin.listGroups().size()); + } + + @Test + public void testNamespaceCreateAndAssign() throws Exception { + LOG.info("testNamespaceCreateAndAssign"); + String nsName = tablePrefix+"_foo"; + final TableName tableName = TableName.valueOf(nsName, tablePrefix + "_testCreateAndAssign"); + GroupInfo appInfo = addGroup(groupAdmin, "appInfo", 1); + admin.createNamespace(NamespaceDescriptor.create(nsName) + .addConfiguration(GroupInfo.NAMESPACEDESC_PROP_GROUP, "appInfo").build()); + final HTableDescriptor desc = new HTableDescriptor(tableName); + desc.addFamily(new HColumnDescriptor("f")); + admin.createTable(desc); + //wait for 
created table to be assigned + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + return getTableRegionMap().get(desc.getTableName()) != null; + } + }); + ServerName targetServer = ServerName.parseServerName(appInfo.getServers().iterator().next()); + AdminProtos.AdminService.BlockingInterface rs = admin.getConnection().getAdmin(targetServer); + //verify it was assigned to the right group + assertEquals(1, ProtobufUtil.getOnlineRegions(rs).size()); + //verify prop was not stored as part of the schema + assertNull(admin.getTableDescriptor(tableName).getValue(GroupInfo.TABLEDESC_PROP_GROUP)); + } + + @Test + public void testDefaultNamespaceCreateAndAssign() throws Exception { + LOG.info("testDefaultNamespaceCreateAndAssign"); + final byte[] tableName = Bytes.toBytes(tablePrefix + "_testCreateAndAssign"); + admin.modifyNamespace(NamespaceDescriptor.create("default") + .addConfiguration(GroupInfo.NAMESPACEDESC_PROP_GROUP, "default").build()); + final HTableDescriptor desc = new HTableDescriptor(tableName); + desc.addFamily(new HColumnDescriptor("f")); + admin.createTable(desc); + //wait for created table to be assigned + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + return getTableRegionMap().get(desc.getTableName()) != null; + } + }); + } + + @Test + public void testNamespaceConstraint() throws Exception { + String nsName = tablePrefix+"_foo"; + String groupName = tablePrefix+"_foo"; + LOG.info("testNamespaceConstraint"); + //verify we can't create a namespace without a group assigned + try { + admin.createNamespace(NamespaceDescriptor.create(nsName).build()); + fail("Expected a constraint exception"); + } catch (IOException ex) { + } + groupAdmin.addGroup(groupName); + admin.createNamespace(NamespaceDescriptor.create(nsName) + .addConfiguration(GroupInfo.NAMESPACEDESC_PROP_GROUP, groupName) + .build()); + //test removing a referenced group + try { + groupAdmin.removeGroup(groupName); + fail("Expected a constraint exception"); + } catch (IOException ex) { + } + //test modify group + //changing with the same name is fine + admin.modifyNamespace( + NamespaceDescriptor.create(nsName) + .addConfiguration(GroupInfo.NAMESPACEDESC_PROP_GROUP, groupName) + .build()); + String anotherGroup = tablePrefix+"_anotherGroup"; + groupAdmin.addGroup(anotherGroup); + //changing to a new group is not + try { + admin.modifyNamespace( + NamespaceDescriptor.create(nsName) + .addConfiguration(GroupInfo.NAMESPACEDESC_PROP_GROUP, anotherGroup) + .build()); + fail("Expected a constraint exception"); + } catch (IOException ex) { + } + //test add non-existent group + admin.deleteNamespace(nsName); + groupAdmin.removeGroup(groupName); + try { + admin.createNamespace(NamespaceDescriptor.create(nsName) + .addConfiguration(GroupInfo.NAMESPACEDESC_PROP_GROUP, "foo") + .build()); + fail("Expected a constraint exception"); + } catch (IOException ex) { + } + } + + @Test + public void testGroupInfoMultiAccessing() throws Exception { + GroupInfoManager manager = ((GroupAdminImpl) groupEndpoint.getGroupAdmin()) + .getGroupInfoManager(); + final GroupInfo defaultGroup = manager.getGroup("default"); + // getGroup updates default group's server list + // this process must not affect other threads iterating the list + Iterator it = defaultGroup.getServers().iterator(); + manager.getGroup("default"); + it.next(); + } + + @Test + public void testTracker() throws IOException, InterruptedException { + 
LOG.info("testTracker"); + ZooKeeperWatcher watcher = + new ZooKeeperWatcher(TEST_UTIL.getConfiguration(), "testTracker", null); + GroupTracker tracker = new GroupTracker(watcher, null); + try { + final Map groupMap = new ConcurrentHashMap(); + final AtomicBoolean stateChanged = new AtomicBoolean(false); + GroupTracker.Listener listener = new GroupTracker.Listener() { + + @Override + public void groupMapChanged(Map map) { + groupMap.clear(); + groupMap.putAll(map); + stateChanged.set(true); + } + }; + tracker.addListener(listener); + tracker.start(); + + //wait for tracker to retrieve initial info + tracker.blockUntilReady(0); + int tries = 60000/100; + while(groupMap.size() < 1 && tries > 0) { + Thread.sleep(100); + tries--; + } + assertNotSame(0, tries); + assertNotNull(groupAdmin.getGroupInfo("default")); + + stateChanged.set(false); + groupAdmin.addGroup("foo"); + while(!stateChanged.get()) { + Thread.sleep(100); + } + stateChanged.set(false); + assertEquals(3, groupMap.size()); + assertNotNull(tracker.getGroup("foo")); + assertEquals(0, tracker.getGroup("foo").getServers().size()); + + addGroup(groupAdmin, "bar", 1); + while(!stateChanged.get()) { + Thread.sleep(100); + } + stateChanged.set(false); + assertEquals(4, groupMap.size()); + assertNotNull(tracker.getGroup("bar")); + assertEquals(1, tracker.getGroup("bar").getServers().size()); + } finally { + if(tracker != null) { + tracker.stop(); + } + } + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/group/TestGroupsBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/group/TestGroupsBase.java new file mode 100644 index 0000000..82265a2 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/group/TestGroupsBase.java @@ -0,0 +1,612 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.group; + +import com.google.common.collect.Maps; +import com.google.common.collect.Sets; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.ClusterStatus; +import org.apache.hadoop.hbase.HBaseCluster; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.NamespaceDescriptor; +import org.apache.hadoop.hbase.RegionLoad; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.Waiter; +import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.constraint.ConstraintException; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.Test; + +import java.io.IOException; +import java.security.SecureRandom; +import java.util.HashSet; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.TreeMap; +import java.util.TreeSet; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +public abstract class TestGroupsBase { + protected static final Log LOG = LogFactory.getLog(TestGroupsBase.class); + + //shared + protected final static String groupPrefix = "Group"; + protected final static String tablePrefix = "Group"; + protected final static SecureRandom rand = new SecureRandom(); + + //shared, cluster type specific + protected static HBaseTestingUtility TEST_UTIL; + protected static HBaseAdmin admin; + protected static HBaseCluster cluster; + protected static GroupAdminClient groupAdmin; + + public final static long WAIT_TIMEOUT = 60000*5; + public final static int NUM_SLAVES_BASE = 4; //number of slaves for the smallest cluster + + + + protected GroupInfo addGroup(GroupAdminClient gAdmin, String groupName, + int serverCount) throws IOException, InterruptedException { + GroupInfo defaultInfo = gAdmin + .getGroupInfo(GroupInfo.DEFAULT_GROUP); + assertTrue(defaultInfo != null); + assertTrue(defaultInfo.getServers().size() >= serverCount); + gAdmin.addGroup(groupName); + + Set set = new HashSet(); + for(String server: defaultInfo.getServers()) { + if(set.size() == serverCount) { + break; + } + set.add(server); + } + gAdmin.moveServers(set, groupName); + GroupInfo result = gAdmin.getGroupInfo(groupName); + assertTrue(result.getServers().size() >= serverCount); + return result; + } + + static void removeGroup(GroupAdminClient groupAdmin, String groupName) throws IOException { + GroupInfo groupInfo = groupAdmin.getGroupInfo(groupName); + groupAdmin.moveTables(groupInfo.getTables(), GroupInfo.DEFAULT_GROUP); + groupAdmin.moveServers(groupInfo.getServers(), GroupInfo.DEFAULT_GROUP); + groupAdmin.removeGroup(groupName); + } + + protected void deleteTableIfNecessary() throws IOException { + for (HTableDescriptor desc : TEST_UTIL.getHBaseAdmin().listTables(tablePrefix+".*")) { + TEST_UTIL.deleteTable(desc.getName()); + } + } + + protected void deleteNamespaceIfNecessary() throws IOException { + for (NamespaceDescriptor desc : 
TEST_UTIL.getHBaseAdmin().listNamespaceDescriptors()) { + if(desc.getName().startsWith(tablePrefix)) { + admin.deleteNamespace(desc.getName()); + } + } + } + + protected void deleteGroups() throws IOException { + GroupAdminClient groupAdmin = new GroupAdminClient(TEST_UTIL.getConfiguration()); + for(GroupInfo group: groupAdmin.listGroups()) { + if(!group.getName().equals(GroupInfo.DEFAULT_GROUP)) { + groupAdmin.moveTables(group.getTables(), GroupInfo.DEFAULT_GROUP); + groupAdmin.moveServers(group.getServers(), GroupInfo.DEFAULT_GROUP); + groupAdmin.removeGroup(group.getName()); + } + } + } + + public Map> getTableRegionMap() throws IOException { + Map> map = Maps.newTreeMap(); + Map>> tableServerRegionMap + = getTableServerRegionMap(); + for(TableName tableName : tableServerRegionMap.keySet()) { + if(!map.containsKey(tableName)) { + map.put(tableName, new LinkedList()); + } + for(List subset: tableServerRegionMap.get(tableName).values()) { + map.get(tableName).addAll(subset); + } + } + return map; + } + + public Map>> getTableServerRegionMap() + throws IOException { + Map>> map = Maps.newTreeMap(); + ClusterStatus status = TEST_UTIL.getHBaseClusterInterface().getClusterStatus(); + for(ServerName serverName : status.getServers()) { + for(RegionLoad rl : status.getLoad(serverName).getRegionsLoad().values()) { + TableName tableName = HRegionInfo.getTable(rl.getName()); + if(!map.containsKey(tableName)) { + map.put(tableName, new TreeMap>()); + } + if(!map.get(tableName).containsKey(serverName)) { + map.get(tableName).put(serverName, new LinkedList()); + } + map.get(tableName).get(serverName).add(rl.getNameAsString()); + } + } + return map; + } + + @Test(expected = ConstraintException.class) + public void testGroupInfoOfTableNonExistent() throws Exception { + groupAdmin.getGroupInfoOfTable(TableName.valueOf("nonexistent")); + } + + @Test + public void testCreateMultiRegion() throws IOException { + LOG.info("testCreateMultiRegion"); + byte[] tableName = Bytes.toBytes(tablePrefix + "_testCreateMultiRegion"); + byte[] end = {1,3,5,7,9}; + byte[] start = {0,2,4,6,8}; + byte[][] f = {Bytes.toBytes("f")}; + TEST_UTIL.createTable(tableName, f,1,start,end,10); + } + + @Test + public void testCreateAndDrop() throws Exception { + LOG.info("testCreateAndDrop"); + + final TableName tableName = TableName.valueOf(tablePrefix + "_testCreateAndDrop"); + TEST_UTIL.createTable(tableName, Bytes.toBytes("cf")); + //wait for created table to be assigned + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + return getTableRegionMap().get(tableName) != null; + } + }); + TEST_UTIL.deleteTable(tableName); + } + + @Test + public void testCreateAndAssign() throws Exception { + LOG.info("testCreateAndAssign"); + + final TableName tableName = TableName.valueOf(tablePrefix + "_testCreateAndAssign"); + GroupInfo appInfo = addGroup(groupAdmin, "appInfo", 1); + final HTableDescriptor desc = new HTableDescriptor(tableName); + desc.addFamily(new HColumnDescriptor("f")); + desc.setValue(GroupInfo.TABLEDESC_PROP_GROUP, appInfo.getName()); + admin.createTable(desc); + //wait for created table to be assigned + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + return getTableRegionMap().get(desc.getTableName()) != null; + } + }); + + + ServerName targetServer = ServerName.parseServerName(appInfo.getServers().iterator().next()); + AdminProtos.AdminService.BlockingInterface rs = 
admin.getConnection().getAdmin(targetServer); + //verify it was assigned to the right group + assertEquals(1, ProtobufUtil.getOnlineRegions(rs).size()); + //verify prop was not stored as part of the schema + assertNull(admin.getTableDescriptor(tableName).getValue(GroupInfo.TABLEDESC_PROP_GROUP)); + } + + @Test + public void testCreateAndAssignSameGroup() throws Exception { + LOG.info("testCreateAndAssignSameGroup"); + + final TableName tableName = TableName.valueOf(tablePrefix + "_testCreateAndAssignSameGroup"); + GroupInfo appInfo = addGroup(groupAdmin, "appInfo", 1); + groupAdmin.moveTables(Sets.newHashSet(tableName), appInfo.getName()); + final HTableDescriptor desc = new HTableDescriptor(tableName); + desc.addFamily(new HColumnDescriptor("f")); + desc.setValue(GroupInfo.TABLEDESC_PROP_GROUP, appInfo.getName()); + admin.createTable(desc); + //wait for created table to be assigned + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + return getTableRegionMap().get(desc.getTableName()) != null; + } + }); + ServerName targetServer = ServerName.parseServerName(appInfo.getServers().iterator().next()); + AdminProtos.AdminService.BlockingInterface rs = admin.getConnection().getAdmin(targetServer); + //verify it was assigned to the right group + assertEquals(1, ProtobufUtil.getOnlineRegions(rs).size()); + //verify prop was not stored as part of the schema + assertNull(admin.getTableDescriptor(tableName).getValue(GroupInfo.TABLEDESC_PROP_GROUP)); + } + + @Test + public void testSimpleRegionServerMove() throws IOException, + InterruptedException { + LOG.info("testSimpleRegionServerMove"); + + GroupInfo appInfo = addGroup(groupAdmin, groupPrefix + rand.nextInt(), 1); + GroupInfo adminInfo = addGroup(groupAdmin, groupPrefix + rand.nextInt(), 1); + GroupInfo dInfo = groupAdmin.getGroupInfo(GroupInfo.DEFAULT_GROUP); + assertEquals(4, groupAdmin.listGroups().size()); + assertEquals(1, adminInfo.getServers().size()); + assertEquals(1, appInfo.getServers().size()); + assertEquals(admin.getClusterStatus().getServers().size() - 3, dInfo.getServers().size()); + groupAdmin.moveServers(appInfo.getServers(), + GroupInfo.DEFAULT_GROUP); + groupAdmin.removeGroup(appInfo.getName()); + groupAdmin.moveServers(adminInfo.getServers(), + GroupInfo.DEFAULT_GROUP); + groupAdmin.removeGroup(adminInfo.getName()); + assertEquals(groupAdmin.listGroups().size(), 2); + } + + @Test + public void testMoveServers() throws Exception { + LOG.info("testMoveServers"); + + //create groups and assign servers + addGroup(groupAdmin, "bar", 3); + groupAdmin.addGroup("foo"); + + GroupInfo barGroup = groupAdmin.getGroupInfo("bar"); + GroupInfo fooGroup = groupAdmin.getGroupInfo("foo"); + assertEquals(3, barGroup.getServers().size()); + assertEquals(0, fooGroup.getServers().size()); + + //test fail bogus server move + try { + groupAdmin.moveServers(Sets.newHashSet("foo:9999"),"foo"); + fail("Bogus servers shouldn't have been successfully moved."); + } catch(IOException ex) { + String exp = "Server foo:9999 is not an online server in default group."; + String msg = "Expected '"+exp+"' in exception message: "; + assertTrue(msg+" "+ex.getMessage(), ex.getMessage().contains(exp)); + } + + //test success case + LOG.info("moving servers "+barGroup.getServers()+" to group foo"); + groupAdmin.moveServers(barGroup.getServers(), fooGroup.getName()); + + barGroup = groupAdmin.getGroupInfo("bar"); + fooGroup = groupAdmin.getGroupInfo("foo"); + 
assertEquals(0,barGroup.getServers().size()); + assertEquals(3,fooGroup.getServers().size()); + + LOG.info("moving servers "+fooGroup.getServers()+" to group default"); + groupAdmin.moveServers(fooGroup.getServers(), GroupInfo.DEFAULT_GROUP); + + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + return admin.getClusterStatus().getServers().size() -1 == + groupAdmin.getGroupInfo(GroupInfo.DEFAULT_GROUP).getServers().size(); + } + }); + + fooGroup = groupAdmin.getGroupInfo("foo"); + assertEquals(0,fooGroup.getServers().size()); + + //test group removal + LOG.info("Remove group "+barGroup.getName()); + groupAdmin.removeGroup(barGroup.getName()); + assertEquals(null, groupAdmin.getGroupInfo(barGroup.getName())); + LOG.info("Remove group "+fooGroup.getName()); + groupAdmin.removeGroup(fooGroup.getName()); + assertEquals(null, groupAdmin.getGroupInfo(fooGroup.getName())); + } + + @Test + public void testTableMoveAndDrop() throws Exception { + LOG.info("testTableMove"); + + final TableName tableName = TableName.valueOf(tablePrefix + "_testTableMoveAndDrop"); + final byte[] familyNameBytes = Bytes.toBytes("f"); + String newGroupName = "g_" + rand.nextInt(); + final GroupInfo newGroup = addGroup(groupAdmin, newGroupName, 2); + + HTable ht = TEST_UTIL.createTable(tableName, familyNameBytes); + assertEquals(4, + TEST_UTIL.createMultiRegions(TEST_UTIL.getConfiguration(), ht, familyNameBytes, 4)); + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + List regions = getTableRegionMap().get(tableName); + if (regions == null) + return false; + return getTableRegionMap().get(tableName).size() >= 5; + } + }); + + GroupInfo tableGrp = groupAdmin.getGroupInfoOfTable(tableName); + assertTrue(tableGrp.getName().equals(GroupInfo.DEFAULT_GROUP)); + + //change table's group + LOG.info("Moving table "+tableName+" to "+newGroup.getName()); + groupAdmin.moveTables(Sets.newHashSet(tableName), newGroup.getName()); + + //verify group change + assertEquals(newGroup.getName(), + groupAdmin.getGroupInfoOfTable(tableName).getName()); + + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + Map> serverMap = getTableServerRegionMap().get(tableName); + int count = 0; + if (serverMap != null) { + for (ServerName rs : serverMap.keySet()) { + if (newGroup.containsServer(rs.getHostAndPort())) { + count += serverMap.get(rs).size(); + } + } + } + return count == 5; + } + }); + + //verify removed table is removed from group + TEST_UTIL.deleteTable(tableName); + assertEquals(0, groupAdmin.getGroupInfo(newGroup.getName()).getTables().size()); + } + + @Test + public void testGroupBalance() throws Exception { + LOG.info("testGroupBalance"); + + final TableName tableName = TableName.valueOf(tablePrefix+"_testGroupBalance"); + final byte[] familyNameBytes = Bytes.toBytes("f"); + String newGroupName = "g_" + rand.nextInt(); + final GroupInfo newGroup = addGroup(groupAdmin, newGroupName, 3); + final HTableDescriptor desc = new HTableDescriptor(tableName); + desc.addFamily(new HColumnDescriptor("f")); + desc.setValue(GroupInfo.TABLEDESC_PROP_GROUP, newGroup.getName()); + byte [] startKey = Bytes.toBytes("aaaaa"); + byte [] endKey = Bytes.toBytes("zzzzz"); + admin.createTable(desc, startKey, endKey, 6); + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + List regions = 
getTableRegionMap().get(tableName); + if (regions == null) { + return false; + } + return regions.size() >= 6; + } + }); + + //make assignment uneven, move all regions to one server + Map> assignMap = + getTableServerRegionMap().get(tableName); + final ServerName first = assignMap.entrySet().iterator().next().getKey(); + for(HRegionInfo region: admin.getTableRegions(tableName)) { + if(!assignMap.get(first).contains(region)) { + admin.move(region.getEncodedNameAsBytes(), Bytes.toBytes(first.getServerName())); + } + } + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + Map> map = getTableServerRegionMap().get(tableName); + if (map == null) { + return true; + } + List regions = map.get(first); + if (regions == null) { + return true; + } + return regions.size() >= 6; + } + }); + + //balance the other group and make sure it doesn't affect the new group + groupAdmin.balanceGroup(GroupInfo.DEFAULT_GROUP); + assertEquals(6, getTableServerRegionMap().get(tableName).get(first).size()); + + groupAdmin.balanceGroup(newGroupName); + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + for (List regions : getTableServerRegionMap().get(tableName).values()) { + if (2 != regions.size()) { + return false; + } + } + return true; + } + }); + } + + @Test + public void testRegionMove() throws Exception { + LOG.info("testRegionMove"); + + final GroupInfo newGroup = addGroup(groupAdmin, "g_" + rand.nextInt(), 1); + final TableName tableName = TableName.valueOf(tablePrefix + rand.nextInt()); + final byte[] familyNameBytes = Bytes.toBytes("f"); + HTable ht = TEST_UTIL.createTable(tableName, familyNameBytes); + + // All the regions created below will be assigned to the default group. + assertEquals(5, TEST_UTIL.createMultiRegions(TEST_UTIL.getConfiguration(), ht, familyNameBytes, 5)); + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + List regions = getTableRegionMap().get(tableName); + if (regions == null) + return false; + return getTableRegionMap().get(tableName).size() >= 6; + } + }); + + //get target region to move + Map> assignMap = + getTableServerRegionMap().get(tableName); + String targetRegion = null; + for(ServerName server : assignMap.keySet()) { + targetRegion = assignMap.get(server).size() > 0 ? assignMap.get(server).get(0) : null; + if(targetRegion != null) { + break; + } + } + //get server which is not a member of new group + ServerName targetServer = null; + for(ServerName server : admin.getClusterStatus().getServers()) { + if(!newGroup.containsServer(server.getHostAndPort())) { + targetServer = server; + break; + } + } + + final AdminProtos.AdminService.BlockingInterface targetRS = + admin.getConnection().getAdmin(targetServer); + + //move target server to group + groupAdmin.moveServers(Sets.newHashSet(targetServer.getHostAndPort()), newGroup.getName()); + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + return ProtobufUtil.getOnlineRegions(targetRS).size() <= 0; + } + }); + + // Lets move this region to the new group. 
+ TEST_UTIL.getHBaseAdmin().move(Bytes.toBytes(HRegionInfo.encodeRegionName(Bytes.toBytes(targetRegion))), + Bytes.toBytes(targetServer.getServerName())); + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + return + getTableRegionMap().get(tableName) != null && + getTableRegionMap().get(tableName).size() == 6 && + admin.getClusterStatus().getRegionsInTransition().size() < 1; + } + }); + + //verify that targetServer didn't open it + assertFalse(ProtobufUtil.getOnlineRegions(targetRS).contains(targetRegion)); + } + + @Test + public void testFailRemoveGroup() throws IOException, InterruptedException { + LOG.info("testFailRemoveGroup"); + + addGroup(groupAdmin, "bar", 3); + TableName tableName = TableName.valueOf(tablePrefix+"_my_table"); + TEST_UTIL.createTable(tableName, Bytes.toBytes("f")); + groupAdmin.moveTables(Sets.newHashSet(tableName), "bar"); + GroupInfo barGroup = groupAdmin.getGroupInfo("bar"); + //group is not empty therefore it should fail + try { + groupAdmin.removeGroup(barGroup.getName()); + fail("Expected remove group to fail"); + } catch(IOException e) { + } + //group cannot lose all it's servers therefore it should fail + try { + groupAdmin.moveServers(barGroup.getServers(), GroupInfo.DEFAULT_GROUP); + fail("Expected move servers to fail"); + } catch(IOException e) { + } + + groupAdmin.moveTables(barGroup.getTables(), GroupInfo.DEFAULT_GROUP); + try { + groupAdmin.removeGroup(barGroup.getName()); + fail("Expected move servers to fail"); + } catch(IOException e) { + } + + groupAdmin.moveServers(barGroup.getServers(), GroupInfo.DEFAULT_GROUP); + groupAdmin.removeGroup(barGroup.getName()); + + assertEquals(2, groupAdmin.listGroups().size()); + } + + @Test + public void testKillRS() throws Exception { + LOG.info("testKillRS"); + + final TableName tableName = TableName.valueOf(tablePrefix + "_testKillRS"); + GroupInfo appInfo = addGroup(groupAdmin, "appInfo", 1); + final HTableDescriptor desc = new HTableDescriptor(tableName); + desc.addFamily(new HColumnDescriptor("f")); + desc.setValue(GroupInfo.TABLEDESC_PROP_GROUP, appInfo.getName()); + admin.createTable(desc); + //wait for created table to be assigned + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + return getTableRegionMap().get(desc.getTableName()) != null; + } + }); + + ServerName targetServer = ServerName.parseServerName(appInfo.getServers().first()); + AdminProtos.AdminService.BlockingInterface targetRS = + admin.getConnection().getAdmin(targetServer); + HRegionInfo targetRegion = ProtobufUtil.getOnlineRegions(targetRS).get(0); + assertEquals(1, ProtobufUtil.getOnlineRegions(targetRS).size()); + + try { + //stopping may cause an exception + //due to the connection loss + targetRS.stopServer(null, + AdminProtos.StopServerRequest.newBuilder().setReason("Die").build()); + } catch(Exception e) { + } + assertFalse(cluster.getClusterStatus().getServers().contains(targetServer)); + + //wait for created table to be assigned + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + return cluster.getClusterStatus().getRegionsInTransition().size() == 0; + } + }); + TreeSet newServers = Sets.newTreeSet(); + newServers.add(groupAdmin.getGroupInfo(GroupInfo.DEFAULT_GROUP).getServers().first()); + groupAdmin.moveServers(newServers, appInfo.getName()); + admin.assign(targetRegion.getRegionName()); + + //wait for region to be assigned + 
TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + return cluster.getClusterStatus().getRegionsInTransition().size() == 0; + } + }); + + targetServer = ServerName.parseServerName(newServers.first()); + targetRS = + admin.getConnection().getAdmin(targetServer); + assertEquals(1, ProtobufUtil.getOnlineRegions(targetRS).size()); + assertEquals(tableName, + ProtobufUtil.getOnlineRegions(targetRS).get(0).getTable()); + } +} \ No newline at end of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/group/TestSecureGroupAdminEndpoint.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/group/TestSecureGroupAdminEndpoint.java new file mode 100644 index 0000000..6768848 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/group/TestSecureGroupAdminEndpoint.java @@ -0,0 +1,331 @@ +/* +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ + +package org.apache.hadoop.hbase.group; + +import com.google.common.collect.Sets; +import com.google.protobuf.ByteString; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.Coprocessor; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.NamespaceDescriptor; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException; +import org.apache.hadoop.hbase.master.HMaster; +import org.apache.hadoop.hbase.master.MasterCoprocessorHost; +import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos; +import org.apache.hadoop.hbase.security.AccessDeniedException; +import org.apache.hadoop.hbase.security.User; +import org.apache.hadoop.hbase.security.access.AccessControlLists; +import org.apache.hadoop.hbase.security.access.AccessController; +import org.apache.hadoop.hbase.security.access.SecureTestUtil; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import java.security.PrivilegedExceptionAction; +import java.util.Set; +import java.util.TreeSet; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; + +import static org.junit.Assert.fail; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyString; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.spy; + +/** +* Performs authorization 
checks for common operations, according to different +* levels of authorized users. +*/ +@Category(MediumTests.class) +@SuppressWarnings("rawtypes") +public class TestSecureGroupAdminEndpoint { + private static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + private static Configuration conf; + + // user with all permissions + private static User SUPERUSER; + // user granted with all global permission + private static User USER_ADMIN; + // user with rw permissions + private static User USER_RW; + // user with read-only permissions + private static User USER_RO; + // user is table owner. will have all permissions on table + private static User USER_OWNER; + // user with create table permissions alone + private static User USER_CREATE; + // user with no permissions + private static User USER_NONE; + + private static AccessController ACCESS_CONTROLLER; + private static GroupAdminEndpoint GROUP_ENDPOINT; + private static GroupMasterObserver GROUP_OBSERVER; + + private static GroupAdmin spy; + + @BeforeClass + public static void setupBeforeClass() throws Exception { + // setup configuration + conf = TEST_UTIL.getConfiguration(); + SecureTestUtil.enableSecurity(conf); + TEST_UTIL.getConfiguration().set( + HConstants.HBASE_MASTER_LOADBALANCER_CLASS, + GroupBasedLoadBalancer.class.getName()); + conf.set("hbase.coprocessor.master.classes", + conf.get("hbase.coprocessor.master.classes")+","+ + GroupAdminEndpoint.class.getName()+","+ + GroupMasterObserver.class.getName()); + + TEST_UTIL.startMiniCluster(1,2); + MasterCoprocessorHost cpHost = TEST_UTIL.getMiniHBaseCluster().getMaster() + .getMasterCoprocessorHost(); + cpHost.load(AccessController.class, Coprocessor.PRIORITY_HIGHEST, conf); + ACCESS_CONTROLLER = (AccessController) cpHost.findCoprocessor(AccessController.class.getName()); + + GROUP_ENDPOINT = ((GroupAdminEndpoint) + TEST_UTIL.getMiniHBaseCluster().getMaster() + .getMasterCoprocessorHost().findCoprocessor(GroupAdminEndpoint.class.getName())); + spy = spy(GROUP_ENDPOINT.getGroupAdmin()); + GROUP_ENDPOINT.setGroupAdmin(spy); + + GROUP_OBSERVER = + (GroupMasterObserver) cpHost.findCoprocessor(GroupMasterObserver.class.getName()); + + // Wait for the ACL table to become available + TEST_UTIL.waitTableAvailable(AccessControlLists.ACL_TABLE_NAME.getName(), 60000); + //wait for balancer to come online + final HMaster master = TEST_UTIL.getHBaseCluster().getMaster(); + waitForCondition(new PrivilegedExceptionAction() { + @Override + public Boolean run() throws Exception { + return !master.isInitialized() || + !((GroupBasedLoadBalancer)master.getLoadBalancer()).isOnline(); + } + }); + + + + // create a set of test users + SUPERUSER = User.createUserForTesting(conf, "admin", new String[]{"supergroup"}); + USER_ADMIN = User.createUserForTesting(conf, "admin2", new String[0]); + USER_NONE = User.createUserForTesting(conf, "nouser", new String[0]); + + // initialize access control + HTable meta = new HTable(conf, AccessControlLists.ACL_TABLE_NAME); + AccessControlProtos.AccessControlService.BlockingInterface protocol = + AccessControlProtos.AccessControlService + .newBlockingStub(meta.coprocessorService(HConstants.EMPTY_START_ROW)); + + AccessControlProtos.GrantRequest req = + AccessControlProtos.GrantRequest.newBuilder().setUserPermission( + AccessControlProtos.UserPermission.newBuilder() + .setUser(ByteString.copyFrom(Bytes.toBytes(USER_ADMIN.getShortName()))) + .setPermission(AccessControlProtos.Permission.newBuilder() + .setType(AccessControlProtos.Permission.Type.Global) + 
.setGlobalPermission(AccessControlProtos.GlobalPermission.newBuilder() + .addAction(AccessControlProtos.Permission.Action.ADMIN) + .addAction(AccessControlProtos.Permission.Action.CREATE) + .addAction(AccessControlProtos.Permission.Action.READ) + .addAction(AccessControlProtos.Permission.Action.WRITE))) + ).build(); + protocol.grant(null, req); + } + + @AfterClass + public static void tearDownAfterClass() throws Exception { + TEST_UTIL.shutdownMiniCluster(); + } + + public void verifyAllowed(User user, PrivilegedExceptionAction... actions) throws Exception { + for (PrivilegedExceptionAction action : actions) { + try { + user.runAs(action); + } catch (AccessDeniedException ade) { + fail("Expected action to pass for user '" + user.getShortName() + "' but was denied"); + } + } + } + + public void verifyAllowed(PrivilegedExceptionAction action, User... users) throws Exception { + for (User user : users) { + verifyAllowed(user, action); + } + } + + public void verifyDenied(User user, PrivilegedExceptionAction... actions) throws Exception { + for (PrivilegedExceptionAction action : actions) { + try { + user.runAs(action); + fail("Expected AccessDeniedException for user '" + user.getShortName() + "'"); + } catch (RetriesExhaustedWithDetailsException e) { + // in case of batch operations, and put, the client assembles a + // RetriesExhaustedWithDetailsException instead of throwing an + // AccessDeniedException + boolean isAccessDeniedException = false; + for (Throwable ex : e.getCauses()) { + if (ex instanceof AccessDeniedException) { + isAccessDeniedException = true; + break; + } + } + if (!isAccessDeniedException) { + fail("Not receiving AccessDeniedException for user '" + user.getShortName() + "'"); + } + } catch (AccessDeniedException ade) { + // expected result + } + } + } + + public void verifyDenied(PrivilegedExceptionAction action, User... 
users) throws Exception { + for (User user : users) { + verifyDenied(user, action); + } + } + + @Test + public void testGetAddRemove() throws Exception { + final AtomicLong counter = new AtomicLong(0); + + PrivilegedExceptionAction getGroup = new PrivilegedExceptionAction() { + public Object run() throws Exception { + new GroupAdminClient(conf).getGroupInfo("default"); + return null; + } + }; + verifyAllowed(getGroup, SUPERUSER, USER_ADMIN, USER_NONE); + + PrivilegedExceptionAction addGroup = new PrivilegedExceptionAction() { + public Object run() throws Exception { + //new to create a new client everytime so the correct user is propagated to the server + new GroupAdminClient(conf).addGroup("testGetAddRemove" + counter.incrementAndGet()); + return null; + } + }; + verifyDenied(addGroup, USER_NONE); + verifyAllowed(addGroup, SUPERUSER, USER_ADMIN); + + PrivilegedExceptionAction removeGroup = new PrivilegedExceptionAction() { + public Object run() throws Exception { + new GroupAdminClient(conf).removeGroup("testGetAddRemove" + counter.getAndDecrement()); + return null; + } + }; + verifyAllowed(removeGroup, SUPERUSER, USER_ADMIN); + verifyDenied(removeGroup, USER_NONE); + } + + @Test + public void testMoveServer() throws Exception { + PrivilegedExceptionAction action = new PrivilegedExceptionAction() { + public Object run() throws Exception { + String hostPort = "foo:123"; + Set set = new TreeSet(); + set.add(hostPort); + doNothing().when(spy).moveServers(any(Set.class),anyString()); + new GroupAdminClient(conf).moveServers(set, "testMoveServer"); + return null; + } + }; + verifyAllowed(action, SUPERUSER, USER_ADMIN); + verifyDenied(action, USER_NONE); + } + + @Test + public void testMoveTable() throws Exception { + PrivilegedExceptionAction action = new PrivilegedExceptionAction() { + public Object run() throws Exception { + doNothing().when(spy).moveTables(any(Set.class),anyString()); + new GroupAdminClient(conf).moveTables(Sets.newHashSet(TableName.valueOf("testMoveTable")), "default"); + return null; + } + }; + verifyAllowed(action, SUPERUSER, USER_ADMIN); + verifyDenied(action, USER_NONE); + } + + @Test + public void testListGroups() throws Exception { + PrivilegedExceptionAction action = new PrivilegedExceptionAction() { + public Object run() throws Exception { + new GroupAdminClient(conf).listGroups(); + return null; + } + }; + verifyAllowed(action, SUPERUSER, USER_ADMIN,USER_NONE); + } + + @Test + public void testCreateAndAssign() throws Exception { + final String nsName = "ns_testCreateAndAssign"; + final AtomicInteger counter = new AtomicInteger(0); + HBaseAdmin admin = new HBaseAdmin(conf); + admin.createNamespace(NamespaceDescriptor.create(nsName) + .addConfiguration(GroupInfo.NAMESPACEDESC_PROP_GROUP, "default").build()); + + PrivilegedExceptionAction action = new PrivilegedExceptionAction() { + public Object run() throws Exception { + final TableName tableName = + TableName.valueOf(nsName, "testCreateAndAssign_"+counter.incrementAndGet()); + final HTableDescriptor desc = new HTableDescriptor(tableName); + desc.addFamily(new HColumnDescriptor("f")); + GROUP_OBSERVER.preCreateTable(null, desc, null); + return null; + } + }; + verifyAllowed(action, SUPERUSER, USER_ADMIN, USER_NONE); + + action = new PrivilegedExceptionAction() { + public Object run() throws Exception { + final TableName tableName = + TableName.valueOf(nsName, "testCreateAndAssign_"+counter.incrementAndGet()); + final HTableDescriptor desc = new HTableDescriptor(tableName); + desc.addFamily(new 
HColumnDescriptor("f")); + desc.setValue(GroupInfo.TABLEDESC_PROP_GROUP, "default"); + GROUP_OBSERVER.preCreateTable(null, desc, null); + return null; + } + }; + verifyAllowed(action, SUPERUSER, USER_ADMIN); + verifyDenied(action, USER_NONE); + } + + private static void waitForCondition(PrivilegedExceptionAction action) throws Exception { + long sleepInterval = 100; + long timeout = 2*60000; + long tries = timeout/sleepInterval; + while(action.run()) { + Thread.sleep(sleepInterval); + if(tries-- < 0) { + fail("Timeout"); + } + } + } +} + diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/group/VerifyingGroupAdminClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/group/VerifyingGroupAdminClient.java new file mode 100644 index 0000000..2825184 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/group/VerifyingGroupAdminClient.java @@ -0,0 +1,126 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.group; + +import com.google.common.collect.Maps; +import com.google.common.collect.Sets; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.exceptions.DeserializationException; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos; +import org.apache.hadoop.hbase.zookeeper.ZKUtil; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.zookeeper.KeeperException; +import org.junit.Assert; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.util.Map; +import java.util.NavigableMap; +import java.util.Set; + +public class VerifyingGroupAdminClient extends GroupAdminClient { + private HTable table; + private ZooKeeperWatcher zkw; + private GroupSerDe serDe; + + public VerifyingGroupAdminClient(Configuration conf) + throws IOException { + super(conf); + table = new HTable(conf, GroupInfoManager.GROUP_TABLE_NAME_BYTES); + zkw = new ZooKeeperWatcher(conf, this.getClass().getSimpleName(), null); + serDe = new GroupSerDe(); + } + + @Override + public void addGroup(String groupName) throws IOException { + super.addGroup(groupName); + verify(); + } + + @Override + public void moveServers(Set servers, String targetGroup) throws IOException { + super.moveServers(servers, targetGroup); + verify(); + } + + @Override + public void moveTables(Set tables, String targetGroup) throws IOException { + super.moveTables(tables, targetGroup); + verify(); + } + + @Override + 
public void removeGroup(String name) throws IOException { + super.removeGroup(name); + verify(); + } + + public void verify() throws IOException { + Get get = new Get(GroupInfoManager.ROW_KEY); + get.addFamily(GroupInfoManager.META_FAMILY_BYTES); + Map groupMap = Maps.newHashMap(); + Set zList = Sets.newHashSet(); + + Result result = table.get(get); + if(!result.isEmpty()) { + NavigableMap> dataMap = + result.getNoVersionMap(); + for(byte[] groupNameBytes: + dataMap.get(GroupInfoManager.META_FAMILY_BYTES).keySet()) { + RSGroupProtos.GroupInfo proto = + RSGroupProtos.GroupInfo.parseFrom( + dataMap.get(GroupInfoManager.META_FAMILY_BYTES).get(groupNameBytes)); + GroupInfo groupInfo = serDe.toPojo(proto); + groupMap.put(groupInfo.getName(), groupInfo); + } + } + Assert.assertEquals(Sets.newHashSet(groupMap.values()), + Sets.newHashSet(super.listGroups())); + try { + String groupBasePath = ZKUtil.joinZNode(zkw.baseZNode, "groupInfo"); + for(String znode: ZKUtil.listChildrenNoWatch(zkw, groupBasePath)) { + byte[] data = ZKUtil.getData(zkw, ZKUtil.joinZNode(groupBasePath, znode)); + if(data.length > 0) { + ProtobufUtil.expectPBMagicPrefix(data); + ByteArrayInputStream bis = new ByteArrayInputStream( + data, ProtobufUtil.lengthOfPBMagic(), data.length); + zList.add(serDe.toPojo(RSGroupProtos.GroupInfo.parseFrom(bis))); + } + } + Assert.assertEquals(zList.size(), groupMap.size()); + for(GroupInfo groupInfo: zList) { + Assert.assertTrue(groupMap.get(groupInfo.getName()).equals(groupInfo)); + } + } catch (KeeperException e) { + throw new IOException("ZK verification failed", e); + } catch (DeserializationException e) { + throw new IOException("ZK verification failed", e); + } catch (InterruptedException e) { + throw new IOException("ZK verification failed", e); + } + } + +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java index 27c7073..1ff2491 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java @@ -27,12 +27,16 @@ import static org.junit.Assert.fail; import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -530,7 +534,7 @@ public class TestAssignmentManagerOnCluster { desc.getTableName(), Bytes.toBytes("A"), Bytes.toBytes("Z")); MetaTableAccessor.addRegionToMeta(meta, hri); - MyLoadBalancer.controledRegion = hri.getEncodedName(); + MyLoadBalancer.controledRegion = hri; HMaster master = TEST_UTIL.getHBaseCluster().getMaster(); master.assignRegion(hri); @@ -554,6 +558,105 @@ public class TestAssignmentManagerOnCluster { TEST_UTIL.deleteTable(Bytes.toBytes(table)); } } + + /** + * This tests round-robin assignment failed due to no bulkplan + */ + @Test (timeout=60000) + public void testRoundRobinAssignmentFailed() throws Exception { + String table = "testRoundRobinAssignmentFailed"; + try { + HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(table)); + desc.addFamily(new 
HColumnDescriptor(FAMILY)); + admin.createTable(desc); + + HTable meta = new HTable(conf, TableName.META_TABLE_NAME); + HRegionInfo hri = new HRegionInfo( + desc.getTableName(), Bytes.toBytes("A"), Bytes.toBytes("Z")); + MetaTableAccessor.addRegionToMeta(meta, hri); + + MyLoadBalancer.controledRegion = hri; + + HMaster master = TEST_UTIL.getHBaseCluster().getMaster(); + AssignmentManager am = master.getAssignmentManager(); + // round-robin assignment but balancer cannot find a plan + // assignment should fail + am.assign(Arrays.asList(hri)); + + // if bulk assignment cannot update region state to online + // or failed_open this waits until timeout + assertFalse(am.waitForAssignment(hri)); + RegionState state = am.getRegionStates().getRegionState(hri); + assertEquals(RegionState.State.FAILED_OPEN, state.getState()); + // Failed to open since no plan, so it's on no server + assertNull(state.getServerName()); + + // try again with valid plan + MyLoadBalancer.controledRegion = null; + am.assign(Arrays.asList(hri)); + assertTrue(am.waitForAssignment(hri)); + + ServerName serverName = master.getAssignmentManager(). + getRegionStates().getRegionServerOfRegion(hri); + TEST_UTIL.assertRegionOnServer(hri, serverName, 200); + } finally { + MyLoadBalancer.controledRegion = null; + TEST_UTIL.deleteTable(Bytes.toBytes(table)); + } + } + + /** + * This tests retain assignment failed due to no bulkplan + */ + @Test (timeout=60000) + public void testRetainAssignmentFailed() throws Exception { + String table = "testRetainAssignmentFailed"; + try { + HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(table)); + desc.addFamily(new HColumnDescriptor(FAMILY)); + admin.createTable(desc); + + HTable meta = new HTable(conf, TableName.META_TABLE_NAME); + HRegionInfo hri = new HRegionInfo( + desc.getTableName(), Bytes.toBytes("A"), Bytes.toBytes("Z")); + MetaTableAccessor.addRegionToMeta(meta, hri); + + MyLoadBalancer.controledRegion = hri; + + HMaster master = TEST_UTIL.getHBaseCluster().getMaster(); + AssignmentManager am = master.getAssignmentManager(); + + Map regions = new HashMap(); + ServerName dest = TEST_UTIL.getHBaseCluster().getRegionServer(0).getServerName(); + regions.put(hri, dest); + // retainAssignment but balancer cannot find a plan + // assignment should fail + am.assign(regions); + + // if retain assignment cannot update region state to online + // or failed_open this waits until timeout + assertFalse(am.waitForAssignment(hri)); + RegionState state = am.getRegionStates().getRegionState(hri); + assertEquals(RegionState.State.FAILED_OPEN, state.getState()); + // Failed to open since no plan, so it's on no server + assertNull(state.getServerName()); + + // try retainAssigment again with valid plan + MyLoadBalancer.controledRegion = null; + am.assign(regions); + assertTrue(am.waitForAssignment(hri)); + + ServerName serverName = master.getAssignmentManager(). 
+ getRegionStates().getRegionServerOfRegion(hri); + TEST_UTIL.assertRegionOnServer(hri, serverName, 200); + + // it retains on same server as specified + assertEquals(serverName, dest); + } finally { + MyLoadBalancer.controledRegion = null; + TEST_UTIL.deleteTable(Bytes.toBytes(table)); + } + } /** * This tests region open failure which is not recoverable @@ -1174,7 +1277,7 @@ public class TestAssignmentManagerOnCluster { static class MyLoadBalancer extends StochasticLoadBalancer { // For this region, if specified, always assign to nowhere - static volatile String controledRegion = null; + static volatile HRegionInfo controledRegion = null; static volatile Integer countRegionServers = null; static AtomicInteger counter = new AtomicInteger(0); @@ -1182,7 +1285,7 @@ public class TestAssignmentManagerOnCluster { @Override public ServerName randomAssignment(HRegionInfo regionInfo, List servers) { - if (regionInfo.getEncodedName().equals(controledRegion)) { + if (regionInfo.equals(controledRegion)) { return null; } return super.randomAssignment(regionInfo, servers); @@ -1200,8 +1303,26 @@ public class TestAssignmentManagerOnCluster { return null; } } + if (regions.get(0).equals(controledRegion)) { + Map> m = Maps.newHashMap(); + m.put(LoadBalancer.BOGUS_SERVER_NAME, regions); + return m; + } return super.roundRobinAssignment(regions, servers); } + + @Override + public Map> retainAssignment( + Map regions, List servers) { + for (HRegionInfo hri : regions.keySet()) { + if (hri.equals(controledRegion)) { + Map> m = Maps.newHashMap(); + m.put(LoadBalancer.BOGUS_SERVER_NAME, Lists.newArrayList(regions.keySet())); + return m; + } + } + return super.retainAssignment(regions, servers); + } } public static class MyMaster extends HMaster { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java index 9bf3d10..788675f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java @@ -371,32 +371,29 @@ public class TestCatalogJanitor { @Override public void createNamespace(NamespaceDescriptor descriptor) throws IOException { - //To change body of implemented methods use File | Settings | File Templates. } @Override public void modifyNamespace(NamespaceDescriptor descriptor) throws IOException { - //To change body of implemented methods use File | Settings | File Templates. } @Override public void deleteNamespace(String name) throws IOException { - //To change body of implemented methods use File | Settings | File Templates. } @Override public NamespaceDescriptor getNamespaceDescriptor(String name) throws IOException { - return null; //To change body of implemented methods use File | Settings | File Templates. + return null; } @Override public List listNamespaceDescriptors() throws IOException { - return null; //To change body of implemented methods use File | Settings | File Templates. + return null; } @Override public List listTableDescriptorsByNamespace(String name) throws IOException { - return null; //To change body of implemented methods use File | Settings | File Templates. 
+ return null; } @Override @@ -405,6 +402,11 @@ public class TestCatalogJanitor { } @Override + public LoadBalancer getLoadBalancer() { + return null; + } + + @Override public void deleteTable(TableName tableName) throws IOException { } @Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestGroupBasedLoadBalancer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestGroupBasedLoadBalancer.java new file mode 100644 index 0000000..d8bd3b1 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestGroupBasedLoadBalancer.java @@ -0,0 +1,589 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.master.balancer; + +import com.google.common.collect.ArrayListMultimap; +import com.google.common.collect.Lists; +import org.apache.commons.lang.StringUtils; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableDescriptors; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.group.GroupBasedLoadBalancer; +import org.apache.hadoop.hbase.group.GroupInfo; +import org.apache.hadoop.hbase.group.GroupInfoManager; +import org.apache.hadoop.hbase.master.AssignmentManager; +import org.apache.hadoop.hbase.master.HMaster; +import org.apache.hadoop.hbase.master.MasterServices; +import org.apache.hadoop.hbase.master.RegionPlan; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.mockito.Mockito; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; + +import java.io.FileNotFoundException; +import java.io.IOException; +import java.security.SecureRandom; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.TreeMap; +import java.util.TreeSet; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +//TODO use stochastic based load balancer instead +@Category(SmallTests.class) +public class TestGroupBasedLoadBalancer { + + private static final Log LOG = LogFactory.getLog(TestGroupBasedLoadBalancer.class); + private static GroupBasedLoadBalancer loadBalancer; + private 
static SecureRandom rand; + + static String[] groups = new String[] { GroupInfo.DEFAULT_GROUP, "dg2", "dg3", + "dg4" }; + static TableName[] tables = + new TableName[] { TableName.valueOf("dt1"), + TableName.valueOf("dt2"), + TableName.valueOf("dt3"), + TableName.valueOf("dt4")}; + static List servers; + static Map groupMap; + static Map tableMap; + static List tableDescs; + int[] regionAssignment = new int[] { 2, 5, 7, 10, 4, 3, 1 }; + static int regionId = 0; + + @BeforeClass + public static void beforeAllTests() throws Exception { + rand = new SecureRandom(); + servers = generateServers(7); + groupMap = constructGroupInfo(servers, groups); + tableMap = new HashMap(); + tableDescs = constructTableDesc(); + Configuration conf = HBaseConfiguration.create(); + conf.set("hbase.regions.slop", "0"); + conf.set("hbase.group.grouploadbalancer.class", SimpleLoadBalancer.class.getCanonicalName()); + loadBalancer = new GroupBasedLoadBalancer(getMockedGroupInfoManager()); + loadBalancer.setMasterServices(getMockedMaster()); + loadBalancer.setConf(conf); + loadBalancer.initialize(); + } + + /** + * Test the load balancing algorithm. + * + * Invariant is that all servers of the group should be hosting either floor(average) or + * ceiling(average) + * + * @throws Exception + */ + @Test + public void testBalanceCluster() throws Exception { + Map> servers = mockClusterServers(); + ArrayListMultimap list = convertToGroupBasedMap(servers); + LOG.info("Mock Cluster : " + printStats(list)); + List plans = loadBalancer.balanceCluster(servers); + ArrayListMultimap balancedCluster = reconcile( + list, plans); + LOG.info("Mock Balance : " + printStats(balancedCluster)); + assertClusterAsBalanced(balancedCluster); + } + + /** + * Invariant is that all servers of a group have load between floor(avg) and + * ceiling(avg) number of regions. + */ + private void assertClusterAsBalanced( + ArrayListMultimap groupLoadMap) { + for (String gName : groupLoadMap.keySet()) { + List groupLoad = groupLoadMap.get(gName); + int numServers = groupLoad.size(); + int numRegions = 0; + int maxRegions = 0; + int minRegions = Integer.MAX_VALUE; + for (ServerAndLoad server : groupLoad) { + int nr = server.getLoad(); + if (nr > maxRegions) { + maxRegions = nr; + } + if (nr < minRegions) { + minRegions = nr; + } + numRegions += nr; + } + if (maxRegions - minRegions < 2) { + // less than 2 between max and min, can't balance + return; + } + int min = numRegions / numServers; + int max = numRegions % numServers == 0 ? min : min + 1; + + for (ServerAndLoad server : groupLoad) { + assertTrue(server.getLoad() <= max); + assertTrue(server.getLoad() >= min); + } + } + } + + /** + * Tests immediate assignment. + * + * Invariant is that all regions have an assignment. + * + * @throws Exception + */ + @Test + public void testImmediateAssignment() throws Exception { + List regions = randomRegions(20); + Map assignments = loadBalancer + .immediateAssignment(regions, servers); + assertImmediateAssignment(regions, servers, assignments); + } + + /** + * All regions have an assignment. 
+ * + * @param regions + * @param servers + * @param assignments + * @throws java.io.IOException + * @throws java.io.FileNotFoundException + */ + private void assertImmediateAssignment(List regions, + List servers, Map assignments) + throws FileNotFoundException, IOException { + for (HRegionInfo region : regions) { + assertTrue(assignments.containsKey(region)); + ServerName server = assignments.get(region); + TableName tableName = region.getTable(); + + String groupName = + loadBalancer.getGroupInfoManager().getGroupOfTable(tableName); + assertTrue(StringUtils.isNotEmpty(groupName)); + GroupInfo gInfo = getMockedGroupInfoManager().getGroup(groupName); + assertTrue("Region is not correctly assigned to group servers.", + gInfo.containsServer(server.getHostAndPort())); + } + } + + /** + * Tests the bulk assignment used during cluster startup. + * + * Round-robin. Should yield a balanced cluster so same invariant as the + * load balancer holds, all servers holding either floor(avg) or + * ceiling(avg). + * + * @throws Exception + */ + @Test + public void testBulkAssignment() throws Exception { + List regions = randomRegions(25); + Map> assignments = loadBalancer + .roundRobinAssignment(regions, servers); + //test empty region/servers scenario + //this should not throw an NPE + loadBalancer.roundRobinAssignment(regions, + Collections.EMPTY_LIST); + //test regular scenario + assertTrue(assignments.keySet().size() == servers.size()); + for (ServerName sn : assignments.keySet()) { + List regionAssigned = assignments.get(sn); + for (HRegionInfo region : regionAssigned) { + TableName tableName = region.getTable(); + String groupName = + getMockedGroupInfoManager().getGroupOfTable(tableName); + assertTrue(StringUtils.isNotEmpty(groupName)); + GroupInfo gInfo = getMockedGroupInfoManager().getGroup( + groupName); + assertTrue( + "Region is not correctly assigned to group servers.", + gInfo.containsServer(sn.getHostAndPort())); + } + } + ArrayListMultimap loadMap = convertToGroupBasedMap(assignments); + assertClusterAsBalanced(loadMap); + } + + /** + * Test the cluster startup bulk assignment which attempts to retain + * assignment info. + * + * @throws Exception + */ + @Test + public void testRetainAssignment() throws Exception { + // Test simple case where all same servers are there + Map> currentAssignments = mockClusterServers(); + Map inputForTest = new HashMap(); + for (ServerName sn : currentAssignments.keySet()) { + for (HRegionInfo region : currentAssignments.get(sn)) { + inputForTest.put(region, sn); + } + } + //verify region->null server assignment is handled + inputForTest.put(randomRegions(1).get(0), null); + Map> newAssignment = loadBalancer + .retainAssignment(inputForTest, servers); + assertRetainedAssignment(inputForTest, servers, newAssignment); + } + + /** + * Asserts a valid retained assignment plan. + *

+ * Must meet the following conditions:
+ * <ul>
+ * <li>Every input region has an assignment, and to an online server
+ * <li>If a region had an existing assignment to a server with the same
+ * address as a currently online server, it will be assigned to it
+ * </ul>
+ * + * @param existing + * @param assignment + * @throws java.io.IOException + * @throws java.io.FileNotFoundException + */ + private void assertRetainedAssignment( + Map existing, List servers, + Map> assignment) + throws FileNotFoundException, IOException { + // Verify condition 1, every region assigned, and to online server + Set onlineServerSet = new TreeSet(servers); + Set assignedRegions = new TreeSet(); + for (Map.Entry> a : assignment.entrySet()) { + assertTrue( + "Region assigned to server that was not listed as online", + onlineServerSet.contains(a.getKey())); + for (HRegionInfo r : a.getValue()) + assignedRegions.add(r); + } + assertEquals(existing.size(), assignedRegions.size()); + + // Verify condition 2, every region must be assigned to correct server. + Set onlineHostNames = new TreeSet(); + for (ServerName s : servers) { + onlineHostNames.add(s.getHostname()); + } + + for (Map.Entry> a : assignment.entrySet()) { + ServerName currentServer = a.getKey(); + for (HRegionInfo r : a.getValue()) { + ServerName oldAssignedServer = existing.get(r); + TableName tableName = r.getTable(); + String groupName = + getMockedGroupInfoManager().getGroupOfTable(tableName); + assertTrue(StringUtils.isNotEmpty(groupName)); + GroupInfo gInfo = getMockedGroupInfoManager().getGroup( + groupName); + assertTrue( + "Region is not correctly assigned to group servers.", + gInfo.containsServer(currentServer.getHostAndPort())); + if (oldAssignedServer != null + && onlineHostNames.contains(oldAssignedServer + .getHostname())) { + // this region was previously assigned somewhere, and that + // host is still around, then the host must have been is a + // different group. + if (oldAssignedServer.getHostAndPort().equals( + currentServer.getHostAndPort()) == false) { + assertFalse(gInfo.containsServer(oldAssignedServer + .getHostAndPort())); + } + } + } + } + } + + private String printStats( + ArrayListMultimap groupBasedLoad) { + StringBuffer sb = new StringBuffer(); + sb.append("\n"); + for (String groupName : groupBasedLoad.keySet()) { + sb.append("Stats for group: " + groupName); + sb.append("\n"); + sb.append(groupMap.get(groupName).getServers()); + sb.append("\n"); + List groupLoad = groupBasedLoad.get(groupName); + int numServers = groupLoad.size(); + int totalRegions = 0; + sb.append("Per Server Load: \n"); + for (ServerAndLoad sLoad : groupLoad) { + sb.append("Server :" + sLoad.getServerName() + " Load : " + + sLoad.getLoad() + "\n"); + totalRegions += sLoad.getLoad(); + } + sb.append(" Group Statistics : \n"); + float average = (float) totalRegions / numServers; + int max = (int) Math.ceil(average); + int min = (int) Math.floor(average); + sb.append("[srvr=" + numServers + " rgns=" + totalRegions + " avg=" + + average + " max=" + max + " min=" + min + "]"); + sb.append("\n"); + sb.append("==============================="); + sb.append("\n"); + } + return sb.toString(); + } + + private ArrayListMultimap convertToGroupBasedMap( + final Map> serversMap) throws IOException { + ArrayListMultimap loadMap = ArrayListMultimap + .create(); + for (GroupInfo gInfo : getMockedGroupInfoManager().listGroups()) { + Set groupServers = gInfo.getServers(); + for (String hostAndPort : groupServers) { + ServerName actual = null; + for(ServerName entry: servers) { + if(entry.getHostAndPort().equals(hostAndPort)) { + actual = entry; + break; + } + } + List regions = serversMap.get(actual); + assertTrue("No load for " + actual, regions != null); + loadMap.put(gInfo.getName(), + new ServerAndLoad(actual, regions.size())); 
+ } + } + return loadMap; + } + + private ArrayListMultimap reconcile( + ArrayListMultimap previousLoad, + List plans) { + ArrayListMultimap result = ArrayListMultimap + .create(); + result.putAll(previousLoad); + if (plans != null) { + for (RegionPlan plan : plans) { + ServerName source = plan.getSource(); + updateLoad(result, source, -1); + ServerName destination = plan.getDestination(); + updateLoad(result, destination, +1); + } + } + return result; + } + + private void updateLoad( + ArrayListMultimap previousLoad, + final ServerName sn, final int diff) { + for (String groupName : previousLoad.keySet()) { + ServerAndLoad newSAL = null; + ServerAndLoad oldSAL = null; + for (ServerAndLoad sal : previousLoad.get(groupName)) { + if (ServerName.isSameHostnameAndPort(sn, sal.getServerName())) { + oldSAL = sal; + newSAL = new ServerAndLoad(sn, sal.getLoad() + diff); + break; + } + } + if (newSAL != null) { + previousLoad.remove(groupName, oldSAL); + previousLoad.put(groupName, newSAL); + break; + } + } + } + + private Map> mockClusterServers() throws IOException { + assertTrue(servers.size() == regionAssignment.length); + Map> assignment = new TreeMap>(); + for (int i = 0; i < servers.size(); i++) { + int numRegions = regionAssignment[i]; + List regions = assignedRegions(numRegions, servers.get(i)); + assignment.put(servers.get(i), regions); + } + return assignment; + } + + /** + * Generate a list of regions evenly distributed between the tables. + * + * @param numRegions The number of regions to be generated. + * @return List of HRegionInfo. + */ + private List randomRegions(int numRegions) { + List regions = new ArrayList(numRegions); + byte[] start = new byte[16]; + byte[] end = new byte[16]; + rand.nextBytes(start); + rand.nextBytes(end); + int regionIdx = rand.nextInt(tables.length); + for (int i = 0; i < numRegions; i++) { + Bytes.putInt(start, 0, numRegions << 1); + Bytes.putInt(end, 0, (numRegions << 1) + 1); + int tableIndex = (i + regionIdx) % tables.length; + HRegionInfo hri = new HRegionInfo( + tables[tableIndex], start, end, false, regionId++); + regions.add(hri); + } + return regions; + } + + /** + * Generate assigned regions to a given server using group information. + * + * @param numRegions the num regions to generate + * @param sn the servername + * @return the list of regions + * @throws java.io.IOException Signals that an I/O exception has occurred. + */ + private List assignedRegions(int numRegions, ServerName sn) throws IOException { + List regions = new ArrayList(numRegions); + byte[] start = new byte[16]; + byte[] end = new byte[16]; + Bytes.putInt(start, 0, numRegions << 1); + Bytes.putInt(end, 0, (numRegions << 1) + 1); + for (int i = 0; i < numRegions; i++) { + TableName tableName = getTableName(sn); + HRegionInfo hri = new HRegionInfo( + tableName, start, end, false, + regionId++); + regions.add(hri); + } + return regions; + } + + private static List generateServers(int numServers) { + List servers = new ArrayList(numServers); + for (int i = 0; i < numServers; i++) { + String host = "server" + rand.nextInt(100000); + int port = rand.nextInt(60000); + servers.add(ServerName.valueOf(host, port, -1)); + } + return servers; + } + + /** + * Construct group info, with each group having at least one server. 
+ * + * @param servers the servers + * @param groups the groups + * @return the map + */ + private static Map constructGroupInfo( + List servers, String[] groups) { + assertTrue(servers != null); + assertTrue(servers.size() >= groups.length); + int index = 0; + Map groupMap = new HashMap(); + for (String grpName : groups) { + GroupInfo groupInfo = new GroupInfo(grpName); + groupInfo.addServer(servers.get(index).getHostAndPort()); + groupMap.put(grpName, groupInfo); + index++; + } + while (index < servers.size()) { + int grpIndex = rand.nextInt(groups.length); + groupMap.get(groups[grpIndex]).addServer( + servers.get(index).getHostAndPort()); + index++; + } + return groupMap; + } + + /** + * Construct table descriptors evenly distributed between the groups. + * + * @return the list + */ + private static List constructTableDesc() { + List tds = Lists.newArrayList(); + int index = rand.nextInt(groups.length); + for (int i = 0; i < tables.length; i++) { + HTableDescriptor htd = new HTableDescriptor(tables[i]); + int grpIndex = (i + index) % groups.length ; + String groupName = groups[grpIndex]; + tableMap.put(tables[i], groupName); + tds.add(htd); + } + return tds; + } + + private static MasterServices getMockedMaster() throws IOException { + TableDescriptors tds = Mockito.mock(TableDescriptors.class); + Mockito.when(tds.get(tables[0])).thenReturn(tableDescs.get(0)); + Mockito.when(tds.get(tables[1])).thenReturn(tableDescs.get(1)); + Mockito.when(tds.get(tables[2])).thenReturn(tableDescs.get(2)); + Mockito.when(tds.get(tables[3])).thenReturn(tableDescs.get(3)); + MasterServices services = Mockito.mock(HMaster.class); + Mockito.when(services.getTableDescriptors()).thenReturn(tds); + AssignmentManager am = Mockito.mock(AssignmentManager.class); + Mockito.when(services.getAssignmentManager()).thenReturn(am); + return services; + } + + private static GroupInfoManager getMockedGroupInfoManager() throws IOException { + GroupInfoManager gm = Mockito.mock(GroupInfoManager.class); + Mockito.when(gm.getGroup(groups[0])).thenReturn( + groupMap.get(groups[0])); + Mockito.when(gm.getGroup(groups[1])).thenReturn( + groupMap.get(groups[1])); + Mockito.when(gm.getGroup(groups[2])).thenReturn( + groupMap.get(groups[2])); + Mockito.when(gm.getGroup(groups[3])).thenReturn( + groupMap.get(groups[3])); + Mockito.when(gm.listGroups()).thenReturn( + Lists.newLinkedList(groupMap.values())); + Mockito.when(gm.isOnline()).thenReturn(true); + Mockito.when(gm.getGroupOfTable(Mockito.any(TableName.class))) + .thenAnswer(new Answer() { + @Override + public String answer(InvocationOnMock invocation) throws Throwable { + return tableMap.get(invocation.getArguments()[0]); + } + }); + return gm; + } + + private TableName getTableName(ServerName sn) throws IOException { + TableName tableName = null; + GroupInfoManager gm = getMockedGroupInfoManager(); + GroupInfo groupOfServer = null; + for(GroupInfo gInfo : gm.listGroups()){ + if(gInfo.containsServer(sn.getHostAndPort())){ + groupOfServer = gInfo; + break; + } + } + + for(HTableDescriptor desc : tableDescs){ + if(gm.getGroupOfTable(desc.getTableName()).endsWith(groupOfServer.getName())){ + tableName = desc.getTableName(); + } + } + return tableName; + } +} diff --git a/hbase-shell/src/main/ruby/hbase.rb b/hbase-shell/src/main/ruby/hbase.rb index c48a1c8..1f72654 100644 --- a/hbase-shell/src/main/ruby/hbase.rb +++ b/hbase-shell/src/main/ruby/hbase.rb @@ -94,5 +94,6 @@ require 'hbase/quotas' require 'hbase/replication_admin' require 'hbase/security' require 
'hbase/visibility_labels' +require 'hbase/group_admin' include HBaseQuotasConstants diff --git a/hbase-shell/src/main/ruby/hbase/admin.rb b/hbase-shell/src/main/ruby/hbase/admin.rb index 52d9df7..874e025 100644 --- a/hbase-shell/src/main/ruby/hbase/admin.rb +++ b/hbase-shell/src/main/ruby/hbase/admin.rb @@ -379,6 +379,27 @@ module Hbase splits = h_table.getRegionLocations().keys().map{|i| Bytes.toString(i.getStartKey)}.delete_if{|k| k == ""}.to_java :String splits = org.apache.hadoop.hbase.util.Bytes.toByteArrays(splits) table_description = h_table.getTableDescriptor() + + #clone group + if(groups_available?(conf)) + group_admin = org.apache.hadoop.hbase.group.GroupAdminClient.new(@conf) + group_info = group_admin.getGroupInfoOfTable(table_name) + exp_group = group_info.getName + if(exp_group == "default") + exp_group = nil; + end + ns = + @admin.getNamespaceDescriptor( + org.apache.hadoop.hbase.TableName.valueOf(table_name).getNamespaceAsString) + ns_group = + ns.getValue(org.apache.hadoop.hbase.group.GroupInfo::NAMESPACEDESC_PROP_GROUP) + if(!exp_group.nil? && ns_group.nil?|| (ns_group != exp_group)) + yield " - Preserving explicit group assignment to #{exp_group}" if block_given? + table_description.setValue(org.apache.hadoop.hbase.group.GroupInfo::TABLEDESC_PROP_GROUP, + group_info.getName()) + end + end + yield 'Disabling table...' if block_given? disable(table_name) diff --git a/hbase-shell/src/main/ruby/hbase/group_admin.rb b/hbase-shell/src/main/ruby/hbase/group_admin.rb new file mode 100644 index 0000000..2cf9d80 --- /dev/null +++ b/hbase-shell/src/main/ruby/hbase/group_admin.rb @@ -0,0 +1,134 @@ +# +# Copyright The Apache Software Foundation +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +include Java +java_import org.apache.hadoop.hbase.util.Pair + +# Wrapper for org.apache.hadoop.hbase.group.GroupAdminClient +# Which is an API to manage region server groups + +module Hbase + class GroupAdmin + include HBaseConstants + + def initialize(configuration, formatter) + @admin = org.apache.hadoop.hbase.group.GroupAdminClient.new(configuration) + @conf = configuration + @formatter = formatter + end + + #---------------------------------------------------------------------------------------------- + # Returns a list of groups in hbase + def listGroups + @admin.listGroups.map { |g| g.getName } + end + #---------------------------------------------------------------------------------------------- + # get a group's information + def getGroup(group_name) + group = @admin.getGroupInfo(group_name) + res = {} + if block_given? + yield("Servers:") + else + res += v + end + group.getServers.each do |v| + if block_given? + yield(v) + else + res += v + end + end + if block_given? 
+ yield("Tables:") + else + res += v + end + group.getTables.each do |v| + if block_given? + yield(v.toString) + else + res += v.toString + end + end + end + #---------------------------------------------------------------------------------------------- + # add a group + def addGroup(group_name) + @admin.addGroup(group_name) + end + #---------------------------------------------------------------------------------------------- + # remove a group + def removeGroup(group_name) + @admin.removeGroup(group_name) + end + #---------------------------------------------------------------------------------------------- + # balance a group + def balanceGroup(group_name) + @admin.balanceGroup(group_name) + end + #---------------------------------------------------------------------------------------------- + # move server to a group + def moveServers(dest, *args) + servers = java.util.HashSet.new() + args[0].each do |s| + servers.add(s) + end + @admin.moveServers(servers, dest) + end + #---------------------------------------------------------------------------------------------- + # move server to a group + def moveTables(dest, *args) + tables = java.util.HashSet.new(); + args[0].each do |s| + tables.add(org.apache.hadoop.hbase.TableName.valueOf(s)) + end + @admin.moveTables(tables,dest) + end + #---------------------------------------------------------------------------------------------- + # get group of server + def getGroupOfServer(server) + @admin.getGroupOfServer(server) + end + #---------------------------------------------------------------------------------------------- + # get group of server + def getGroupOfTable(table) + @admin.getGroupInfoOfTable(org.apache.hadoop.hbase.TableName.valueOf(table)) + end + #---------------------------------------------------------------------------------------------- + # get list tables of groups + def listTablesOfGroup(group_name) + @admin.listTablesOfGroup(group_name) + end + #---------------------------------------------------------------------------------------------- + # list servers in transition + def listServersInTransition() + iter = @admin.listServersInTransition.entrySet.iterator + while iter.hasNext + entry = iter.next + if block_given? 
+ yield(entry.getKey, entry.getValue) + else + res[entry.getKey] = entry.getValue + end + end + end + end +end diff --git a/hbase-shell/src/main/ruby/hbase/hbase.rb b/hbase-shell/src/main/ruby/hbase/hbase.rb index 9b4b44d..26bfa45 100644 --- a/hbase-shell/src/main/ruby/hbase/hbase.rb +++ b/hbase-shell/src/main/ruby/hbase/hbase.rb @@ -45,6 +45,10 @@ module Hbase ::Hbase::Admin.new(configuration, formatter) end + def group_admin(formatter) + ::Hbase::GroupAdmin.new(configuration, formatter) + end + # Create new one each time def table(table, shell) ::Hbase::Table.new(configuration, table, shell) diff --git a/hbase-shell/src/main/ruby/shell.rb b/hbase-shell/src/main/ruby/shell.rb index 1c10bc1..d21f20c 100644 --- a/hbase-shell/src/main/ruby/shell.rb +++ b/hbase-shell/src/main/ruby/shell.rb @@ -87,6 +87,10 @@ module Shell @hbase_admin ||= hbase.admin(formatter) end + def group_admin + @group_admin ||= hbase.group_admin(formatter) + end + def hbase_table(name) hbase.table(name, self) end @@ -401,3 +405,22 @@ Shell.load_command_group( set_visibility ] ) + +Shell.load_command_group( + 'group', + :full_name => 'Groups', + :comment => "NOTE: Above commands are only applicable if running with the Groups setup", + :commands => %w[ + list_groups + get_group + add_group + remove_group + balance_group + move_group_servers + move_group_tables + get_server_group + get_table_group + list_group_tables + list_group_server_transitions + ] +) diff --git a/hbase-shell/src/main/ruby/shell/commands.rb b/hbase-shell/src/main/ruby/shell/commands.rb index 2128164..8eb4bbe 100644 --- a/hbase-shell/src/main/ruby/shell/commands.rb +++ b/hbase-shell/src/main/ruby/shell/commands.rb @@ -54,6 +54,10 @@ module Shell @shell.hbase_admin end + def group_admin + @shell.group_admin + end + def table(name) @shell.hbase_table(name) end diff --git a/hbase-shell/src/main/ruby/shell/commands/add_group.rb b/hbase-shell/src/main/ruby/shell/commands/add_group.rb new file mode 100644 index 0000000..7f91ee5 --- /dev/null +++ b/hbase-shell/src/main/ruby/shell/commands/add_group.rb @@ -0,0 +1,39 @@ +# +# Copyright The Apache Software Foundation +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +module Shell + module Commands + class AddGroup < Command + def help + return <<-EOF +Create a new region server group. 
+ +Example: + + hbase> add_group 'my_group' +EOF + end + + def command(group_name) + group_admin.addGroup(group_name) + end + end + end +end diff --git a/hbase-shell/src/main/ruby/shell/commands/balance_group.rb b/hbase-shell/src/main/ruby/shell/commands/balance_group.rb new file mode 100644 index 0000000..4c59f63 --- /dev/null +++ b/hbase-shell/src/main/ruby/shell/commands/balance_group.rb @@ -0,0 +1,37 @@ +# +# Copyright The Apache Software Foundation +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +module Shell + module Commands + class BalanceGroup < Command + def help + return <<-EOF +Balance a region server group + + hbase> group_balance 'my_group' +EOF + end + + def command(group_name) + group_admin.balanceGroup(group_name) + end + end + end +end diff --git a/hbase-shell/src/main/ruby/shell/commands/get_group.rb b/hbase-shell/src/main/ruby/shell/commands/get_group.rb new file mode 100644 index 0000000..5ed8226 --- /dev/null +++ b/hbase-shell/src/main/ruby/shell/commands/get_group.rb @@ -0,0 +1,44 @@ +# +# Copyright The Apache Software Foundation +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +module Shell + module Commands + class GetGroup < Command + def help + return <<-EOF +Get a region server group's information. + +Example: + + hbase> get_group 'default' +EOF + end + + def command(group_name) + now = Time.now + formatter.header([ "GROUP INFORMATION" ]) + group_admin.getGroup(group_name) do |s| + formatter.row([ s ]) + end + formatter.footer(now) + end + end + end +end diff --git a/hbase-shell/src/main/ruby/shell/commands/get_server_group.rb b/hbase-shell/src/main/ruby/shell/commands/get_server_group.rb new file mode 100644 index 0000000..c78d4d2 --- /dev/null +++ b/hbase-shell/src/main/ruby/shell/commands/get_server_group.rb @@ -0,0 +1,40 @@ +# +# Copyright The Apache Software Foundation +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+module Shell
+  module Commands
+    class GetServerGroup < Command
+      def help
+        return <<-EOF
+Get the group name the given region server is a member of.
+
+  hbase> get_server_group 'server1:port1'
+EOF
+      end
+
+      def command(server)
+        now = Time.now
+        groupName = group_admin.getGroupOfServer(server).getName
+        formatter.row([ groupName ])
+        formatter.footer(now,1)
+      end
+    end
+  end
+end
diff --git a/hbase-shell/src/main/ruby/shell/commands/get_table_group.rb b/hbase-shell/src/main/ruby/shell/commands/get_table_group.rb
new file mode 100644
index 0000000..dd8766d
--- /dev/null
+++ b/hbase-shell/src/main/ruby/shell/commands/get_table_group.rb
@@ -0,0 +1,41 @@
+#
+# Copyright The Apache Software Foundation
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+module Shell
+  module Commands
+    class GetTableGroup < Command
+      def help
+        return <<-EOF
+Get the group name the given table is a member of.
+
+  hbase> get_table_group 'myTable'
+EOF
+      end
+
+      def command(table)
+        now = Time.now
+        groupName =
+          group_admin.getGroupOfTable(table).getName
+        formatter.row([ groupName ])
+        formatter.footer(now,1)
+      end
+    end
+  end
+end
diff --git a/hbase-shell/src/main/ruby/shell/commands/list_group_server_transitions.rb b/hbase-shell/src/main/ruby/shell/commands/list_group_server_transitions.rb
new file mode 100644
index 0000000..313873f
--- /dev/null
+++ b/hbase-shell/src/main/ruby/shell/commands/list_group_server_transitions.rb
@@ -0,0 +1,44 @@
+#
+# Copyright The Apache Software Foundation
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#TODO make this command name shorter
+module Shell
+  module Commands
+    class ListGroupServerTransitions < Command
+      def help
+        return <<-EOF
+List region servers that are currently in transition between groups.
+
+Example:
+
+  hbase> list_group_server_transitions
+EOF
+      end
+      def command()
+        now = Time.now
+        formatter.header(["Server", "Destination"])
+        count = group_admin.listServersInTransition do |server, dest|
+          formatter.row([ server, dest ])
+        end
+        formatter.footer(now, count)
+      end
+    end
+  end
+end
diff --git a/hbase-shell/src/main/ruby/shell/commands/list_group_tables.rb b/hbase-shell/src/main/ruby/shell/commands/list_group_tables.rb
new file mode 100644
index 0000000..ae0862c
--- /dev/null
+++ b/hbase-shell/src/main/ruby/shell/commands/list_group_tables.rb
@@ -0,0 +1,45 @@
+#
+# Copyright The Apache Software Foundation
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+module Shell
+  module Commands
+    class ListGroupTables < Command
+      def help
+        return <<-EOF
+List member tables of a given region server group in HBase.
+
+Example:
+
+  hbase> list_group_tables 'default'
+EOF
+      end
+
+      def command(group_name)
+        now = Time.now
+        formatter.header([ "TABLES" ])
+        list = group_admin.listTablesOfGroup(group_name)
+        list.each do |table|
+          formatter.row([ table.toString ])
+        end
+        formatter.footer(now, list.size)
+      end
+    end
+  end
+end
diff --git a/hbase-shell/src/main/ruby/shell/commands/list_groups.rb b/hbase-shell/src/main/ruby/shell/commands/list_groups.rb
new file mode 100644
index 0000000..2e7dd08
--- /dev/null
+++ b/hbase-shell/src/main/ruby/shell/commands/list_groups.rb
@@ -0,0 +1,50 @@
+#
+# Copyright The Apache Software Foundation
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+module Shell
+  module Commands
+    class ListGroups < Command
+      def help
+        return <<-EOF
+List all region server groups. An optional regular expression parameter can
+be used to filter the output.
+
+Example:
+
+  hbase> list_groups
+  hbase> list_groups 'abc.*'
+EOF
+      end
+
+      def command(regex = ".*")
+        now = Time.now
+        formatter.header([ "GROUPS" ])
+
+        regex = /#{regex}/ unless regex.is_a?(Regexp)
+        list = group_admin.listGroups.grep(regex)
+        list.each do |group|
+          formatter.row([ group ])
+        end
+
+        formatter.footer(now, list.size)
+      end
+    end
+  end
+end
diff --git a/hbase-shell/src/main/ruby/shell/commands/move_group_servers.rb b/hbase-shell/src/main/ruby/shell/commands/move_group_servers.rb
new file mode 100644
index 0000000..5e5c850
--- /dev/null
+++ b/hbase-shell/src/main/ruby/shell/commands/move_group_servers.rb
@@ -0,0 +1,37 @@
+#
+# Copyright The Apache Software Foundation
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+module Shell
+  module Commands
+    class MoveGroupServers < Command
+      def help
+        return <<-EOF
+Reassign one or more region servers from one group to another.
+
+  hbase> move_group_servers 'dest',['server1:port','server2:port']
+EOF
+      end
+
+      def command(dest, *servers)
+        group_admin.moveServers(dest, *servers)
+      end
+    end
+  end
+end
diff --git a/hbase-shell/src/main/ruby/shell/commands/move_group_tables.rb b/hbase-shell/src/main/ruby/shell/commands/move_group_tables.rb
new file mode 100644
index 0000000..f495f2c
--- /dev/null
+++ b/hbase-shell/src/main/ruby/shell/commands/move_group_tables.rb
@@ -0,0 +1,37 @@
+#
+# Copyright The Apache Software Foundation
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+module Shell
+  module Commands
+    class MoveGroupTables < Command
+      def help
+        return <<-EOF
+Reassign tables from one group to another.
+
+  hbase> move_group_tables 'dest',['table1','table2']
+EOF
+      end
+
+      def command(dest, *tables)
+        group_admin.moveTables(dest, *tables)
+      end
+    end
+  end
+end
diff --git a/hbase-shell/src/main/ruby/shell/commands/remove_group.rb b/hbase-shell/src/main/ruby/shell/commands/remove_group.rb
new file mode 100644
index 0000000..66863a4
--- /dev/null
+++ b/hbase-shell/src/main/ruby/shell/commands/remove_group.rb
@@ -0,0 +1,37 @@
+#
+# Copyright The Apache Software Foundation
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+module Shell
+  module Commands
+    class RemoveGroup < Command
+      def help
+        return <<-EOF
+Remove a region server group.
+
+  hbase> remove_group 'my_group'
+EOF
+      end
+
+      def command(group_name)
+        group_admin.removeGroup(group_name)
+      end
+    end
+  end
+end
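Taken together, the new shell commands are thin wrappers around the group_admin client exposed to the shell, so a typical administrative session might look like the hypothetical sketch below. The group, server, and table names are illustrative only; the argument shapes follow the help text of each command above.

  hbase> add_group 'my_group'
  hbase> move_group_servers 'my_group',['server1:port','server2:port']
  hbase> move_group_tables 'my_group',['table1','table2']
  hbase> get_group 'my_group'
  hbase> balance_group 'my_group'
  hbase> get_server_group 'server1:port'
  hbase> get_table_group 'table1'
  hbase> list_group_tables 'my_group'
  hbase> list_groups
  hbase> remove_group 'my_group'

Because each command delegates directly to group_admin, any exception raised by the underlying group admin client surfaces to the user as the command's error output in the shell.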