diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/group/GroupAdmin.java hbase-client/src/main/java/org/apache/hadoop/hbase/group/GroupAdmin.java
new file mode 100644
index 0000000..36bfa65
--- /dev/null
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/group/GroupAdmin.java
@@ -0,0 +1,120 @@
+/**
+ * Copyright The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.group;
+
+import com.google.common.net.HostAndPort;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.Connection;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.List;
+import java.util.Set;
+
+/**
+ * Group user API interface used between client and server.
+ */
+@InterfaceAudience.Private
+public abstract class GroupAdmin implements Closeable {
+
+  /**
+   * Create a new GroupAdmin client.
+   * @param conn the connection to use
+   * @return a new GroupAdmin client
+   * @throws IOException on failure to create the client
+   */
+  public static GroupAdmin newClient(Connection conn) throws IOException {
+    return new GroupAdminClient(conn);
+  }
+
+  /**
+   * Gets the group information.
+   *
+   * @param groupName the group name
+   * @return An instance of GroupInfo
+   */
+  public abstract GroupInfo getGroupInfo(String groupName) throws IOException;
+
+  /**
+   * Gets the group info of a table.
+   *
+   * @param tableName the table name
+   * @return An instance of GroupInfo.
+   */
+  public abstract GroupInfo getGroupInfoOfTable(TableName tableName) throws IOException;
+
+  /**
+   * Move a set of servers to another group.
+   *
+   * @param servers set of servers, must be in the form HOST:PORT
+   * @param targetGroup the target group
+   * @throws java.io.IOException Signals that an I/O exception has occurred.
+   */
+  public abstract void moveServers(Set<HostAndPort> servers, String targetGroup)
+      throws IOException;
+
+  /**
+   * Move tables to a new group.
+   * This will unassign all of a table's regions so they can be reassigned to the correct group.
+   * @param tables set of tables to move
+   * @param targetGroup target group
+   * @throws java.io.IOException Signals that an I/O exception has occurred.
+   */
+  public abstract void moveTables(Set<TableName> tables, String targetGroup) throws IOException;
+
+  /**
+   * Add a new group.
+   * @param name name of the group
+   * @throws java.io.IOException Signals that an I/O exception has occurred.
+   */
+  public abstract void addGroup(String name) throws IOException;
+
+  /**
+   * Remove a group.
+   * @param name name of the group
+   * @throws java.io.IOException Signals that an I/O exception has occurred.
+   */
+  public abstract void removeGroup(String name) throws IOException;
+
+  /**
+   * Balance the regions in a group.
+   *
+   * @param name the name of the group to balance
+   * @return true if the balancer ran, false otherwise
+   * @throws java.io.IOException Signals that an I/O exception has occurred.
+   */
+  public abstract boolean balanceGroup(String name) throws IOException;
+
+  /**
+   * Lists the existing groups.
+   *
+   * @return Collection of GroupInfo.
+   */
+  public abstract List<GroupInfo> listGroups() throws IOException;
+
+  /**
+   * Retrieve the GroupInfo a server is affiliated with.
+   * @param hostPort the server
+   * @return the GroupInfo the server belongs to
+   * @throws java.io.IOException Signals that an I/O exception has occurred.
+   */
+  public abstract GroupInfo getGroupOfServer(HostAndPort hostPort) throws IOException;
+}
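For orientation before the client implementation below, here is a minimal, hedged sketch of how a caller might drive this API end to end. The group name, table name, server address, and port are invented for illustration; only the GroupAdmin calls themselves come from the interface above.

import java.io.IOException;
import java.util.Collections;

import com.google.common.net.HostAndPort;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.group.GroupAdmin;
import org.apache.hadoop.hbase.group.GroupInfo;

public class GroupAdminUsageSketch {
  public static void main(String[] args) throws IOException {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         GroupAdmin groupAdmin = GroupAdmin.newClient(conn)) {
      // Create a group, then move one server and one table into it.
      groupAdmin.addGroup("low_latency");                                         // hypothetical group
      groupAdmin.moveServers(
          Collections.singleton(HostAndPort.fromParts("rs1.example.com", 16020)), // hypothetical server
          "low_latency");
      groupAdmin.moveTables(Collections.singleton(TableName.valueOf("t1")),       // hypothetical table
          "low_latency");
      // Rebalance just that group and confirm the table's membership.
      boolean ran = groupAdmin.balanceGroup("low_latency");
      GroupInfo info = groupAdmin.getGroupInfoOfTable(TableName.valueOf("t1"));
      System.out.println("balancer ran=" + ran + ", group=" + info.getName());
    }
  }
}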
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/group/GroupAdminClient.java hbase-client/src/main/java/org/apache/hadoop/hbase/group/GroupAdminClient.java
new file mode 100644
index 0000000..fae3de3
--- /dev/null
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/group/GroupAdminClient.java
@@ -0,0 +1,192 @@
+/**
+ * Copyright The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.group;
+
+import com.google.common.collect.Sets;
+import com.google.common.net.HostAndPort;
+import com.google.protobuf.ServiceException;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos;
+import org.apache.hadoop.hbase.protobuf.generated.GroupProtos;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Set;
+
+/**
+ * Client used for managing region server group information.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+class GroupAdminClient extends GroupAdmin {
+  private GroupAdminProtos.GroupAdminService.BlockingInterface proxy;
+  private static final Log LOG = LogFactory.getLog(GroupAdminClient.class);
+
+  public GroupAdminClient(Connection conn) throws IOException {
+    proxy = GroupAdminProtos.GroupAdminService.newBlockingStub(
+        conn.getAdmin().coprocessorService());
+  }
+
+  @Override
+  public GroupInfo getGroupInfo(String groupName) throws IOException {
+    try {
+      GroupAdminProtos.GetGroupInfoResponse resp =
+          proxy.getGroupInfo(null,
+              GroupAdminProtos.GetGroupInfoRequest.newBuilder().setGroupName(groupName).build());
+      if(resp.hasGroupInfo()) {
+        return ProtobufUtil.toGroupInfo(resp.getGroupInfo());
+      }
+      return null;
+    } catch (ServiceException e) {
+      throw ProtobufUtil.getRemoteException(e);
+    }
+  }
+
+  @Override
+  public GroupInfo getGroupInfoOfTable(TableName tableName) throws IOException {
+    GroupAdminProtos.GetGroupInfoOfTableRequest request =
+        GroupAdminProtos.GetGroupInfoOfTableRequest.newBuilder()
+            .setTableName(ProtobufUtil.toProtoTableName(tableName)).build();
+
+    try {
+      return ProtobufUtil.toGroupInfo(proxy.getGroupInfoOfTable(null, request).getGroupInfo());
+    } catch (ServiceException e) {
+      throw ProtobufUtil.getRemoteException(e);
+    }
+  }
+
+  @Override
+  public void moveServers(Set<HostAndPort> servers, String targetGroup) throws IOException {
+    Set<HBaseProtos.HostPort> hostPorts = Sets.newHashSet();
+    for(HostAndPort el: servers) {
+      hostPorts.add(HBaseProtos.HostPort.newBuilder()
+          .setHostName(el.getHostText())
+          .setPort(el.getPort())
+          .build());
+    }
+    GroupAdminProtos.MoveServersRequest request =
+        GroupAdminProtos.MoveServersRequest.newBuilder()
+            .setTargetGroup(targetGroup)
+            .addAllServers(hostPorts).build();
+
+    try {
+      proxy.moveServers(null, request);
+    } catch (ServiceException e) {
+      throw ProtobufUtil.getRemoteException(e);
+    }
+  }
+
+  @Override
+  public void moveTables(Set<TableName> tables, String targetGroup) throws IOException {
+    GroupAdminProtos.MoveTablesRequest.Builder builder =
+        GroupAdminProtos.MoveTablesRequest.newBuilder()
+            .setTargetGroup(targetGroup);
+    for(TableName tableName: tables) {
+      builder.addTableName(ProtobufUtil.toProtoTableName(tableName));
+    }
+    try {
+      proxy.moveTables(null, builder.build());
+    } catch (ServiceException e) {
+      throw ProtobufUtil.getRemoteException(e);
+    }
+  }
+
+  @Override
+  public void addGroup(String groupName) throws IOException {
+    GroupAdminProtos.AddGroupRequest request =
+        GroupAdminProtos.AddGroupRequest.newBuilder()
+            .setGroupName(groupName).build();
+    try {
+      proxy.addGroup(null, request);
+    } catch (ServiceException e) {
+      throw ProtobufUtil.getRemoteException(e);
+    }
+  }
+
+  @Override
+  public void removeGroup(String name) throws IOException {
+    GroupAdminProtos.RemoveGroupRequest request =
+        GroupAdminProtos.RemoveGroupRequest.newBuilder()
+            .setGroupName(name).build();
+    try {
+      proxy.removeGroup(null, request);
+    } catch (ServiceException e) {
+      throw ProtobufUtil.getRemoteException(e);
+    }
+  }
+
+  @Override
+  public boolean balanceGroup(String name) throws IOException {
+    GroupAdminProtos.BalanceGroupRequest request =
+        GroupAdminProtos.BalanceGroupRequest.newBuilder()
+            .setGroupName(name).build();
+
+    try {
+      return proxy.balanceGroup(null, request).getBalanceRan();
+    } catch (ServiceException e) {
+      throw ProtobufUtil.getRemoteException(e);
+    }
+  }
+
+  @Override
+  public List<GroupInfo> listGroups() throws IOException {
+    try {
+      List<GroupProtos.GroupInfo> resp =
+          proxy.listGroupInfos(null,
+              GroupAdminProtos.ListGroupInfosRequest.newBuilder().build())
+          .getGroupInfoList();
+      List<GroupInfo> result = new ArrayList<GroupInfo>(resp.size());
+      for(GroupProtos.GroupInfo entry: resp) {
+        result.add(ProtobufUtil.toGroupInfo(entry));
+      }
+      return result;
+    } catch (ServiceException e) {
+      throw ProtobufUtil.getRemoteException(e);
+    }
+  }
+
+  @Override
+  public GroupInfo getGroupOfServer(HostAndPort hostPort) throws IOException {
+    GroupAdminProtos.GetGroupInfoOfServerRequest request =
+        GroupAdminProtos.GetGroupInfoOfServerRequest.newBuilder()
+            .setServer(HBaseProtos.HostPort.newBuilder()
+                .setHostName(hostPort.getHostText())
+                .setPort(hostPort.getPort())
+                .build())
+            .build();
+    try {
+      return ProtobufUtil.toGroupInfo(
+          proxy.getGroupInfoOfServer(null, request).getGroupInfo());
+    } catch (ServiceException e) {
+      throw ProtobufUtil.getRemoteException(e);
+    }
+  }
+
+  @Override
+  public void close() throws IOException {
+  }
+}
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
index 0d9c73b..9ca27d1 100644
--- hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
@@ -38,6 +38,7 @@ import java.util.Map.Entry;
 import java.util.NavigableSet;
 import java.util.concurrent.TimeUnit;
 
+import com.google.common.net.HostAndPort;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
@@ -70,6 +71,7 @@ import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.filter.ByteArrayComparable;
 import org.apache.hadoop.hbase.filter.Filter;
 import org.apache.hadoop.hbase.io.LimitInputStream;
+import org.apache.hadoop.hbase.group.GroupInfo;
 import org.apache.hadoop.hbase.io.TimeRange;
 import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos;
 import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.AccessControlService;
@@ -110,6 +112,7 @@ import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos;
 import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad;
 import org.apache.hadoop.hbase.protobuf.generated.ComparatorProtos;
 import org.apache.hadoop.hbase.protobuf.generated.FilterProtos;
+import org.apache.hadoop.hbase.protobuf.generated.GroupProtos;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo;
@@ -3038,7 +3041,7 @@
    * @param builder current message builder
    * @param in InputStream containing protobuf data
    * @param size known size of protobuf data
-   * @throws IOException 
+   * @throws IOException
    */
   public static void mergeFrom(Message.Builder builder, InputStream in, int size)
       throws IOException {
@@ -3053,7 +3056,7 @@
    * buffers where the message size is not known
    * @param builder current message builder
    * @param in InputStream containing protobuf data
-   * @throws IOException 
+   * @throws IOException
    */
   public static void mergeFrom(Message.Builder builder, InputStream in)
       throws IOException {
@@ -3067,8 +3070,8 @@
    * This version of protobuf's mergeFrom avoids the hard-coded 64MB limit for decoding
    * buffers when working with ByteStrings
    * @param builder current message builder
-   * @param  bs ByteString containing the
-   * @throws IOException 
+   * @param bs ByteString containing the
+   * @throws IOException
    */
   public static void mergeFrom(Message.Builder builder, ByteString bs) throws IOException {
     final CodedInputStream codedInput = bs.newCodedInput();
@@ -3082,7 +3085,7 @@
    * buffers when working with byte arrays
    * @param builder current message builder
    * @param b byte array
-   * @throws IOException 
+   * @throws IOException
    */
   public static void mergeFrom(Message.Builder builder, byte[] b) throws IOException {
     final CodedInputStream codedInput = CodedInputStream.newInstance(b);
@@ -3162,4 +3165,34 @@
     }
     return scList;
   }
+
+  public static GroupInfo toGroupInfo(GroupProtos.GroupInfo proto) {
+    GroupInfo groupInfo = new GroupInfo(proto.getName());
+    for(HBaseProtos.HostPort el: proto.getServersList()) {
+      groupInfo.addServer(HostAndPort.fromParts(el.getHostName(), el.getPort()));
+    }
+    for(HBaseProtos.TableName pTableName: proto.getTablesList()) {
+      groupInfo.addTable(ProtobufUtil.toTableName(pTableName));
+    }
+    return groupInfo;
+  }
+
+  public static GroupProtos.GroupInfo toProtoGroupInfo(GroupInfo pojo) {
+    List<HBaseProtos.TableName> tables =
+        new ArrayList<HBaseProtos.TableName>(pojo.getTables().size());
+    for(TableName arg: pojo.getTables()) {
+      tables.add(ProtobufUtil.toProtoTableName(arg));
+    }
+    List<HBaseProtos.HostPort> hostports =
+        new ArrayList<HBaseProtos.HostPort>(pojo.getServers().size());
+    for(HostAndPort el: pojo.getServers()) {
+      hostports.add(HBaseProtos.HostPort.newBuilder()
+          .setHostName(el.getHostText())
+          .setPort(el.getPort())
+          .build());
+    }
+    return GroupProtos.GroupInfo.newBuilder().setName(pojo.getName())
+        .addAllServers(hostports)
+        .addAllTables(tables).build();
+  }
 }
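The two converters added at the bottom of ProtobufUtil are intended to be inverses of each other. A small hedged round-trip sketch under that assumption; the group, server, and table values below are invented:

import com.google.common.net.HostAndPort;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.group.GroupInfo;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.GroupProtos;

public class GroupInfoRoundTripSketch {
  public static void main(String[] args) {
    GroupInfo pojo = new GroupInfo("low_latency");                    // hypothetical group
    pojo.addServer(HostAndPort.fromParts("rs1.example.com", 16020));  // hypothetical server
    pojo.addTable(TableName.valueOf("t1"));                           // hypothetical table

    // POJO -> protobuf -> POJO should preserve name, servers and tables,
    // which is exactly what GroupInfo.equals() compares.
    GroupProtos.GroupInfo proto = ProtobufUtil.toProtoGroupInfo(pojo);
    GroupInfo back = ProtobufUtil.toGroupInfo(proto);
    System.out.println("round trip preserved: " + pojo.equals(back));
  }
}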
diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/ServerName.java hbase-common/src/main/java/org/apache/hadoop/hbase/ServerName.java
index 059dcb8..a8cf67a 100644
--- hbase-common/src/main/java/org/apache/hadoop/hbase/ServerName.java
+++ hbase-common/src/main/java/org/apache/hadoop/hbase/ServerName.java
@@ -23,6 +23,7 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.regex.Pattern;
 
+import com.google.common.net.HostAndPort;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
@@ -88,9 +89,8 @@ public class ServerName implements Comparable<ServerName>, Serializable {
   public static final String UNKNOWN_SERVERNAME = "#unknown#";
 
   private final String servername;
-  private final String hostnameOnly;
-  private final int port;
   private final long startcode;
+  private final HostAndPort hostAndPort;
 
   /**
    * Cached versioned bytes of this ServerName instance.
@@ -102,10 +102,9 @@ public class ServerName implements Comparable<ServerName>, Serializable {
   private ServerName(final String hostname, final int port, final long startcode) {
     // Drop the domain is there is one; no need of it in a local cluster. With it, we get long
     // unwieldy names.
-    this.hostnameOnly = hostname;
-    this.port = port;
+    this.hostAndPort = HostAndPort.fromParts(hostname, port);
     this.startcode = startcode;
-    this.servername = getServerName(this.hostnameOnly, port, startcode);
+    this.servername = getServerName(hostname, port, startcode);
   }
 
   /**
@@ -189,7 +188,8 @@ public class ServerName implements Comparable<ServerName>, Serializable {
    * in compares, etc.
    */
  public String toShortString() {
-    return Addressing.createHostAndPortStr(getHostNameMinusDomain(this.hostnameOnly), this.port);
+    return Addressing.createHostAndPortStr(
+        getHostNameMinusDomain(hostAndPort.getHostText()), hostAndPort.getPort());
  }
 
  /**
@@ -208,11 +208,11 @@
  }
 
  public String getHostname() {
-    return hostnameOnly;
+    return hostAndPort.getHostText();
  }
 
  public int getPort() {
-    return port;
+    return hostAndPort.getPort();
  }
 
  public long getStartcode() {
@@ -256,7 +256,11 @@
    * {@link Addressing#createHostAndPortStr(String, int)}
    */
  public String getHostAndPort() {
-    return Addressing.createHostAndPortStr(this.hostnameOnly, this.port);
+    return Addressing.createHostAndPortStr(hostAndPort.getHostText(), hostAndPort.getPort());
+  }
+
+  public HostAndPort getHostPort() {
+    return hostAndPort;
  }
 
  /**
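ServerName now composes a single Guava HostAndPort instead of separate hostname/port fields, and the new getHostPort() accessor is what the group code can compare against GroupInfo membership. A hedged sketch of the value semantics this relies on; the hostname, port, and startcode are invented:

import com.google.common.net.HostAndPort;
import org.apache.hadoop.hbase.ServerName;

public class ServerNameHostPortSketch {
  public static void main(String[] args) {
    // Hypothetical regionserver identity.
    ServerName sn = ServerName.valueOf("rs1.example.com", 16020, 1431617380000L);

    // The composed HostAndPort equals one built from the same parts,
    // so it can be used directly as a set key in GroupInfo.
    HostAndPort hp = sn.getHostPort();
    System.out.println(hp.equals(HostAndPort.fromParts("rs1.example.com", 16020))); // true
    System.out.println(sn.getHostname().equals(hp.getHostText()));                  // true
    System.out.println(sn.getPort() == hp.getPort());                               // true
  }
}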
diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/group/GroupInfo.java hbase-common/src/main/java/org/apache/hadoop/hbase/group/GroupInfo.java
new file mode 100644
index 0000000..2f78224
--- /dev/null
+++ hbase-common/src/main/java/org/apache/hadoop/hbase/group/GroupInfo.java
@@ -0,0 +1,177 @@
+/**
+ * Copyright The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.group;
+
+import com.google.common.collect.Sets;
+import com.google.common.net.HostAndPort;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
+import java.io.Serializable;
+import java.util.Collection;
+import java.util.NavigableSet;
+import java.util.Set;
+
+/**
+ * Stores the group information of region server groups.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class GroupInfo implements Serializable {
+
+  public static final String DEFAULT_GROUP = "default";
+  public static final String NAMESPACEDESC_PROP_GROUP = "hbase.rsgroup.name";
+
+  private String name;
+  private Set<HostAndPort> servers;
+  private NavigableSet<TableName> tables;
+
+  public GroupInfo(String name) {
+    this(name, Sets.<HostAndPort>newHashSet(), Sets.<TableName>newTreeSet());
+  }
+
+  GroupInfo(String name,
+            Set<HostAndPort> servers,
+            NavigableSet<TableName> tables) {
+    this.name = name;
+    this.servers = servers;
+    this.tables = tables;
+  }
+
+  public GroupInfo(GroupInfo src) {
+    name = src.getName();
+    servers = Sets.newHashSet(src.getServers());
+    tables = Sets.newTreeSet(src.getTables());
+  }
+
+  /**
+   * Get group name.
+   *
+   * @return the name of the group
+   */
+  public String getName() {
+    return name;
+  }
+
+  /**
+   * Adds the server to the group.
+   *
+   * @param hostPort the server
+   */
+  public void addServer(HostAndPort hostPort){
+    servers.add(hostPort);
+  }
+
+  /**
+   * Adds a group of servers.
+   *
+   * @param hostPort the servers
+   */
+  public void addAllServers(Collection<HostAndPort> hostPort){
+    servers.addAll(hostPort);
+  }
+
+  /**
+   * @param hostPort the server to look up
+   * @return true, if a server with hostPort is found
+   */
+  public boolean containsServer(HostAndPort hostPort) {
+    return servers.contains(hostPort);
+  }
+
+  /**
+   * Get the set of servers.
+   *
+   * @return the servers in the group
+   */
+  public Set<HostAndPort> getServers() {
+    return servers;
+  }
+
+  /**
+   * Remove a server from this group.
+   *
+   * @param hostPort the server to remove
+   * @return true if the server was removed
+   */
+  public boolean removeServer(HostAndPort hostPort) {
+    return servers.remove(hostPort);
+  }
+
+  /**
+   * Set of tables that are members of this group
+   * @return the tables in the group
+   */
+  public NavigableSet<TableName> getTables() {
+    return tables;
+  }
+
+  public void addTable(TableName table) {
+    tables.add(table);
+  }
+
+  public void addAllTables(Collection<TableName> arg) {
+    tables.addAll(arg);
+  }
+
+  public boolean containsTable(TableName table) {
+    return tables.contains(table);
+  }
+
+  public boolean removeTable(TableName table) {
+    return tables.remove(table);
+  }
+
+  @Override
+  public String toString() {
+    StringBuffer sb = new StringBuffer();
+    sb.append("GroupName:");
+    sb.append(this.name);
+    sb.append(", ");
+    sb.append(" Servers:");
+    sb.append(this.servers);
+    return sb.toString();
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) return true;
+    if (o == null || getClass() != o.getClass()) return false;
+
+    GroupInfo groupInfo = (GroupInfo) o;
+
+    if (!name.equals(groupInfo.name)) return false;
+    if (!servers.equals(groupInfo.servers)) return false;
+    if (!tables.equals(groupInfo.tables)) return false;
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    int result = servers.hashCode();
+    result = 31 * result + tables.hashCode();
+    result = 31 * result + name.hashCode();
+    return result;
+  }
+
+}
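GroupInfo is a plain value holder; membership checks rely on the value equality of HostAndPort and TableName. A minimal hedged sketch of that contract; the group, server, and table names are invented:

import com.google.common.net.HostAndPort;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.group.GroupInfo;

public class GroupInfoMembershipSketch {
  public static void main(String[] args) {
    GroupInfo group = new GroupInfo("low_latency");                    // hypothetical group
    group.addServer(HostAndPort.fromParts("rs1.example.com", 16020));  // hypothetical server
    group.addTable(TableName.valueOf("t1"));                           // hypothetical table

    // HostAndPort and TableName have value semantics, so freshly built
    // instances match the stored members.
    System.out.println(group.containsServer(
        HostAndPort.fromParts("rs1.example.com", 16020)));             // true
    System.out.println(group.containsTable(TableName.valueOf("t1")));  // true
    System.out.println(group.removeServer(
        HostAndPort.fromParts("rs1.example.com", 16020)));             // true
  }
}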
diff --git hbase-it/src/test/java/org/apache/hadoop/hbase/group/IntegrationTestGroup.java hbase-it/src/test/java/org/apache/hadoop/hbase/group/IntegrationTestGroup.java
new file mode 100644
index 0000000..f19c7d0
--- /dev/null
+++ hbase-it/src/test/java/org/apache/hadoop/hbase/group/IntegrationTestGroup.java
@@ -0,0 +1,99 @@
+/**
+ * Copyright The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.group;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.IntegrationTestingUtility;
+import org.apache.hadoop.hbase.Waiter;
+import org.apache.hadoop.hbase.testclassification.IntegrationTests;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.experimental.categories.Category;
+
+/**
+ * Runs all of the unit tests defined in TestGroupsBase
+ * as an integration test.
+ * Requires TestGroupsBase.NUM_SLAVES_BASE servers to run.
+ */
+@Category(IntegrationTests.class)
+public class IntegrationTestGroup extends TestGroupsBase {
+  //Integration specific
+  private final static Log LOG = LogFactory.getLog(IntegrationTestGroup.class);
+  private static boolean initialized = false;
+
+  @Before
+  public void beforeMethod() throws Exception {
+    if(!initialized) {
+      LOG.info("Setting up IntegrationTestGroup");
+      LOG.info("Initializing cluster with " + NUM_SLAVES_BASE + " servers");
+      TEST_UTIL = new IntegrationTestingUtility();
+      ((IntegrationTestingUtility)TEST_UTIL).initializeCluster(NUM_SLAVES_BASE);
+      //set shared configs
+      admin = TEST_UTIL.getHBaseAdmin();
+      cluster = TEST_UTIL.getHBaseClusterInterface();
+      groupAdmin = new VerifyingGroupAdminClient(GroupAdmin.newClient(TEST_UTIL.getConnection()),
+          TEST_UTIL.getConfiguration());
+      LOG.info("Done initializing cluster");
+      initialized = true;
+      //cluster may not be clean
+      //cleanup when initializing
+      afterMethod();
+    }
+  }
+
+  @After
+  public void afterMethod() throws Exception {
+    LOG.info("Cleaning up previous test run");
+    //cleanup previous artifacts
+    deleteTableIfNecessary();
+    deleteNamespaceIfNecessary();
+    deleteGroups();
+    admin.setBalancerRunning(true, true);
+
+    LOG.info("Restoring the cluster");
+    ((IntegrationTestingUtility)TEST_UTIL).restoreCluster();
+    LOG.info("Done restoring the cluster");
+
+    TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
+      @Override
+      public boolean evaluate() throws Exception {
+        LOG.info("Waiting for cleanup to finish " + groupAdmin.listGroups());
+        //Might be greater since moving servers back to default
+        //is after starting a server
+        return groupAdmin.getGroupInfo(GroupInfo.DEFAULT_GROUP).getServers().size()
+            >= NUM_SLAVES_BASE;
+      }
+    });
+
+    TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
+      @Override
+      public boolean evaluate() throws Exception {
+        LOG.info("Waiting for regionservers to be registered " + groupAdmin.listGroups());
+        //Might be greater since moving servers back to default
+        //is after starting a server
+        return groupAdmin.getGroupInfo(GroupInfo.DEFAULT_GROUP).getServers().size()
+            == getNumServers();
+      }
+    });
+
+    LOG.info("Done cleaning up previous test run");
+  }
+}
diff --git hbase-protocol/pom.xml hbase-protocol/pom.xml
index 8034576..d352373 100644
--- hbase-protocol/pom.xml
+++ hbase-protocol/pom.xml
@@ -180,6 +180,8 @@
                         ErrorHandling.proto
                         Filter.proto
                         FS.proto
+                        Group.proto
+                        GroupAdmin.proto
                        HBase.proto
                        HFile.proto
                        LoadBalancer.proto
diff --git hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/GroupAdminProtos.java hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/GroupAdminProtos.java
new file mode 100644
index 0000000..282ff46
--- /dev/null
+++ hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/GroupAdminProtos.java
@@ -0,0 +1,11852 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: GroupAdmin.proto + +package org.apache.hadoop.hbase.protobuf.generated; + +public final class GroupAdminProtos { + private GroupAdminProtos() {} + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistry registry) { + } + public interface ListTablesOfGroupRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string group_name = 1; + /** + * required string group_name = 1; + */ + boolean hasGroupName(); + /** + * required string group_name = 1; + */ + java.lang.String getGroupName(); + /** + * required string group_name = 1; + */ + com.google.protobuf.ByteString + getGroupNameBytes(); + } + /** + * Protobuf type {@code hbase.pb.ListTablesOfGroupRequest} + */ + public static final class ListTablesOfGroupRequest extends + com.google.protobuf.GeneratedMessage + implements ListTablesOfGroupRequestOrBuilder { + // Use ListTablesOfGroupRequest.newBuilder() to construct. + private ListTablesOfGroupRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private ListTablesOfGroupRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final ListTablesOfGroupRequest defaultInstance; + public static ListTablesOfGroupRequest getDefaultInstance() { + return defaultInstance; + } + + public ListTablesOfGroupRequest getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ListTablesOfGroupRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + groupName_ = input.readBytes(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_ListTablesOfGroupRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_ListTablesOfGroupRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListTablesOfGroupRequest.class, org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListTablesOfGroupRequest.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { 
+ public ListTablesOfGroupRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ListTablesOfGroupRequest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string group_name = 1; + public static final int GROUP_NAME_FIELD_NUMBER = 1; + private java.lang.Object groupName_; + /** + * required string group_name = 1; + */ + public boolean hasGroupName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string group_name = 1; + */ + public java.lang.String getGroupName() { + java.lang.Object ref = groupName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + groupName_ = s; + } + return s; + } + } + /** + * required string group_name = 1; + */ + public com.google.protobuf.ByteString + getGroupNameBytes() { + java.lang.Object ref = groupName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + groupName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private void initFields() { + groupName_ = ""; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasGroupName()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getGroupNameBytes()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getGroupNameBytes()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListTablesOfGroupRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListTablesOfGroupRequest other = (org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListTablesOfGroupRequest) obj; + + boolean result = true; + result = result && (hasGroupName() == other.hasGroupName()); + if (hasGroupName()) { + result = result && getGroupName() + .equals(other.getGroupName()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if 
(memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasGroupName()) { + hash = (37 * hash) + GROUP_NAME_FIELD_NUMBER; + hash = (53 * hash) + getGroupName().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListTablesOfGroupRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListTablesOfGroupRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListTablesOfGroupRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListTablesOfGroupRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListTablesOfGroupRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListTablesOfGroupRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListTablesOfGroupRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListTablesOfGroupRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListTablesOfGroupRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListTablesOfGroupRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListTablesOfGroupRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder 
builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.ListTablesOfGroupRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListTablesOfGroupRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_ListTablesOfGroupRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_ListTablesOfGroupRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListTablesOfGroupRequest.class, org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListTablesOfGroupRequest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListTablesOfGroupRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + groupName_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_ListTablesOfGroupRequest_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListTablesOfGroupRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListTablesOfGroupRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListTablesOfGroupRequest build() { + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListTablesOfGroupRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListTablesOfGroupRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListTablesOfGroupRequest result = new org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListTablesOfGroupRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.groupName_ = groupName_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListTablesOfGroupRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListTablesOfGroupRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder 
mergeFrom(org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListTablesOfGroupRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListTablesOfGroupRequest.getDefaultInstance()) return this; + if (other.hasGroupName()) { + bitField0_ |= 0x00000001; + groupName_ = other.groupName_; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasGroupName()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListTablesOfGroupRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListTablesOfGroupRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string group_name = 1; + private java.lang.Object groupName_ = ""; + /** + * required string group_name = 1; + */ + public boolean hasGroupName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string group_name = 1; + */ + public java.lang.String getGroupName() { + java.lang.Object ref = groupName_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + groupName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string group_name = 1; + */ + public com.google.protobuf.ByteString + getGroupNameBytes() { + java.lang.Object ref = groupName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + groupName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string group_name = 1; + */ + public Builder setGroupName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + groupName_ = value; + onChanged(); + return this; + } + /** + * required string group_name = 1; + */ + public Builder clearGroupName() { + bitField0_ = (bitField0_ & ~0x00000001); + groupName_ = getDefaultInstance().getGroupName(); + onChanged(); + return this; + } + /** + * required string group_name = 1; + */ + public Builder setGroupNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + groupName_ = value; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.ListTablesOfGroupRequest) + } + + static { + defaultInstance = new ListTablesOfGroupRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.ListTablesOfGroupRequest) + } + + public interface ListTablesOfGroupResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // repeated .hbase.pb.TableName table_name = 1; + /** + * repeated .hbase.pb.TableName table_name = 1; + */ + java.util.List + getTableNameList(); + /** + * repeated .hbase.pb.TableName table_name = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName 
getTableName(int index); + /** + * repeated .hbase.pb.TableName table_name = 1; + */ + int getTableNameCount(); + /** + * repeated .hbase.pb.TableName table_name = 1; + */ + java.util.List + getTableNameOrBuilderList(); + /** + * repeated .hbase.pb.TableName table_name = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder( + int index); + } + /** + * Protobuf type {@code hbase.pb.ListTablesOfGroupResponse} + */ + public static final class ListTablesOfGroupResponse extends + com.google.protobuf.GeneratedMessage + implements ListTablesOfGroupResponseOrBuilder { + // Use ListTablesOfGroupResponse.newBuilder() to construct. + private ListTablesOfGroupResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private ListTablesOfGroupResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final ListTablesOfGroupResponse defaultInstance; + public static ListTablesOfGroupResponse getDefaultInstance() { + return defaultInstance; + } + + public ListTablesOfGroupResponse getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ListTablesOfGroupResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + tableName_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; + } + tableName_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + tableName_ = java.util.Collections.unmodifiableList(tableName_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_ListTablesOfGroupResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_ListTablesOfGroupResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListTablesOfGroupResponse.class, org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListTablesOfGroupResponse.Builder.class); + } + + 
public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public ListTablesOfGroupResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ListTablesOfGroupResponse(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + // repeated .hbase.pb.TableName table_name = 1; + public static final int TABLE_NAME_FIELD_NUMBER = 1; + private java.util.List tableName_; + /** + * repeated .hbase.pb.TableName table_name = 1; + */ + public java.util.List getTableNameList() { + return tableName_; + } + /** + * repeated .hbase.pb.TableName table_name = 1; + */ + public java.util.List + getTableNameOrBuilderList() { + return tableName_; + } + /** + * repeated .hbase.pb.TableName table_name = 1; + */ + public int getTableNameCount() { + return tableName_.size(); + } + /** + * repeated .hbase.pb.TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName(int index) { + return tableName_.get(index); + } + /** + * repeated .hbase.pb.TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder( + int index) { + return tableName_.get(index); + } + + private void initFields() { + tableName_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + for (int i = 0; i < getTableNameCount(); i++) { + if (!getTableName(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + for (int i = 0; i < tableName_.size(); i++) { + output.writeMessage(1, tableName_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < tableName_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, tableName_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListTablesOfGroupResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListTablesOfGroupResponse other = (org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListTablesOfGroupResponse) obj; + + boolean result = true; + result = result && getTableNameList() + .equals(other.getTableNameList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + 
return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (getTableNameCount() > 0) { + hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER; + hash = (53 * hash) + getTableNameList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListTablesOfGroupResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListTablesOfGroupResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListTablesOfGroupResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListTablesOfGroupResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListTablesOfGroupResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListTablesOfGroupResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListTablesOfGroupResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListTablesOfGroupResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListTablesOfGroupResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListTablesOfGroupResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListTablesOfGroupResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = 
new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.ListTablesOfGroupResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListTablesOfGroupResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_ListTablesOfGroupResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_ListTablesOfGroupResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListTablesOfGroupResponse.class, org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListTablesOfGroupResponse.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListTablesOfGroupResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getTableNameFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (tableNameBuilder_ == null) { + tableName_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + } else { + tableNameBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_ListTablesOfGroupResponse_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListTablesOfGroupResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListTablesOfGroupResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListTablesOfGroupResponse build() { + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListTablesOfGroupResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListTablesOfGroupResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListTablesOfGroupResponse result = new org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListTablesOfGroupResponse(this); + int from_bitField0_ = bitField0_; + if (tableNameBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + tableName_ = java.util.Collections.unmodifiableList(tableName_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.tableName_ = tableName_; + } else { + result.tableName_ = tableNameBuilder_.build(); + } + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListTablesOfGroupResponse) { + return 
mergeFrom((org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListTablesOfGroupResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListTablesOfGroupResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListTablesOfGroupResponse.getDefaultInstance()) return this; + if (tableNameBuilder_ == null) { + if (!other.tableName_.isEmpty()) { + if (tableName_.isEmpty()) { + tableName_ = other.tableName_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureTableNameIsMutable(); + tableName_.addAll(other.tableName_); + } + onChanged(); + } + } else { + if (!other.tableName_.isEmpty()) { + if (tableNameBuilder_.isEmpty()) { + tableNameBuilder_.dispose(); + tableNameBuilder_ = null; + tableName_ = other.tableName_; + bitField0_ = (bitField0_ & ~0x00000001); + tableNameBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getTableNameFieldBuilder() : null; + } else { + tableNameBuilder_.addAllMessages(other.tableName_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + for (int i = 0; i < getTableNameCount(); i++) { + if (!getTableName(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListTablesOfGroupResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListTablesOfGroupResponse) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // repeated .hbase.pb.TableName table_name = 1; + private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName> tableName_ = + java.util.Collections.emptyList(); + private void ensureTableNameIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + tableName_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName>(tableName_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableNameBuilder_; + + /** + * repeated .hbase.pb.TableName table_name = 1; + */ + public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName> getTableNameList() { + if (tableNameBuilder_ == null) { + return java.util.Collections.unmodifiableList(tableName_); + } else { + return tableNameBuilder_.getMessageList(); + } + } + /** + * repeated .hbase.pb.TableName table_name = 1; + */ + public int getTableNameCount() { + if (tableNameBuilder_ == null) { + return tableName_.size(); + } else { + return tableNameBuilder_.getCount(); + } + } + /** + * repeated .hbase.pb.TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName(int index) { + if (tableNameBuilder_ == null) { + return tableName_.get(index); + } else { + return tableNameBuilder_.getMessage(index); + } + } + /** + * repeated .hbase.pb.TableName table_name = 1; + */ + public Builder setTableName( + int index, 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTableNameIsMutable(); + tableName_.set(index, value); + onChanged(); + } else { + tableNameBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_name = 1; + */ + public Builder setTableName( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableNameBuilder_ == null) { + ensureTableNameIsMutable(); + tableName_.set(index, builderForValue.build()); + onChanged(); + } else { + tableNameBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_name = 1; + */ + public Builder addTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTableNameIsMutable(); + tableName_.add(value); + onChanged(); + } else { + tableNameBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_name = 1; + */ + public Builder addTableName( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTableNameIsMutable(); + tableName_.add(index, value); + onChanged(); + } else { + tableNameBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_name = 1; + */ + public Builder addTableName( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableNameBuilder_ == null) { + ensureTableNameIsMutable(); + tableName_.add(builderForValue.build()); + onChanged(); + } else { + tableNameBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_name = 1; + */ + public Builder addTableName( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableNameBuilder_ == null) { + ensureTableNameIsMutable(); + tableName_.add(index, builderForValue.build()); + onChanged(); + } else { + tableNameBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_name = 1; + */ + public Builder addAllTableName( + java.lang.Iterable values) { + if (tableNameBuilder_ == null) { + ensureTableNameIsMutable(); + super.addAll(values, tableName_); + onChanged(); + } else { + tableNameBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_name = 1; + */ + public Builder clearTableName() { + if (tableNameBuilder_ == null) { + tableName_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + tableNameBuilder_.clear(); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_name = 1; + */ + public Builder removeTableName(int index) { + if (tableNameBuilder_ == null) { + ensureTableNameIsMutable(); + tableName_.remove(index); + onChanged(); + } else { + tableNameBuilder_.remove(index); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableNameBuilder( + int index) { + return getTableNameFieldBuilder().getBuilder(index); + } 
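+      // Illustrative usage (a sketch added editorially, not protoc output):
+      // assembling this response by hand, e.g. in a unit test. TableName and its
+      // namespace/qualifier fields come from HBaseProtos; the values are made up.
+      //
+      //   ListTablesOfGroupResponse resp = ListTablesOfGroupResponse.newBuilder()
+      //       .addTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder()
+      //           .setNamespace(com.google.protobuf.ByteString.copyFromUtf8("default"))
+      //           .setQualifier(com.google.protobuf.ByteString.copyFromUtf8("t1")))
+      //       .build();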
+ /** + * repeated .hbase.pb.TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder( + int index) { + if (tableNameBuilder_ == null) { + return tableName_.get(index); } else { + return tableNameBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .hbase.pb.TableName table_name = 1; + */ + public java.util.List + getTableNameOrBuilderList() { + if (tableNameBuilder_ != null) { + return tableNameBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(tableName_); + } + } + /** + * repeated .hbase.pb.TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder addTableNameBuilder() { + return getTableNameFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()); + } + /** + * repeated .hbase.pb.TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder addTableNameBuilder( + int index) { + return getTableNameFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()); + } + /** + * repeated .hbase.pb.TableName table_name = 1; + */ + public java.util.List + getTableNameBuilderList() { + return getTableNameFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> + getTableNameFieldBuilder() { + if (tableNameBuilder_ == null) { + tableNameBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>( + tableName_, + ((bitField0_ & 0x00000001) == 0x00000001), + getParentForChildren(), + isClean()); + tableName_ = null; + } + return tableNameBuilder_; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.ListTablesOfGroupResponse) + } + + static { + defaultInstance = new ListTablesOfGroupResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.ListTablesOfGroupResponse) + } + + public interface GetGroupInfoRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string group_name = 1; + /** + * required string group_name = 1; + */ + boolean hasGroupName(); + /** + * required string group_name = 1; + */ + java.lang.String getGroupName(); + /** + * required string group_name = 1; + */ + com.google.protobuf.ByteString + getGroupNameBytes(); + } + /** + * Protobuf type {@code hbase.pb.GetGroupInfoRequest} + */ + public static final class GetGroupInfoRequest extends + com.google.protobuf.GeneratedMessage + implements GetGroupInfoRequestOrBuilder { + // Use GetGroupInfoRequest.newBuilder() to construct. 
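+  // Illustrative usage (a sketch, not part of the generated file): the request
+  // round-trips through toByteArray()/parseFrom(byte[]); "my_group" is a
+  // made-up example value.
+  //
+  //   GetGroupInfoRequest request = GetGroupInfoRequest.newBuilder()
+  //       .setGroupName("my_group")
+  //       .build();
+  //   GetGroupInfoRequest copy = GetGroupInfoRequest.parseFrom(request.toByteArray());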
+ private GetGroupInfoRequest(com.google.protobuf.GeneratedMessage.Builder<?> builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private GetGroupInfoRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final GetGroupInfoRequest defaultInstance; + public static GetGroupInfoRequest getDefaultInstance() { + return defaultInstance; + } + + public GetGroupInfoRequest getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private GetGroupInfoRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + groupName_ = input.readBytes(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_GetGroupInfoRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_GetGroupInfoRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoRequest.class, org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoRequest.Builder.class); + } + + public static com.google.protobuf.Parser<GetGroupInfoRequest> PARSER = + new com.google.protobuf.AbstractParser<GetGroupInfoRequest>() { + public GetGroupInfoRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new GetGroupInfoRequest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser<GetGroupInfoRequest> getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string group_name = 1; + public static final int GROUP_NAME_FIELD_NUMBER = 1; + private java.lang.Object groupName_; + /** + * required string group_name = 1; + */ + public boolean hasGroupName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string group_name = 1; + */ + public java.lang.String getGroupName() { + java.lang.Object ref = groupName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + 
java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + groupName_ = s; + } + return s; + } + } + /** + * required string group_name = 1; + */ + public com.google.protobuf.ByteString + getGroupNameBytes() { + java.lang.Object ref = groupName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + groupName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private void initFields() { + groupName_ = ""; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasGroupName()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getGroupNameBytes()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getGroupNameBytes()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoRequest other = (org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoRequest) obj; + + boolean result = true; + result = result && (hasGroupName() == other.hasGroupName()); + if (hasGroupName()) { + result = result && getGroupName() + .equals(other.getGroupName()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasGroupName()) { + hash = (37 * hash) + GROUP_NAME_FIELD_NUMBER; + hash = (53 * hash) + getGroupName().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static 
org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.GetGroupInfoRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_GetGroupInfoRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_GetGroupInfoRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoRequest.class, org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoRequest.Builder.class); + } + + // Construct 
using org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + groupName_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_GetGroupInfoRequest_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoRequest build() { + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoRequest result = new org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.groupName_ = groupName_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoRequest.getDefaultInstance()) return this; + if (other.hasGroupName()) { + bitField0_ |= 0x00000001; + groupName_ = other.groupName_; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasGroupName()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private 
int bitField0_; + + // required string group_name = 1; + private java.lang.Object groupName_ = ""; + /** + * required string group_name = 1; + */ + public boolean hasGroupName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string group_name = 1; + */ + public java.lang.String getGroupName() { + java.lang.Object ref = groupName_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + groupName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string group_name = 1; + */ + public com.google.protobuf.ByteString + getGroupNameBytes() { + java.lang.Object ref = groupName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + groupName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string group_name = 1; + */ + public Builder setGroupName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + groupName_ = value; + onChanged(); + return this; + } + /** + * required string group_name = 1; + */ + public Builder clearGroupName() { + bitField0_ = (bitField0_ & ~0x00000001); + groupName_ = getDefaultInstance().getGroupName(); + onChanged(); + return this; + } + /** + * required string group_name = 1; + */ + public Builder setGroupNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + groupName_ = value; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.GetGroupInfoRequest) + } + + static { + defaultInstance = new GetGroupInfoRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.GetGroupInfoRequest) + } + + public interface GetGroupInfoResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // optional .hbase.pb.GroupInfo group_info = 1; + /** + * optional .hbase.pb.GroupInfo group_info = 1; + */ + boolean hasGroupInfo(); + /** + * optional .hbase.pb.GroupInfo group_info = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo getGroupInfo(); + /** + * optional .hbase.pb.GroupInfo group_info = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfoOrBuilder getGroupInfoOrBuilder(); + } + /** + * Protobuf type {@code hbase.pb.GetGroupInfoResponse} + */ + public static final class GetGroupInfoResponse extends + com.google.protobuf.GeneratedMessage + implements GetGroupInfoResponseOrBuilder { + // Use GetGroupInfoResponse.newBuilder() to construct. 
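+  // Illustrative usage (a sketch, not part of the generated file): group_info is
+  // an optional field, so callers should test presence before reading; an unset
+  // field returns the default instance rather than null.
+  //
+  //   if (response.hasGroupInfo()) {
+  //     GroupProtos.GroupInfo info = response.getGroupInfo();
+  //     // ... use info ...
+  //   }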
+ private GetGroupInfoResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private GetGroupInfoResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final GetGroupInfoResponse defaultInstance; + public static GetGroupInfoResponse getDefaultInstance() { + return defaultInstance; + } + + public GetGroupInfoResponse getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private GetGroupInfoResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = groupInfo_.toBuilder(); + } + groupInfo_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(groupInfo_); + groupInfo_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_GetGroupInfoResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_GetGroupInfoResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoResponse.class, org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoResponse.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public GetGroupInfoResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new GetGroupInfoResponse(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // optional .hbase.pb.GroupInfo group_info = 1; + public static final int GROUP_INFO_FIELD_NUMBER = 1; + private 
org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo groupInfo_; + /** + * optional .hbase.pb.GroupInfo group_info = 1; + */ + public boolean hasGroupInfo() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional .hbase.pb.GroupInfo group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo getGroupInfo() { + return groupInfo_; + } + /** + * optional .hbase.pb.GroupInfo group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfoOrBuilder getGroupInfoOrBuilder() { + return groupInfo_; + } + + private void initFields() { + groupInfo_ = org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (hasGroupInfo()) { + if (!getGroupInfo().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, groupInfo_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, groupInfo_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoResponse other = (org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoResponse) obj; + + boolean result = true; + result = result && (hasGroupInfo() == other.hasGroupInfo()); + if (hasGroupInfo()) { + result = result && getGroupInfo() + .equals(other.getGroupInfo()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasGroupInfo()) { + hash = (37 * hash) + GROUP_INFO_FIELD_NUMBER; + hash = (53 * hash) + getGroupInfo().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.GetGroupInfoResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_GetGroupInfoResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_GetGroupInfoResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + 
org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoResponse.class, org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoResponse.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getGroupInfoFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (groupInfoBuilder_ == null) { + groupInfo_ = org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo.getDefaultInstance(); + } else { + groupInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_GetGroupInfoResponse_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoResponse build() { + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoResponse result = new org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoResponse(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (groupInfoBuilder_ == null) { + result.groupInfo_ = groupInfo_; + } else { + result.groupInfo_ = groupInfoBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoResponse.getDefaultInstance()) return this; + if (other.hasGroupInfo()) { + mergeGroupInfo(other.getGroupInfo()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (hasGroupInfo()) { + if (!getGroupInfo().isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + 
throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoResponse) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // optional .hbase.pb.GroupInfo group_info = 1; + private org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo groupInfo_ = org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo, org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfoOrBuilder> groupInfoBuilder_; + /** + * optional .hbase.pb.GroupInfo group_info = 1; + */ + public boolean hasGroupInfo() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional .hbase.pb.GroupInfo group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo getGroupInfo() { + if (groupInfoBuilder_ == null) { + return groupInfo_; + } else { + return groupInfoBuilder_.getMessage(); + } + } + /** + * optional .hbase.pb.GroupInfo group_info = 1; + */ + public Builder setGroupInfo(org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo value) { + if (groupInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + groupInfo_ = value; + onChanged(); + } else { + groupInfoBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * optional .hbase.pb.GroupInfo group_info = 1; + */ + public Builder setGroupInfo( + org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo.Builder builderForValue) { + if (groupInfoBuilder_ == null) { + groupInfo_ = builderForValue.build(); + onChanged(); + } else { + groupInfoBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * optional .hbase.pb.GroupInfo group_info = 1; + */ + public Builder mergeGroupInfo(org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo value) { + if (groupInfoBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + groupInfo_ != org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo.getDefaultInstance()) { + groupInfo_ = + org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo.newBuilder(groupInfo_).mergeFrom(value).buildPartial(); + } else { + groupInfo_ = value; + } + onChanged(); + } else { + groupInfoBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * optional .hbase.pb.GroupInfo group_info = 1; + */ + public Builder clearGroupInfo() { + if (groupInfoBuilder_ == null) { + groupInfo_ = org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo.getDefaultInstance(); + onChanged(); + } else { + groupInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * optional .hbase.pb.GroupInfo group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo.Builder getGroupInfoBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getGroupInfoFieldBuilder().getBuilder(); + } + /** + * optional 
.hbase.pb.GroupInfo group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfoOrBuilder getGroupInfoOrBuilder() { + if (groupInfoBuilder_ != null) { + return groupInfoBuilder_.getMessageOrBuilder(); + } else { + return groupInfo_; + } + } + /** + * optional .hbase.pb.GroupInfo group_info = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo, org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfoOrBuilder> + getGroupInfoFieldBuilder() { + if (groupInfoBuilder_ == null) { + groupInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo, org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfoOrBuilder>( + groupInfo_, + getParentForChildren(), + isClean()); + groupInfo_ = null; + } + return groupInfoBuilder_; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.GetGroupInfoResponse) + } + + static { + defaultInstance = new GetGroupInfoResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.GetGroupInfoResponse) + } + + public interface GetGroupInfoOfTableRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .hbase.pb.TableName table_name = 1; + /** + * required .hbase.pb.TableName table_name = 1; + */ + boolean hasTableName(); + /** + * required .hbase.pb.TableName table_name = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName(); + /** + * required .hbase.pb.TableName table_name = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder(); + } + /** + * Protobuf type {@code hbase.pb.GetGroupInfoOfTableRequest} + */ + public static final class GetGroupInfoOfTableRequest extends + com.google.protobuf.GeneratedMessage + implements GetGroupInfoOfTableRequestOrBuilder { + // Use GetGroupInfoOfTableRequest.newBuilder() to construct. 
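+  // Illustrative usage (a sketch, not part of the generated file): table_name is
+  // a required message field, so build() throws if it was never set; the
+  // namespace/qualifier values below are made-up examples.
+  //
+  //   GetGroupInfoOfTableRequest request = GetGroupInfoOfTableRequest.newBuilder()
+  //       .setTableName(HBaseProtos.TableName.newBuilder()
+  //           .setNamespace(ByteString.copyFromUtf8("default"))
+  //           .setQualifier(ByteString.copyFromUtf8("t1")))
+  //       .build();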
+ private GetGroupInfoOfTableRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private GetGroupInfoOfTableRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final GetGroupInfoOfTableRequest defaultInstance; + public static GetGroupInfoOfTableRequest getDefaultInstance() { + return defaultInstance; + } + + public GetGroupInfoOfTableRequest getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private GetGroupInfoOfTableRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = tableName_.toBuilder(); + } + tableName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(tableName_); + tableName_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_GetGroupInfoOfTableRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_GetGroupInfoOfTableRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableRequest.class, org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableRequest.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public GetGroupInfoOfTableRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new GetGroupInfoOfTableRequest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required .hbase.pb.TableName table_name = 1; + public static final int TABLE_NAME_FIELD_NUMBER 
= 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_; + /** + * required .hbase.pb.TableName table_name = 1; + */ + public boolean hasTableName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .hbase.pb.TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() { + return tableName_; + } + /** + * required .hbase.pb.TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() { + return tableName_; + } + + private void initFields() { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasTableName()) { + memoizedIsInitialized = 0; + return false; + } + if (!getTableName().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, tableName_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, tableName_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableRequest other = (org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableRequest) obj; + + boolean result = true; + result = result && (hasTableName() == other.hasTableName()); + if (hasTableName()) { + result = result && getTableName() + .equals(other.getTableName()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasTableName()) { + hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER; + hash = (53 * hash) + getTableName().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableRequest 
parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.GetGroupInfoOfTableRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_GetGroupInfoOfTableRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return 
org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_GetGroupInfoOfTableRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableRequest.class, org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableRequest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getTableNameFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (tableNameBuilder_ == null) { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + } else { + tableNameBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_GetGroupInfoOfTableRequest_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableRequest build() { + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableRequest result = new org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (tableNameBuilder_ == null) { + result.tableName_ = tableName_; + } else { + result.tableName_ = tableNameBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableRequest.getDefaultInstance()) return this; + if (other.hasTableName()) { + mergeTableName(other.getTableName()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() 
{ + if (!hasTableName()) { + + return false; + } + if (!getTableName().isInitialized()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .hbase.pb.TableName table_name = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableNameBuilder_; + /** + * required .hbase.pb.TableName table_name = 1; + */ + public boolean hasTableName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .hbase.pb.TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() { + if (tableNameBuilder_ == null) { + return tableName_; + } else { + return tableNameBuilder_.getMessage(); + } + } + /** + * required .hbase.pb.TableName table_name = 1; + */ + public Builder setTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + tableName_ = value; + onChanged(); + } else { + tableNameBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.TableName table_name = 1; + */ + public Builder setTableName( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableNameBuilder_ == null) { + tableName_ = builderForValue.build(); + onChanged(); + } else { + tableNameBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.TableName table_name = 1; + */ + public Builder mergeTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + tableName_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) { + tableName_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial(); + } else { + tableName_ = value; + } + onChanged(); + } else { + tableNameBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.TableName table_name = 1; + */ + public Builder clearTableName() { + if (tableNameBuilder_ == null) { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + onChanged(); + } else { + tableNameBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + 
/** + * required .hbase.pb.TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableNameBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getTableNameFieldBuilder().getBuilder(); + } + /** + * required .hbase.pb.TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() { + if (tableNameBuilder_ != null) { + return tableNameBuilder_.getMessageOrBuilder(); + } else { + return tableName_; + } + } + /** + * required .hbase.pb.TableName table_name = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> + getTableNameFieldBuilder() { + if (tableNameBuilder_ == null) { + tableNameBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>( + tableName_, + getParentForChildren(), + isClean()); + tableName_ = null; + } + return tableNameBuilder_; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.GetGroupInfoOfTableRequest) + } + + static { + defaultInstance = new GetGroupInfoOfTableRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.GetGroupInfoOfTableRequest) + } + + public interface GetGroupInfoOfTableResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // optional .hbase.pb.GroupInfo group_info = 1; + /** + * optional .hbase.pb.GroupInfo group_info = 1; + */ + boolean hasGroupInfo(); + /** + * optional .hbase.pb.GroupInfo group_info = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo getGroupInfo(); + /** + * optional .hbase.pb.GroupInfo group_info = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfoOrBuilder getGroupInfoOrBuilder(); + } + /** + * Protobuf type {@code hbase.pb.GetGroupInfoOfTableResponse} + */ + public static final class GetGroupInfoOfTableResponse extends + com.google.protobuf.GeneratedMessage + implements GetGroupInfoOfTableResponseOrBuilder { + // Use GetGroupInfoOfTableResponse.newBuilder() to construct. 
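A brief aside on the GetGroupInfoOfTableRequest message completed just above: table_name is a required field, so build() throws if it was never set, and the PARSER rejects wire data that omits it. A minimal round-trip sketch follows; the namespace/qualifier layout of hbase.pb.TableName is assumed from HBase.proto, and everything else uses only the generated methods shown in this patch.

    import com.google.protobuf.ByteString;
    import org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableRequest;
    import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;

    public class GetGroupInfoOfTableRequestSketch {
      public static void main(String[] args) throws Exception {
        // Assumption: hbase.pb.TableName carries required bytes namespace/qualifier.
        HBaseProtos.TableName tableName = HBaseProtos.TableName.newBuilder()
            .setNamespace(ByteString.copyFromUtf8("default"))
            .setQualifier(ByteString.copyFromUtf8("t1"))
            .build();

        // table_name is required: skipping setTableName() would make build()
        // throw an UninitializedMessageException (see build() above).
        GetGroupInfoOfTableRequest request = GetGroupInfoOfTableRequest.newBuilder()
            .setTableName(tableName)
            .build();

        // Round-trip through the wire format implemented by writeTo()/PARSER.
        GetGroupInfoOfTableRequest copy =
            GetGroupInfoOfTableRequest.parseFrom(request.toByteArray());
        System.out.println(copy.getTableName().getQualifier().toStringUtf8()); // t1
      }
    }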
+ private GetGroupInfoOfTableResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private GetGroupInfoOfTableResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final GetGroupInfoOfTableResponse defaultInstance; + public static GetGroupInfoOfTableResponse getDefaultInstance() { + return defaultInstance; + } + + public GetGroupInfoOfTableResponse getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private GetGroupInfoOfTableResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = groupInfo_.toBuilder(); + } + groupInfo_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(groupInfo_); + groupInfo_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_GetGroupInfoOfTableResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_GetGroupInfoOfTableResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableResponse.class, org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableResponse.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public GetGroupInfoOfTableResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new GetGroupInfoOfTableResponse(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // optional .hbase.pb.GroupInfo group_info = 1; + public static final int 
GROUP_INFO_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo groupInfo_; + /** + * optional .hbase.pb.GroupInfo group_info = 1; + */ + public boolean hasGroupInfo() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional .hbase.pb.GroupInfo group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo getGroupInfo() { + return groupInfo_; + } + /** + * optional .hbase.pb.GroupInfo group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfoOrBuilder getGroupInfoOrBuilder() { + return groupInfo_; + } + + private void initFields() { + groupInfo_ = org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (hasGroupInfo()) { + if (!getGroupInfo().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, groupInfo_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, groupInfo_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableResponse other = (org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableResponse) obj; + + boolean result = true; + result = result && (hasGroupInfo() == other.hasGroupInfo()); + if (hasGroupInfo()) { + result = result && getGroupInfo() + .equals(other.getGroupInfo()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasGroupInfo()) { + hash = (37 * hash) + GROUP_INFO_FIELD_NUMBER; + hash = (53 * hash) + getGroupInfo().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableResponse parseFrom( + 
com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.GetGroupInfoOfTableResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_GetGroupInfoOfTableResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return 
org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_GetGroupInfoOfTableResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableResponse.class, org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableResponse.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getGroupInfoFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (groupInfoBuilder_ == null) { + groupInfo_ = org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo.getDefaultInstance(); + } else { + groupInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_GetGroupInfoOfTableResponse_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableResponse build() { + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableResponse result = new org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableResponse(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (groupInfoBuilder_ == null) { + result.groupInfo_ = groupInfo_; + } else { + result.groupInfo_ = groupInfoBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableResponse.getDefaultInstance()) return this; + if (other.hasGroupInfo()) { + mergeGroupInfo(other.getGroupInfo()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean 
isInitialized() { + if (hasGroupInfo()) { + if (!getGroupInfo().isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableResponse) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // optional .hbase.pb.GroupInfo group_info = 1; + private org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo groupInfo_ = org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo, org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfoOrBuilder> groupInfoBuilder_; + /** + * optional .hbase.pb.GroupInfo group_info = 1; + */ + public boolean hasGroupInfo() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional .hbase.pb.GroupInfo group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo getGroupInfo() { + if (groupInfoBuilder_ == null) { + return groupInfo_; + } else { + return groupInfoBuilder_.getMessage(); + } + } + /** + * optional .hbase.pb.GroupInfo group_info = 1; + */ + public Builder setGroupInfo(org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo value) { + if (groupInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + groupInfo_ = value; + onChanged(); + } else { + groupInfoBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * optional .hbase.pb.GroupInfo group_info = 1; + */ + public Builder setGroupInfo( + org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo.Builder builderForValue) { + if (groupInfoBuilder_ == null) { + groupInfo_ = builderForValue.build(); + onChanged(); + } else { + groupInfoBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * optional .hbase.pb.GroupInfo group_info = 1; + */ + public Builder mergeGroupInfo(org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo value) { + if (groupInfoBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + groupInfo_ != org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo.getDefaultInstance()) { + groupInfo_ = + org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo.newBuilder(groupInfo_).mergeFrom(value).buildPartial(); + } else { + groupInfo_ = value; + } + onChanged(); + } else { + groupInfoBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * optional .hbase.pb.GroupInfo group_info = 1; + */ + public Builder clearGroupInfo() { + if (groupInfoBuilder_ == null) { + groupInfo_ = org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo.getDefaultInstance(); + onChanged(); + } else { + groupInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + 
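The setGroupInfo/mergeGroupInfo/clearGroupInfo trio above follows the usual generated pattern for a singular sub-message: merge combines field by field into any value already present instead of replacing it wholesale. A small illustration, assuming GroupInfo's required name field (with generated setName/getName accessors) is all that is needed to initialize it:

    import org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableResponse;
    import org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo;

    public class MergeGroupInfoSketch {
      public static void main(String[] args) {
        // GroupInfo field names are assumptions inferred from GroupProtos.
        GroupInfo first = GroupInfo.newBuilder().setName("g1").build();
        GroupInfo second = GroupInfo.newBuilder().setName("g2").build();

        GetGroupInfoOfTableResponse.Builder builder =
            GetGroupInfoOfTableResponse.newBuilder().setGroupInfo(first);
        // mergeGroupInfo() re-merges rather than replaces: fields set in
        // `second` win, while fields unset in `second` keep their `first` values.
        builder.mergeGroupInfo(second);
        System.out.println(builder.getGroupInfo().getName()); // g2
      }
    }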
/** + * optional .hbase.pb.GroupInfo group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo.Builder getGroupInfoBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getGroupInfoFieldBuilder().getBuilder(); + } + /** + * optional .hbase.pb.GroupInfo group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfoOrBuilder getGroupInfoOrBuilder() { + if (groupInfoBuilder_ != null) { + return groupInfoBuilder_.getMessageOrBuilder(); + } else { + return groupInfo_; + } + } + /** + * optional .hbase.pb.GroupInfo group_info = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo, org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfoOrBuilder> + getGroupInfoFieldBuilder() { + if (groupInfoBuilder_ == null) { + groupInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo, org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfoOrBuilder>( + groupInfo_, + getParentForChildren(), + isClean()); + groupInfo_ = null; + } + return groupInfoBuilder_; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.GetGroupInfoOfTableResponse) + } + + static { + defaultInstance = new GetGroupInfoOfTableResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.GetGroupInfoOfTableResponse) + } + + public interface MoveServersRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string target_group = 1; + /** + * required string target_group = 1; + */ + boolean hasTargetGroup(); + /** + * required string target_group = 1; + */ + java.lang.String getTargetGroup(); + /** + * required string target_group = 1; + */ + com.google.protobuf.ByteString + getTargetGroupBytes(); + + // repeated .hbase.pb.HostPort servers = 2; + /** + * repeated .hbase.pb.HostPort servers = 2; + */ + java.util.List + getServersList(); + /** + * repeated .hbase.pb.HostPort servers = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort getServers(int index); + /** + * repeated .hbase.pb.HostPort servers = 2; + */ + int getServersCount(); + /** + * repeated .hbase.pb.HostPort servers = 2; + */ + java.util.List + getServersOrBuilderList(); + /** + * repeated .hbase.pb.HostPort servers = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPortOrBuilder getServersOrBuilder( + int index); + } + /** + * Protobuf type {@code hbase.pb.MoveServersRequest} + */ + public static final class MoveServersRequest extends + com.google.protobuf.GeneratedMessage + implements MoveServersRequestOrBuilder { + // Use MoveServersRequest.newBuilder() to construct. 
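GetGroupInfoOfTableResponse, whose generated form closes just above, marks group_info as optional, so callers must probe hasGroupInfo() before reading it; an absent field simply means the table belongs to no explicit group. A hedged sketch of the receive side (GroupInfo's name accessor is assumed, as before):

    import org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableResponse;

    public final class ResponseHandlingSketch {
      private ResponseHandlingSketch() {}

      /** Returns the group name, or null when group_info was not set. */
      public static String groupNameOf(GetGroupInfoOfTableResponse response) {
        // getGroupInfo() on an absent field returns the default instance,
        // not null, so hasGroupInfo() is the only reliable presence test.
        return response.hasGroupInfo() ? response.getGroupInfo().getName() : null;
      }
    }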
+ private MoveServersRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private MoveServersRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final MoveServersRequest defaultInstance; + public static MoveServersRequest getDefaultInstance() { + return defaultInstance; + } + + public MoveServersRequest getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private MoveServersRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + targetGroup_ = input.readBytes(); + break; + } + case 18: { + if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + servers_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000002; + } + servers_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.PARSER, extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + servers_ = java.util.Collections.unmodifiableList(servers_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_MoveServersRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_MoveServersRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersRequest.class, org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersRequest.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public MoveServersRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new MoveServersRequest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string target_group = 1; + public static final int TARGET_GROUP_FIELD_NUMBER = 1; + private java.lang.Object targetGroup_; + /** + * required string 
target_group = 1; + */ + public boolean hasTargetGroup() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string target_group = 1; + */ + public java.lang.String getTargetGroup() { + java.lang.Object ref = targetGroup_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + targetGroup_ = s; + } + return s; + } + } + /** + * required string target_group = 1; + */ + public com.google.protobuf.ByteString + getTargetGroupBytes() { + java.lang.Object ref = targetGroup_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + targetGroup_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // repeated .hbase.pb.HostPort servers = 2; + public static final int SERVERS_FIELD_NUMBER = 2; + private java.util.List servers_; + /** + * repeated .hbase.pb.HostPort servers = 2; + */ + public java.util.List getServersList() { + return servers_; + } + /** + * repeated .hbase.pb.HostPort servers = 2; + */ + public java.util.List + getServersOrBuilderList() { + return servers_; + } + /** + * repeated .hbase.pb.HostPort servers = 2; + */ + public int getServersCount() { + return servers_.size(); + } + /** + * repeated .hbase.pb.HostPort servers = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort getServers(int index) { + return servers_.get(index); + } + /** + * repeated .hbase.pb.HostPort servers = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPortOrBuilder getServersOrBuilder( + int index) { + return servers_.get(index); + } + + private void initFields() { + targetGroup_ = ""; + servers_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasTargetGroup()) { + memoizedIsInitialized = 0; + return false; + } + for (int i = 0; i < getServersCount(); i++) { + if (!getServers(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getTargetGroupBytes()); + } + for (int i = 0; i < servers_.size(); i++) { + output.writeMessage(2, servers_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getTargetGroupBytes()); + } + for (int i = 0; i < servers_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, servers_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final 
java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersRequest other = (org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersRequest) obj; + + boolean result = true; + result = result && (hasTargetGroup() == other.hasTargetGroup()); + if (hasTargetGroup()) { + result = result && getTargetGroup() + .equals(other.getTargetGroup()); + } + result = result && getServersList() + .equals(other.getServersList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasTargetGroup()) { + hash = (37 * hash) + TARGET_GROUP_FIELD_NUMBER; + hash = (53 * hash) + getTargetGroup().hashCode(); + } + if (getServersCount() > 0) { + hash = (37 * hash) + SERVERS_FIELD_NUMBER; + hash = (53 * hash) + getServersList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersRequest parseFrom( + 
com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.MoveServersRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_MoveServersRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_MoveServersRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersRequest.class, org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersRequest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getServersFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + targetGroup_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + if (serversBuilder_ == null) { + servers_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + } else { + serversBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_MoveServersRequest_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersRequest build() { + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + 
return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersRequest result = new org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.targetGroup_ = targetGroup_; + if (serversBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { + servers_ = java.util.Collections.unmodifiableList(servers_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.servers_ = servers_; + } else { + result.servers_ = serversBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersRequest.getDefaultInstance()) return this; + if (other.hasTargetGroup()) { + bitField0_ |= 0x00000001; + targetGroup_ = other.targetGroup_; + onChanged(); + } + if (serversBuilder_ == null) { + if (!other.servers_.isEmpty()) { + if (servers_.isEmpty()) { + servers_ = other.servers_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureServersIsMutable(); + servers_.addAll(other.servers_); + } + onChanged(); + } + } else { + if (!other.servers_.isEmpty()) { + if (serversBuilder_.isEmpty()) { + serversBuilder_.dispose(); + serversBuilder_ = null; + servers_ = other.servers_; + bitField0_ = (bitField0_ & ~0x00000002); + serversBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
+ getServersFieldBuilder() : null; + } else { + serversBuilder_.addAllMessages(other.servers_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasTargetGroup()) { + + return false; + } + for (int i = 0; i < getServersCount(); i++) { + if (!getServers(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string target_group = 1; + private java.lang.Object targetGroup_ = ""; + /** + * required string target_group = 1; + */ + public boolean hasTargetGroup() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string target_group = 1; + */ + public java.lang.String getTargetGroup() { + java.lang.Object ref = targetGroup_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + targetGroup_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string target_group = 1; + */ + public com.google.protobuf.ByteString + getTargetGroupBytes() { + java.lang.Object ref = targetGroup_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + targetGroup_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string target_group = 1; + */ + public Builder setTargetGroup( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + targetGroup_ = value; + onChanged(); + return this; + } + /** + * required string target_group = 1; + */ + public Builder clearTargetGroup() { + bitField0_ = (bitField0_ & ~0x00000001); + targetGroup_ = getDefaultInstance().getTargetGroup(); + onChanged(); + return this; + } + /** + * required string target_group = 1; + */ + public Builder setTargetGroupBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + targetGroup_ = value; + onChanged(); + return this; + } + + // repeated .hbase.pb.HostPort servers = 2; + private java.util.List servers_ = + java.util.Collections.emptyList(); + private void ensureServersIsMutable() { + if (!((bitField0_ & 0x00000002) == 0x00000002)) { + servers_ = new java.util.ArrayList(servers_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPortOrBuilder> serversBuilder_; + + /** + * repeated .hbase.pb.HostPort servers = 2; + */ + public java.util.List getServersList() { + if (serversBuilder_ == null) { + return 
java.util.Collections.unmodifiableList(servers_); + } else { + return serversBuilder_.getMessageList(); + } + } + /** + * repeated .hbase.pb.HostPort servers = 2; + */ + public int getServersCount() { + if (serversBuilder_ == null) { + return servers_.size(); + } else { + return serversBuilder_.getCount(); + } + } + /** + * repeated .hbase.pb.HostPort servers = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort getServers(int index) { + if (serversBuilder_ == null) { + return servers_.get(index); + } else { + return serversBuilder_.getMessage(index); + } + } + /** + * repeated .hbase.pb.HostPort servers = 2; + */ + public Builder setServers( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort value) { + if (serversBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureServersIsMutable(); + servers_.set(index, value); + onChanged(); + } else { + serversBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.HostPort servers = 2; + */ + public Builder setServers( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.Builder builderForValue) { + if (serversBuilder_ == null) { + ensureServersIsMutable(); + servers_.set(index, builderForValue.build()); + onChanged(); + } else { + serversBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.HostPort servers = 2; + */ + public Builder addServers(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort value) { + if (serversBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureServersIsMutable(); + servers_.add(value); + onChanged(); + } else { + serversBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .hbase.pb.HostPort servers = 2; + */ + public Builder addServers( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort value) { + if (serversBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureServersIsMutable(); + servers_.add(index, value); + onChanged(); + } else { + serversBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.HostPort servers = 2; + */ + public Builder addServers( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.Builder builderForValue) { + if (serversBuilder_ == null) { + ensureServersIsMutable(); + servers_.add(builderForValue.build()); + onChanged(); + } else { + serversBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.HostPort servers = 2; + */ + public Builder addServers( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.Builder builderForValue) { + if (serversBuilder_ == null) { + ensureServersIsMutable(); + servers_.add(index, builderForValue.build()); + onChanged(); + } else { + serversBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.HostPort servers = 2; + */ + public Builder addAllServers( + java.lang.Iterable values) { + if (serversBuilder_ == null) { + ensureServersIsMutable(); + super.addAll(values, servers_); + onChanged(); + } else { + serversBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .hbase.pb.HostPort servers = 2; + */ + public Builder clearServers() { + if (serversBuilder_ == null) { + servers_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + 
onChanged(); + } else { + serversBuilder_.clear(); + } + return this; + } + /** + * repeated .hbase.pb.HostPort servers = 2; + */ + public Builder removeServers(int index) { + if (serversBuilder_ == null) { + ensureServersIsMutable(); + servers_.remove(index); + onChanged(); + } else { + serversBuilder_.remove(index); + } + return this; + } + /** + * repeated .hbase.pb.HostPort servers = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.Builder getServersBuilder( + int index) { + return getServersFieldBuilder().getBuilder(index); + } + /** + * repeated .hbase.pb.HostPort servers = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPortOrBuilder getServersOrBuilder( + int index) { + if (serversBuilder_ == null) { + return servers_.get(index); } else { + return serversBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .hbase.pb.HostPort servers = 2; + */ + public java.util.List + getServersOrBuilderList() { + if (serversBuilder_ != null) { + return serversBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(servers_); + } + } + /** + * repeated .hbase.pb.HostPort servers = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.Builder addServersBuilder() { + return getServersFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.getDefaultInstance()); + } + /** + * repeated .hbase.pb.HostPort servers = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.Builder addServersBuilder( + int index) { + return getServersFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.getDefaultInstance()); + } + /** + * repeated .hbase.pb.HostPort servers = 2; + */ + public java.util.List + getServersBuilderList() { + return getServersFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPortOrBuilder> + getServersFieldBuilder() { + if (serversBuilder_ == null) { + serversBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPortOrBuilder>( + servers_, + ((bitField0_ & 0x00000002) == 0x00000002), + getParentForChildren(), + isClean()); + servers_ = null; + } + return serversBuilder_; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.MoveServersRequest) + } + + static { + defaultInstance = new MoveServersRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.MoveServersRequest) + } + + public interface MoveServersResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + } + /** + * Protobuf type {@code hbase.pb.MoveServersResponse} + */ + public static final class MoveServersResponse extends + com.google.protobuf.GeneratedMessage + implements MoveServersResponseOrBuilder { + // Use MoveServersResponse.newBuilder() to construct. 
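Tying the MoveServersRequest pieces together before MoveServersResponse begins: target_group is required and servers is a repeated message, so a well-formed request needs the group name plus zero or more fully initialized HostPort entries. In this sketch the hostName/port accessors of hbase.pb.HostPort are assumptions inferred from the HostPort message this patch references; the builder calls themselves are the generated ones shown above.

    import org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersRequest;
    import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort;

    public class MoveServersRequestSketch {
      public static void main(String[] args) {
        // Assumption: hbase.pb.HostPort has required host_name/port fields.
        HostPort server = HostPort.newBuilder()
            .setHostName("rs1.example.com")
            .setPort(16020)
            .build();

        // target_group is required; servers may repeat, one entry per
        // region server being moved into the group.
        MoveServersRequest request = MoveServersRequest.newBuilder()
            .setTargetGroup("batch_group")
            .addServers(server)
            .build();
        System.out.println(request.getServersCount()); // 1
      }
    }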
+ private MoveServersResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private MoveServersResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final MoveServersResponse defaultInstance; + public static MoveServersResponse getDefaultInstance() { + return defaultInstance; + } + + public MoveServersResponse getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private MoveServersResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_MoveServersResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_MoveServersResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersResponse.class, org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersResponse.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public MoveServersResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new MoveServersResponse(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private void initFields() { + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static 
final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersResponse other = (org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersResponse) obj; + + boolean result = true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersResponse parseFrom( + com.google.protobuf.CodedInputStream input, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.MoveServersResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_MoveServersResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_MoveServersResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersResponse.class, org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersResponse.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_MoveServersResponse_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersResponse build() { + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersResponse result = new org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersResponse(this); + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersResponse) 
{ + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersResponse.getDefaultInstance()) return this; + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersResponse) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.MoveServersResponse) + } + + static { + defaultInstance = new MoveServersResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.MoveServersResponse) + } + + public interface MoveTablesRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string target_group = 1; + /** + * required string target_group = 1; + */ + boolean hasTargetGroup(); + /** + * required string target_group = 1; + */ + java.lang.String getTargetGroup(); + /** + * required string target_group = 1; + */ + com.google.protobuf.ByteString + getTargetGroupBytes(); + + // repeated .hbase.pb.TableName table_name = 2; + /** + * repeated .hbase.pb.TableName table_name = 2; + */ + java.util.List + getTableNameList(); + /** + * repeated .hbase.pb.TableName table_name = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName(int index); + /** + * repeated .hbase.pb.TableName table_name = 2; + */ + int getTableNameCount(); + /** + * repeated .hbase.pb.TableName table_name = 2; + */ + java.util.List + getTableNameOrBuilderList(); + /** + * repeated .hbase.pb.TableName table_name = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder( + int index); + } + /** + * Protobuf type {@code hbase.pb.MoveTablesRequest} + */ + public static final class MoveTablesRequest extends + com.google.protobuf.GeneratedMessage + implements MoveTablesRequestOrBuilder { + // Use MoveTablesRequest.newBuilder() to construct. 
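+ // Editor's note: the writeTo/parseFrom pairs above give the standard protobuf wire round-trip.
+ // A hedged sketch of what that looks like for MoveServersResponse (hypothetical caller code;
+ // toByteArray() comes from the protobuf message base class):
+ //
+ //   GroupAdminProtos.MoveServersResponse resp =
+ //       GroupAdminProtos.MoveServersResponse.getDefaultInstance();
+ //   byte[] wire = resp.toByteArray();                          // serialize
+ //   GroupAdminProtos.MoveServersResponse parsed =
+ //       GroupAdminProtos.MoveServersResponse.parseFrom(wire);  // PARSER-backed parse
+ //
+ // Since MoveServersResponse declares no fields, only unknown fields (if any) survive the trip.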
+ private MoveTablesRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private MoveTablesRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final MoveTablesRequest defaultInstance; + public static MoveTablesRequest getDefaultInstance() { + return defaultInstance; + } + + public MoveTablesRequest getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private MoveTablesRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + targetGroup_ = input.readBytes(); + break; + } + case 18: { + if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + tableName_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000002; + } + tableName_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + tableName_ = java.util.Collections.unmodifiableList(tableName_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_MoveTablesRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_MoveTablesRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesRequest.class, org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesRequest.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public MoveTablesRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new MoveTablesRequest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string target_group = 1; + public static final int TARGET_GROUP_FIELD_NUMBER = 1; + private java.lang.Object targetGroup_; + /** + * required string 
target_group = 1; + */ + public boolean hasTargetGroup() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string target_group = 1; + */ + public java.lang.String getTargetGroup() { + java.lang.Object ref = targetGroup_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + targetGroup_ = s; + } + return s; + } + } + /** + * required string target_group = 1; + */ + public com.google.protobuf.ByteString + getTargetGroupBytes() { + java.lang.Object ref = targetGroup_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + targetGroup_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // repeated .hbase.pb.TableName table_name = 2; + public static final int TABLE_NAME_FIELD_NUMBER = 2; + private java.util.List tableName_; + /** + * repeated .hbase.pb.TableName table_name = 2; + */ + public java.util.List getTableNameList() { + return tableName_; + } + /** + * repeated .hbase.pb.TableName table_name = 2; + */ + public java.util.List + getTableNameOrBuilderList() { + return tableName_; + } + /** + * repeated .hbase.pb.TableName table_name = 2; + */ + public int getTableNameCount() { + return tableName_.size(); + } + /** + * repeated .hbase.pb.TableName table_name = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName(int index) { + return tableName_.get(index); + } + /** + * repeated .hbase.pb.TableName table_name = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder( + int index) { + return tableName_.get(index); + } + + private void initFields() { + targetGroup_ = ""; + tableName_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasTargetGroup()) { + memoizedIsInitialized = 0; + return false; + } + for (int i = 0; i < getTableNameCount(); i++) { + if (!getTableName(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getTargetGroupBytes()); + } + for (int i = 0; i < tableName_.size(); i++) { + output.writeMessage(2, tableName_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getTargetGroupBytes()); + } + for (int i = 0; i < tableName_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, tableName_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return 
super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesRequest other = (org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesRequest) obj; + + boolean result = true; + result = result && (hasTargetGroup() == other.hasTargetGroup()); + if (hasTargetGroup()) { + result = result && getTargetGroup() + .equals(other.getTargetGroup()); + } + result = result && getTableNameList() + .equals(other.getTableNameList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasTargetGroup()) { + hash = (37 * hash) + TARGET_GROUP_FIELD_NUMBER; + hash = (53 * hash) + getTargetGroup().hashCode(); + } + if (getTableNameCount() > 0) { + hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER; + hash = (53 * hash) + getTableNameList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static 
org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.MoveTablesRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_MoveTablesRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_MoveTablesRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesRequest.class, org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesRequest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getTableNameFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + targetGroup_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + if (tableNameBuilder_ == null) { + tableName_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + } else { + tableNameBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_MoveTablesRequest_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesRequest build() { + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesRequest result = buildPartial(); + if 
(!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesRequest result = new org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.targetGroup_ = targetGroup_; + if (tableNameBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { + tableName_ = java.util.Collections.unmodifiableList(tableName_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.tableName_ = tableName_; + } else { + result.tableName_ = tableNameBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesRequest.getDefaultInstance()) return this; + if (other.hasTargetGroup()) { + bitField0_ |= 0x00000001; + targetGroup_ = other.targetGroup_; + onChanged(); + } + if (tableNameBuilder_ == null) { + if (!other.tableName_.isEmpty()) { + if (tableName_.isEmpty()) { + tableName_ = other.tableName_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureTableNameIsMutable(); + tableName_.addAll(other.tableName_); + } + onChanged(); + } + } else { + if (!other.tableName_.isEmpty()) { + if (tableNameBuilder_.isEmpty()) { + tableNameBuilder_.dispose(); + tableNameBuilder_ = null; + tableName_ = other.tableName_; + bitField0_ = (bitField0_ & ~0x00000002); + tableNameBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
+ getTableNameFieldBuilder() : null; + } else { + tableNameBuilder_.addAllMessages(other.tableName_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasTargetGroup()) { + + return false; + } + for (int i = 0; i < getTableNameCount(); i++) { + if (!getTableName(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string target_group = 1; + private java.lang.Object targetGroup_ = ""; + /** + * required string target_group = 1; + */ + public boolean hasTargetGroup() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string target_group = 1; + */ + public java.lang.String getTargetGroup() { + java.lang.Object ref = targetGroup_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + targetGroup_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string target_group = 1; + */ + public com.google.protobuf.ByteString + getTargetGroupBytes() { + java.lang.Object ref = targetGroup_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + targetGroup_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string target_group = 1; + */ + public Builder setTargetGroup( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + targetGroup_ = value; + onChanged(); + return this; + } + /** + * required string target_group = 1; + */ + public Builder clearTargetGroup() { + bitField0_ = (bitField0_ & ~0x00000001); + targetGroup_ = getDefaultInstance().getTargetGroup(); + onChanged(); + return this; + } + /** + * required string target_group = 1; + */ + public Builder setTargetGroupBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + targetGroup_ = value; + onChanged(); + return this; + } + + // repeated .hbase.pb.TableName table_name = 2; + private java.util.List tableName_ = + java.util.Collections.emptyList(); + private void ensureTableNameIsMutable() { + if (!((bitField0_ & 0x00000002) == 0x00000002)) { + tableName_ = new java.util.ArrayList(tableName_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableNameBuilder_; + + /** + * repeated .hbase.pb.TableName table_name = 2; + */ + public java.util.List getTableNameList() { + if (tableNameBuilder_ == null) 
{ + return java.util.Collections.unmodifiableList(tableName_); + } else { + return tableNameBuilder_.getMessageList(); + } + } + /** + * repeated .hbase.pb.TableName table_name = 2; + */ + public int getTableNameCount() { + if (tableNameBuilder_ == null) { + return tableName_.size(); + } else { + return tableNameBuilder_.getCount(); + } + } + /** + * repeated .hbase.pb.TableName table_name = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName(int index) { + if (tableNameBuilder_ == null) { + return tableName_.get(index); + } else { + return tableNameBuilder_.getMessage(index); + } + } + /** + * repeated .hbase.pb.TableName table_name = 2; + */ + public Builder setTableName( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTableNameIsMutable(); + tableName_.set(index, value); + onChanged(); + } else { + tableNameBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_name = 2; + */ + public Builder setTableName( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableNameBuilder_ == null) { + ensureTableNameIsMutable(); + tableName_.set(index, builderForValue.build()); + onChanged(); + } else { + tableNameBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_name = 2; + */ + public Builder addTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTableNameIsMutable(); + tableName_.add(value); + onChanged(); + } else { + tableNameBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_name = 2; + */ + public Builder addTableName( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTableNameIsMutable(); + tableName_.add(index, value); + onChanged(); + } else { + tableNameBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_name = 2; + */ + public Builder addTableName( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableNameBuilder_ == null) { + ensureTableNameIsMutable(); + tableName_.add(builderForValue.build()); + onChanged(); + } else { + tableNameBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_name = 2; + */ + public Builder addTableName( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableNameBuilder_ == null) { + ensureTableNameIsMutable(); + tableName_.add(index, builderForValue.build()); + onChanged(); + } else { + tableNameBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_name = 2; + */ + public Builder addAllTableName( + java.lang.Iterable values) { + if (tableNameBuilder_ == null) { + ensureTableNameIsMutable(); + super.addAll(values, tableName_); + onChanged(); + } else { + tableNameBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_name = 2; + */ + public Builder 
clearTableName() { + if (tableNameBuilder_ == null) { + tableName_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + tableNameBuilder_.clear(); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_name = 2; + */ + public Builder removeTableName(int index) { + if (tableNameBuilder_ == null) { + ensureTableNameIsMutable(); + tableName_.remove(index); + onChanged(); + } else { + tableNameBuilder_.remove(index); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_name = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableNameBuilder( + int index) { + return getTableNameFieldBuilder().getBuilder(index); + } + /** + * repeated .hbase.pb.TableName table_name = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder( + int index) { + if (tableNameBuilder_ == null) { + return tableName_.get(index); } else { + return tableNameBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .hbase.pb.TableName table_name = 2; + */ + public java.util.List + getTableNameOrBuilderList() { + if (tableNameBuilder_ != null) { + return tableNameBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(tableName_); + } + } + /** + * repeated .hbase.pb.TableName table_name = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder addTableNameBuilder() { + return getTableNameFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()); + } + /** + * repeated .hbase.pb.TableName table_name = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder addTableNameBuilder( + int index) { + return getTableNameFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()); + } + /** + * repeated .hbase.pb.TableName table_name = 2; + */ + public java.util.List + getTableNameBuilderList() { + return getTableNameFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> + getTableNameFieldBuilder() { + if (tableNameBuilder_ == null) { + tableNameBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>( + tableName_, + ((bitField0_ & 0x00000002) == 0x00000002), + getParentForChildren(), + isClean()); + tableName_ = null; + } + return tableNameBuilder_; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.MoveTablesRequest) + } + + static { + defaultInstance = new MoveTablesRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.MoveTablesRequest) + } + + public interface MoveTablesResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + } + /** + * Protobuf type {@code hbase.pb.MoveTablesResponse} + */ + public static final class MoveTablesResponse extends + com.google.protobuf.GeneratedMessage + implements MoveTablesResponseOrBuilder { + // Use MoveTablesResponse.newBuilder() to construct. 
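+ // Editor's note: a hypothetical sketch of assembling the MoveTablesRequest defined above,
+ // assuming HBaseProtos.TableName carries bytes namespace/qualifier fields with the usual
+ // generated ByteString setters:
+ //
+ //   GroupAdminProtos.MoveTablesRequest request =
+ //       GroupAdminProtos.MoveTablesRequest.newBuilder()
+ //           .setTargetGroup("my_group")                        // required string target_group = 1
+ //           .addTableName(HBaseProtos.TableName.newBuilder()   // addTableName(Builder) overload above
+ //               .setNamespace(com.google.protobuf.ByteString.copyFromUtf8("default"))
+ //               .setQualifier(com.google.protobuf.ByteString.copyFromUtf8("t1")))
+ //           .build();                                          // throws if target_group is unset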
+ private MoveTablesResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private MoveTablesResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final MoveTablesResponse defaultInstance; + public static MoveTablesResponse getDefaultInstance() { + return defaultInstance; + } + + public MoveTablesResponse getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private MoveTablesResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_MoveTablesResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_MoveTablesResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesResponse.class, org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesResponse.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public MoveTablesResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new MoveTablesResponse(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private void initFields() { + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long 
serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesResponse other = (org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesResponse) obj; + + boolean result = true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesResponse parseFrom( + com.google.protobuf.CodedInputStream input, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.MoveTablesResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_MoveTablesResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_MoveTablesResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesResponse.class, org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesResponse.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_MoveTablesResponse_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesResponse build() { + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesResponse result = new org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesResponse(this); + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesResponse) { + return 
mergeFrom((org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesResponse.getDefaultInstance()) return this; + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesResponse) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.MoveTablesResponse) + } + + static { + defaultInstance = new MoveTablesResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.MoveTablesResponse) + } + + public interface AddGroupRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string group_name = 1; + /** + * required string group_name = 1; + */ + boolean hasGroupName(); + /** + * required string group_name = 1; + */ + java.lang.String getGroupName(); + /** + * required string group_name = 1; + */ + com.google.protobuf.ByteString + getGroupNameBytes(); + } + /** + * Protobuf type {@code hbase.pb.AddGroupRequest} + */ + public static final class AddGroupRequest extends + com.google.protobuf.GeneratedMessage + implements AddGroupRequestOrBuilder { + // Use AddGroupRequest.newBuilder() to construct. 
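+ // Editor's note: a minimal hypothetical sketch for the single-field AddGroupRequest above,
+ // assuming the builder exposes the usual generated setGroupName(String) setter (the
+ // hasGroupName/getGroupName accessors are visible below):
+ //
+ //   GroupAdminProtos.AddGroupRequest request =
+ //       GroupAdminProtos.AddGroupRequest.newBuilder()
+ //           .setGroupName("my_group")   // required string group_name = 1
+ //           .build();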
+ private AddGroupRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private AddGroupRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final AddGroupRequest defaultInstance; + public static AddGroupRequest getDefaultInstance() { + return defaultInstance; + } + + public AddGroupRequest getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private AddGroupRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + groupName_ = input.readBytes(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_AddGroupRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_AddGroupRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupRequest.class, org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupRequest.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public AddGroupRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new AddGroupRequest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string group_name = 1; + public static final int GROUP_NAME_FIELD_NUMBER = 1; + private java.lang.Object groupName_; + /** + * required string group_name = 1; + */ + public boolean hasGroupName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string group_name = 1; + */ + public java.lang.String getGroupName() { + java.lang.Object ref = groupName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if 
(bs.isValidUtf8()) { + groupName_ = s; + } + return s; + } + } + /** + * required string group_name = 1; + */ + public com.google.protobuf.ByteString + getGroupNameBytes() { + java.lang.Object ref = groupName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + groupName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private void initFields() { + groupName_ = ""; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasGroupName()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getGroupNameBytes()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getGroupNameBytes()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupRequest other = (org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupRequest) obj; + + boolean result = true; + result = result && (hasGroupName() == other.hasGroupName()); + if (hasGroupName()) { + result = result && getGroupName() + .equals(other.getGroupName()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasGroupName()) { + hash = (37 * hash) + GROUP_NAME_FIELD_NUMBER; + hash = (53 * hash) + getGroupName().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupRequest parseFrom(byte[] data) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.AddGroupRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_AddGroupRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_AddGroupRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupRequest.class, org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupRequest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + 
private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + groupName_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_AddGroupRequest_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupRequest build() { + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupRequest result = new org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.groupName_ = groupName_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupRequest.getDefaultInstance()) return this; + if (other.hasGroupName()) { + bitField0_ |= 0x00000001; + groupName_ = other.groupName_; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasGroupName()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string group_name = 1; + private java.lang.Object groupName_ = ""; + /** + * required string group_name = 1; + */ + public boolean hasGroupName() { + return ((bitField0_ & 0x00000001) == 
0x00000001); + } + /** + * required string group_name = 1; + */ + public java.lang.String getGroupName() { + java.lang.Object ref = groupName_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + groupName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string group_name = 1; + */ + public com.google.protobuf.ByteString + getGroupNameBytes() { + java.lang.Object ref = groupName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + groupName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string group_name = 1; + */ + public Builder setGroupName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + groupName_ = value; + onChanged(); + return this; + } + /** + * required string group_name = 1; + */ + public Builder clearGroupName() { + bitField0_ = (bitField0_ & ~0x00000001); + groupName_ = getDefaultInstance().getGroupName(); + onChanged(); + return this; + } + /** + * required string group_name = 1; + */ + public Builder setGroupNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + groupName_ = value; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.AddGroupRequest) + } + + static { + defaultInstance = new AddGroupRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.AddGroupRequest) + } + + public interface AddGroupResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + } + /** + * Protobuf type {@code hbase.pb.AddGroupResponse} + */ + public static final class AddGroupResponse extends + com.google.protobuf.GeneratedMessage + implements AddGroupResponseOrBuilder { + // Use AddGroupResponse.newBuilder() to construct. 
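+ // Illustrative note (assumed; not part of the protoc output): a caller
+ // builds the paired request as
+ //   AddGroupRequest req = AddGroupRequest.newBuilder().setGroupName("my_group").build();
+ // and, on success, receives this empty AddGroupResponse as a bare acknowledgement.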
+ private AddGroupResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private AddGroupResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final AddGroupResponse defaultInstance; + public static AddGroupResponse getDefaultInstance() { + return defaultInstance; + } + + public AddGroupResponse getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private AddGroupResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_AddGroupResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_AddGroupResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupResponse.class, org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupResponse.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public AddGroupResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new AddGroupResponse(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private void initFields() { + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + 
@java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupResponse other = (org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupResponse) obj; + + boolean result = true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.AddGroupResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_AddGroupResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_AddGroupResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupResponse.class, org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupResponse.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_AddGroupResponse_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupResponse build() { + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupResponse result = new org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupResponse(this); + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupResponse)other); + } else { + 
super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupResponse.getDefaultInstance()) return this; + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupResponse) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.AddGroupResponse) + } + + static { + defaultInstance = new AddGroupResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.AddGroupResponse) + } + + public interface RemoveGroupRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string group_name = 1; + /** + * required string group_name = 1; + */ + boolean hasGroupName(); + /** + * required string group_name = 1; + */ + java.lang.String getGroupName(); + /** + * required string group_name = 1; + */ + com.google.protobuf.ByteString + getGroupNameBytes(); + } + /** + * Protobuf type {@code hbase.pb.RemoveGroupRequest} + */ + public static final class RemoveGroupRequest extends + com.google.protobuf.GeneratedMessage + implements RemoveGroupRequestOrBuilder { + // Use RemoveGroupRequest.newBuilder() to construct. 
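+ // Illustrative note (assumed; not part of the protoc output):
+ //   RemoveGroupRequest req =
+ //       RemoveGroupRequest.newBuilder().setGroupName("my_group").build();
+ // group_name is a required field, so build() throws an
+ // UninitializedMessageException when it was never set.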
+ private RemoveGroupRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private RemoveGroupRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final RemoveGroupRequest defaultInstance; + public static RemoveGroupRequest getDefaultInstance() { + return defaultInstance; + } + + public RemoveGroupRequest getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private RemoveGroupRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + groupName_ = input.readBytes(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_RemoveGroupRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_RemoveGroupRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupRequest.class, org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupRequest.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public RemoveGroupRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new RemoveGroupRequest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string group_name = 1; + public static final int GROUP_NAME_FIELD_NUMBER = 1; + private java.lang.Object groupName_; + /** + * required string group_name = 1; + */ + public boolean hasGroupName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string group_name = 1; + */ + public java.lang.String getGroupName() { + java.lang.Object ref = groupName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = 
bs.toStringUtf8(); + if (bs.isValidUtf8()) { + groupName_ = s; + } + return s; + } + } + /** + * required string group_name = 1; + */ + public com.google.protobuf.ByteString + getGroupNameBytes() { + java.lang.Object ref = groupName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + groupName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private void initFields() { + groupName_ = ""; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasGroupName()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getGroupNameBytes()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getGroupNameBytes()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupRequest other = (org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupRequest) obj; + + boolean result = true; + result = result && (hasGroupName() == other.hasGroupName()); + if (hasGroupName()) { + result = result && getGroupName() + .equals(other.getGroupName()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasGroupName()) { + hash = (37 * hash) + GROUP_NAME_FIELD_NUMBER; + hash = (53 * hash) + getGroupName().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupRequest 
parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.RemoveGroupRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_RemoveGroupRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_RemoveGroupRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupRequest.class, org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupRequest.Builder.class); + } + + // Construct using 
org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + groupName_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_RemoveGroupRequest_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupRequest build() { + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupRequest result = new org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.groupName_ = groupName_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupRequest.getDefaultInstance()) return this; + if (other.hasGroupName()) { + bitField0_ |= 0x00000001; + groupName_ = other.groupName_; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasGroupName()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + 
// required string group_name = 1; + private java.lang.Object groupName_ = ""; + /** + * required string group_name = 1; + */ + public boolean hasGroupName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string group_name = 1; + */ + public java.lang.String getGroupName() { + java.lang.Object ref = groupName_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + groupName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string group_name = 1; + */ + public com.google.protobuf.ByteString + getGroupNameBytes() { + java.lang.Object ref = groupName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + groupName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string group_name = 1; + */ + public Builder setGroupName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + groupName_ = value; + onChanged(); + return this; + } + /** + * required string group_name = 1; + */ + public Builder clearGroupName() { + bitField0_ = (bitField0_ & ~0x00000001); + groupName_ = getDefaultInstance().getGroupName(); + onChanged(); + return this; + } + /** + * required string group_name = 1; + */ + public Builder setGroupNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + groupName_ = value; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.RemoveGroupRequest) + } + + static { + defaultInstance = new RemoveGroupRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.RemoveGroupRequest) + } + + public interface RemoveGroupResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + } + /** + * Protobuf type {@code hbase.pb.RemoveGroupResponse} + */ + public static final class RemoveGroupResponse extends + com.google.protobuf.GeneratedMessage + implements RemoveGroupResponseOrBuilder { + // Use RemoveGroupResponse.newBuilder() to construct. 
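+ // Illustrative round trip (assumed; not part of the protoc output):
+ //   byte[] bytes = RemoveGroupResponse.getDefaultInstance().toByteArray();
+ //   RemoveGroupResponse resp = RemoveGroupResponse.parseFrom(bytes);
+ // The message declares no fields, so the serialized form is empty apart
+ // from any unknown fields carried through from the wire.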
+ private RemoveGroupResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private RemoveGroupResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final RemoveGroupResponse defaultInstance; + public static RemoveGroupResponse getDefaultInstance() { + return defaultInstance; + } + + public RemoveGroupResponse getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private RemoveGroupResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_RemoveGroupResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_RemoveGroupResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupResponse.class, org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupResponse.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public RemoveGroupResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new RemoveGroupResponse(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private void initFields() { + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static 
final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupResponse other = (org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupResponse) obj; + + boolean result = true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupResponse parseFrom( + com.google.protobuf.CodedInputStream input, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.RemoveGroupResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_RemoveGroupResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_RemoveGroupResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupResponse.class, org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupResponse.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_RemoveGroupResponse_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupResponse build() { + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupResponse result = new org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupResponse(this); + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupResponse) 
{ + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupResponse.getDefaultInstance()) return this; + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupResponse) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.RemoveGroupResponse) + } + + static { + defaultInstance = new RemoveGroupResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.RemoveGroupResponse) + } + + public interface BalanceGroupRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string group_name = 1; + /** + * required string group_name = 1; + */ + boolean hasGroupName(); + /** + * required string group_name = 1; + */ + java.lang.String getGroupName(); + /** + * required string group_name = 1; + */ + com.google.protobuf.ByteString + getGroupNameBytes(); + } + /** + * Protobuf type {@code hbase.pb.BalanceGroupRequest} + */ + public static final class BalanceGroupRequest extends + com.google.protobuf.GeneratedMessage + implements BalanceGroupRequestOrBuilder { + // Use BalanceGroupRequest.newBuilder() to construct. 
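+ // Illustrative note (assumed; not part of the protoc output):
+ //   BalanceGroupRequest req =
+ //       BalanceGroupRequest.newBuilder().setGroupName("my_group").build();
+ // As with the other requests in this file, group_name is required and is
+ // checked by isInitialized() before the message can be built.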
+ private BalanceGroupRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private BalanceGroupRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final BalanceGroupRequest defaultInstance; + public static BalanceGroupRequest getDefaultInstance() { + return defaultInstance; + } + + public BalanceGroupRequest getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private BalanceGroupRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + groupName_ = input.readBytes(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_BalanceGroupRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_BalanceGroupRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupRequest.class, org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupRequest.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public BalanceGroupRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new BalanceGroupRequest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string group_name = 1; + public static final int GROUP_NAME_FIELD_NUMBER = 1; + private java.lang.Object groupName_; + /** + * required string group_name = 1; + */ + public boolean hasGroupName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string group_name = 1; + */ + public java.lang.String getGroupName() { + java.lang.Object ref = groupName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + 
java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + groupName_ = s; + } + return s; + } + } + /** + * required string group_name = 1; + */ + public com.google.protobuf.ByteString + getGroupNameBytes() { + java.lang.Object ref = groupName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + groupName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private void initFields() { + groupName_ = ""; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasGroupName()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getGroupNameBytes()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getGroupNameBytes()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupRequest other = (org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupRequest) obj; + + boolean result = true; + result = result && (hasGroupName() == other.hasGroupName()); + if (hasGroupName()) { + result = result && getGroupName() + .equals(other.getGroupName()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasGroupName()) { + hash = (37 * hash) + GROUP_NAME_FIELD_NUMBER; + hash = (53 * hash) + getGroupName().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static 
org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.BalanceGroupRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_BalanceGroupRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_BalanceGroupRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupRequest.class, org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupRequest.Builder.class); + } + + // Construct 
using org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + groupName_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_BalanceGroupRequest_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupRequest build() { + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupRequest result = new org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.groupName_ = groupName_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupRequest.getDefaultInstance()) return this; + if (other.hasGroupName()) { + bitField0_ |= 0x00000001; + groupName_ = other.groupName_; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasGroupName()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private 
int bitField0_; + + // required string group_name = 1; + private java.lang.Object groupName_ = ""; + /** + * required string group_name = 1; + */ + public boolean hasGroupName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string group_name = 1; + */ + public java.lang.String getGroupName() { + java.lang.Object ref = groupName_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + groupName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string group_name = 1; + */ + public com.google.protobuf.ByteString + getGroupNameBytes() { + java.lang.Object ref = groupName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + groupName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string group_name = 1; + */ + public Builder setGroupName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + groupName_ = value; + onChanged(); + return this; + } + /** + * required string group_name = 1; + */ + public Builder clearGroupName() { + bitField0_ = (bitField0_ & ~0x00000001); + groupName_ = getDefaultInstance().getGroupName(); + onChanged(); + return this; + } + /** + * required string group_name = 1; + */ + public Builder setGroupNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + groupName_ = value; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.BalanceGroupRequest) + } + + static { + defaultInstance = new BalanceGroupRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.BalanceGroupRequest) + } + + public interface BalanceGroupResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required bool balanceRan = 1; + /** + * required bool balanceRan = 1; + */ + boolean hasBalanceRan(); + /** + * required bool balanceRan = 1; + */ + boolean getBalanceRan(); + } + /** + * Protobuf type {@code hbase.pb.BalanceGroupResponse} + */ + public static final class BalanceGroupResponse extends + com.google.protobuf.GeneratedMessage + implements BalanceGroupResponseOrBuilder { + // Use BalanceGroupResponse.newBuilder() to construct. 
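+ // A minimal usage sketch for the message above (illustrative only; the
+ // group name "my_group" is a made-up example): a caller builds a
+ // BalanceGroupRequest through the generated Builder and can round-trip it
+ // through the wire format with the parseFrom overloads shown above.
+ //
+ //   BalanceGroupRequest request = BalanceGroupRequest.newBuilder()
+ //       .setGroupName("my_group")
+ //       .build();                 // build() rejects an unset group_name
+ //   byte[] wire = request.toByteArray();
+ //   BalanceGroupRequest parsed = BalanceGroupRequest.parseFrom(wire);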
+ private BalanceGroupResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private BalanceGroupResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final BalanceGroupResponse defaultInstance; + public static BalanceGroupResponse getDefaultInstance() { + return defaultInstance; + } + + public BalanceGroupResponse getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private BalanceGroupResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + balanceRan_ = input.readBool(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_BalanceGroupResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_BalanceGroupResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupResponse.class, org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupResponse.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public BalanceGroupResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new BalanceGroupResponse(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required bool balanceRan = 1; + public static final int BALANCERAN_FIELD_NUMBER = 1; + private boolean balanceRan_; + /** + * required bool balanceRan = 1; + */ + public boolean hasBalanceRan() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required bool balanceRan = 1; + */ + public boolean getBalanceRan() { + return balanceRan_; + } + + private void initFields() { + balanceRan_ = false; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if 
(isInitialized != -1) return isInitialized == 1; + + if (!hasBalanceRan()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBool(1, balanceRan_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(1, balanceRan_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupResponse other = (org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupResponse) obj; + + boolean result = true; + result = result && (hasBalanceRan() == other.hasBalanceRan()); + if (hasBalanceRan()) { + result = result && (getBalanceRan() + == other.getBalanceRan()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasBalanceRan()) { + hash = (37 * hash) + BALANCERAN_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getBalanceRan()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static 
org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.BalanceGroupResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_BalanceGroupResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_BalanceGroupResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupResponse.class, org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupResponse.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + balanceRan_ = false; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + 
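+ // Sketch of the server side of a balance call (illustrative only; the
+ // local variable 'ran' is hypothetical): the response carries the required
+ // balanceRan flag set via the Builder defined in this class.
+ //
+ //   BalanceGroupResponse response = BalanceGroupResponse.newBuilder()
+ //       .setBalanceRan(ran)
+ //       .build();                 // isInitialized() enforces balanceRan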
public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_BalanceGroupResponse_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupResponse build() { + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupResponse result = new org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupResponse(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.balanceRan_ = balanceRan_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupResponse.getDefaultInstance()) return this; + if (other.hasBalanceRan()) { + setBalanceRan(other.getBalanceRan()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasBalanceRan()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupResponse) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required bool balanceRan = 1; + private boolean balanceRan_ ; + /** + * required bool balanceRan = 1; + */ + public boolean hasBalanceRan() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required bool balanceRan = 1; + */ + public boolean getBalanceRan() { + return balanceRan_; + } + /** + * required bool balanceRan = 1; + */ + public Builder setBalanceRan(boolean value) { + bitField0_ |= 0x00000001; + balanceRan_ = value; + onChanged(); + return this; + } + /** + * required bool balanceRan = 1; + */ + public Builder clearBalanceRan() { + bitField0_ = (bitField0_ & ~0x00000001); + balanceRan_ = false; + onChanged(); + return this; + } + + // 
@@protoc_insertion_point(builder_scope:hbase.pb.BalanceGroupResponse) + } + + static { + defaultInstance = new BalanceGroupResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.BalanceGroupResponse) + } + + public interface ListGroupInfosRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + } + /** + * Protobuf type {@code hbase.pb.ListGroupInfosRequest} + */ + public static final class ListGroupInfosRequest extends + com.google.protobuf.GeneratedMessage + implements ListGroupInfosRequestOrBuilder { + // Use ListGroupInfosRequest.newBuilder() to construct. + private ListGroupInfosRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private ListGroupInfosRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final ListGroupInfosRequest defaultInstance; + public static ListGroupInfosRequest getDefaultInstance() { + return defaultInstance; + } + + public ListGroupInfosRequest getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ListGroupInfosRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_ListGroupInfosRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_ListGroupInfosRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosRequest.class, org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosRequest.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public ListGroupInfosRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ListGroupInfosRequest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private void initFields() { + } + private byte 
memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosRequest other = (org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosRequest) obj; + + boolean result = true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + 
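+ // Illustrative note on the parseDelimitedFrom overloads above: they pair
+ // with MessageLite.writeDelimitedTo, which length-prefixes each message so
+ // several can share one stream ('out' and 'in' are assumed streams):
+ //
+ //   request.writeDelimitedTo(out);                            // producer
+ //   ListGroupInfosRequest req =
+ //       ListGroupInfosRequest.parseDelimitedFrom(in);         // consumer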
public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.ListGroupInfosRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_ListGroupInfosRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_ListGroupInfosRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosRequest.class, org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosRequest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_ListGroupInfosRequest_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosRequest build() 
{ + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosRequest result = new org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosRequest(this); + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosRequest.getDefaultInstance()) return this; + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.ListGroupInfosRequest) + } + + static { + defaultInstance = new ListGroupInfosRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.ListGroupInfosRequest) + } + + public interface ListGroupInfosResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // repeated .hbase.pb.GroupInfo group_info = 1; + /** + * repeated .hbase.pb.GroupInfo group_info = 1; + */ + java.util.List + getGroupInfoList(); + /** + * repeated .hbase.pb.GroupInfo group_info = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo getGroupInfo(int index); + /** + * repeated .hbase.pb.GroupInfo group_info = 1; + */ + int getGroupInfoCount(); + /** + * repeated .hbase.pb.GroupInfo group_info = 1; + */ + java.util.List + getGroupInfoOrBuilderList(); + /** + * repeated .hbase.pb.GroupInfo group_info = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfoOrBuilder getGroupInfoOrBuilder( + int index); + } + /** + * Protobuf type {@code hbase.pb.ListGroupInfosResponse} + */ + public static final class ListGroupInfosResponse extends + com.google.protobuf.GeneratedMessage + implements ListGroupInfosResponseOrBuilder { + // Use ListGroupInfosResponse.newBuilder() to construct. 
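+ // Sketch (illustrative only): ListGroupInfosRequest declares no fields, so
+ // the shared default instance is the cheapest way to form the request:
+ //
+ //   ListGroupInfosRequest req = ListGroupInfosRequest.getDefaultInstance();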
+ private ListGroupInfosResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private ListGroupInfosResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final ListGroupInfosResponse defaultInstance; + public static ListGroupInfosResponse getDefaultInstance() { + return defaultInstance; + } + + public ListGroupInfosResponse getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ListGroupInfosResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + groupInfo_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; + } + groupInfo_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo.PARSER, extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + groupInfo_ = java.util.Collections.unmodifiableList(groupInfo_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_ListGroupInfosResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_ListGroupInfosResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosResponse.class, org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosResponse.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public ListGroupInfosResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ListGroupInfosResponse(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + // repeated .hbase.pb.GroupInfo group_info = 1; + public static final int GROUP_INFO_FIELD_NUMBER = 1; + private java.util.List groupInfo_; + /** + * repeated .hbase.pb.GroupInfo group_info = 1; + */ + public java.util.List 
getGroupInfoList() { + return groupInfo_; + } + /** + * repeated .hbase.pb.GroupInfo group_info = 1; + */ + public java.util.List + getGroupInfoOrBuilderList() { + return groupInfo_; + } + /** + * repeated .hbase.pb.GroupInfo group_info = 1; + */ + public int getGroupInfoCount() { + return groupInfo_.size(); + } + /** + * repeated .hbase.pb.GroupInfo group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo getGroupInfo(int index) { + return groupInfo_.get(index); + } + /** + * repeated .hbase.pb.GroupInfo group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfoOrBuilder getGroupInfoOrBuilder( + int index) { + return groupInfo_.get(index); + } + + private void initFields() { + groupInfo_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + for (int i = 0; i < getGroupInfoCount(); i++) { + if (!getGroupInfo(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + for (int i = 0; i < groupInfo_.size(); i++) { + output.writeMessage(1, groupInfo_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < groupInfo_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, groupInfo_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosResponse other = (org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosResponse) obj; + + boolean result = true; + result = result && getGroupInfoList() + .equals(other.getGroupInfoList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (getGroupInfoCount() > 0) { + hash = (37 * hash) + GROUP_INFO_FIELD_NUMBER; + hash = (53 * hash) + getGroupInfoList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosResponse parseFrom( + 
com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.ListGroupInfosResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_ListGroupInfosResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return 
org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_ListGroupInfosResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosResponse.class, org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosResponse.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getGroupInfoFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (groupInfoBuilder_ == null) { + groupInfo_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + } else { + groupInfoBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_ListGroupInfosResponse_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosResponse build() { + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosResponse result = new org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosResponse(this); + int from_bitField0_ = bitField0_; + if (groupInfoBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + groupInfo_ = java.util.Collections.unmodifiableList(groupInfo_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.groupInfo_ = groupInfo_; + } else { + result.groupInfo_ = groupInfoBuilder_.build(); + } + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosResponse.getDefaultInstance()) return this; + if (groupInfoBuilder_ == null) { + if (!other.groupInfo_.isEmpty()) { + if (groupInfo_.isEmpty()) { + groupInfo_ = other.groupInfo_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureGroupInfoIsMutable(); + groupInfo_.addAll(other.groupInfo_); + } + onChanged(); + } + } else 
{ + if (!other.groupInfo_.isEmpty()) { + if (groupInfoBuilder_.isEmpty()) { + groupInfoBuilder_.dispose(); + groupInfoBuilder_ = null; + groupInfo_ = other.groupInfo_; + bitField0_ = (bitField0_ & ~0x00000001); + groupInfoBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getGroupInfoFieldBuilder() : null; + } else { + groupInfoBuilder_.addAllMessages(other.groupInfo_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + for (int i = 0; i < getGroupInfoCount(); i++) { + if (!getGroupInfo(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosResponse) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // repeated .hbase.pb.GroupInfo group_info = 1; + private java.util.List groupInfo_ = + java.util.Collections.emptyList(); + private void ensureGroupInfoIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + groupInfo_ = new java.util.ArrayList(groupInfo_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo, org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfoOrBuilder> groupInfoBuilder_; + + /** + * repeated .hbase.pb.GroupInfo group_info = 1; + */ + public java.util.List getGroupInfoList() { + if (groupInfoBuilder_ == null) { + return java.util.Collections.unmodifiableList(groupInfo_); + } else { + return groupInfoBuilder_.getMessageList(); + } + } + /** + * repeated .hbase.pb.GroupInfo group_info = 1; + */ + public int getGroupInfoCount() { + if (groupInfoBuilder_ == null) { + return groupInfo_.size(); + } else { + return groupInfoBuilder_.getCount(); + } + } + /** + * repeated .hbase.pb.GroupInfo group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo getGroupInfo(int index) { + if (groupInfoBuilder_ == null) { + return groupInfo_.get(index); + } else { + return groupInfoBuilder_.getMessage(index); + } + } + /** + * repeated .hbase.pb.GroupInfo group_info = 1; + */ + public Builder setGroupInfo( + int index, org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo value) { + if (groupInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureGroupInfoIsMutable(); + groupInfo_.set(index, value); + onChanged(); + } else { + groupInfoBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.GroupInfo group_info = 1; + */ + public Builder setGroupInfo( + int index, org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo.Builder builderForValue) { + if (groupInfoBuilder_ == null) { + ensureGroupInfoIsMutable(); + groupInfo_.set(index, builderForValue.build()); + onChanged(); + } else { + groupInfoBuilder_.setMessage(index, 
builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.GroupInfo group_info = 1; + */ + public Builder addGroupInfo(org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo value) { + if (groupInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureGroupInfoIsMutable(); + groupInfo_.add(value); + onChanged(); + } else { + groupInfoBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .hbase.pb.GroupInfo group_info = 1; + */ + public Builder addGroupInfo( + int index, org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo value) { + if (groupInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureGroupInfoIsMutable(); + groupInfo_.add(index, value); + onChanged(); + } else { + groupInfoBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.GroupInfo group_info = 1; + */ + public Builder addGroupInfo( + org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo.Builder builderForValue) { + if (groupInfoBuilder_ == null) { + ensureGroupInfoIsMutable(); + groupInfo_.add(builderForValue.build()); + onChanged(); + } else { + groupInfoBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.GroupInfo group_info = 1; + */ + public Builder addGroupInfo( + int index, org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo.Builder builderForValue) { + if (groupInfoBuilder_ == null) { + ensureGroupInfoIsMutable(); + groupInfo_.add(index, builderForValue.build()); + onChanged(); + } else { + groupInfoBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.GroupInfo group_info = 1; + */ + public Builder addAllGroupInfo( + java.lang.Iterable values) { + if (groupInfoBuilder_ == null) { + ensureGroupInfoIsMutable(); + super.addAll(values, groupInfo_); + onChanged(); + } else { + groupInfoBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .hbase.pb.GroupInfo group_info = 1; + */ + public Builder clearGroupInfo() { + if (groupInfoBuilder_ == null) { + groupInfo_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + groupInfoBuilder_.clear(); + } + return this; + } + /** + * repeated .hbase.pb.GroupInfo group_info = 1; + */ + public Builder removeGroupInfo(int index) { + if (groupInfoBuilder_ == null) { + ensureGroupInfoIsMutable(); + groupInfo_.remove(index); + onChanged(); + } else { + groupInfoBuilder_.remove(index); + } + return this; + } + /** + * repeated .hbase.pb.GroupInfo group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo.Builder getGroupInfoBuilder( + int index) { + return getGroupInfoFieldBuilder().getBuilder(index); + } + /** + * repeated .hbase.pb.GroupInfo group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfoOrBuilder getGroupInfoOrBuilder( + int index) { + if (groupInfoBuilder_ == null) { + return groupInfo_.get(index); } else { + return groupInfoBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .hbase.pb.GroupInfo group_info = 1; + */ + public java.util.List + getGroupInfoOrBuilderList() { + if (groupInfoBuilder_ != null) { + return groupInfoBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(groupInfo_); + } + } + /** + * repeated .hbase.pb.GroupInfo group_info = 1; + */ + public 
org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo.Builder addGroupInfoBuilder() { + return getGroupInfoFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo.getDefaultInstance()); + } + /** + * repeated .hbase.pb.GroupInfo group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo.Builder addGroupInfoBuilder( + int index) { + return getGroupInfoFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo.getDefaultInstance()); + } + /** + * repeated .hbase.pb.GroupInfo group_info = 1; + */ + public java.util.List + getGroupInfoBuilderList() { + return getGroupInfoFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo, org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfoOrBuilder> + getGroupInfoFieldBuilder() { + if (groupInfoBuilder_ == null) { + groupInfoBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo, org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfoOrBuilder>( + groupInfo_, + ((bitField0_ & 0x00000001) == 0x00000001), + getParentForChildren(), + isClean()); + groupInfo_ = null; + } + return groupInfoBuilder_; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.ListGroupInfosResponse) + } + + static { + defaultInstance = new ListGroupInfosResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.ListGroupInfosResponse) + } + + public interface GetGroupInfoOfServerRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .hbase.pb.HostPort server = 1; + /** + * required .hbase.pb.HostPort server = 1; + */ + boolean hasServer(); + /** + * required .hbase.pb.HostPort server = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort getServer(); + /** + * required .hbase.pb.HostPort server = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPortOrBuilder getServerOrBuilder(); + } + /** + * Protobuf type {@code hbase.pb.GetGroupInfoOfServerRequest} + */ + public static final class GetGroupInfoOfServerRequest extends + com.google.protobuf.GeneratedMessage + implements GetGroupInfoOfServerRequestOrBuilder { + // Use GetGroupInfoOfServerRequest.newBuilder() to construct. 
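+    /*
+     * Illustrative sketch only, not protoc output: a caller asking which group
+     * owns a region server would typically assemble this request as below. The
+     * setHostName()/setPort() names assume the HostPort message in this patch
+     * declares host_name and port fields; treat them as hypothetical.
+     *
+     *   org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort server =
+     *       org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.newBuilder()
+     *           .setHostName("rs1.example.com")
+     *           .setPort(60020)
+     *           .build();
+     *   GetGroupInfoOfServerRequest request =
+     *       GetGroupInfoOfServerRequest.newBuilder()
+     *           .setServer(server)
+     *           .build();
+     */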
+ private GetGroupInfoOfServerRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private GetGroupInfoOfServerRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final GetGroupInfoOfServerRequest defaultInstance; + public static GetGroupInfoOfServerRequest getDefaultInstance() { + return defaultInstance; + } + + public GetGroupInfoOfServerRequest getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private GetGroupInfoOfServerRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = server_.toBuilder(); + } + server_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(server_); + server_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_GetGroupInfoOfServerRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_GetGroupInfoOfServerRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerRequest.class, org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerRequest.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public GetGroupInfoOfServerRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new GetGroupInfoOfServerRequest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required .hbase.pb.HostPort server = 1; + public static final int SERVER_FIELD_NUMBER = 1; + 
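+    // Descriptive note, not protoc output: presence of the required `server`
+    // field is tracked in bit 0 of bitField0_. hasServer() reads that bit and
+    // isInitialized() below returns false while it is unset, so a request
+    // missing its server cannot be built or serialized. Consumer-side sketch:
+    //
+    //   if (request.hasServer()) {
+    //     org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort hp =
+    //         request.getServer();  // safe: presence was checked first
+    //   }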
private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort server_; + /** + * required .hbase.pb.HostPort server = 1; + */ + public boolean hasServer() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .hbase.pb.HostPort server = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort getServer() { + return server_; + } + /** + * required .hbase.pb.HostPort server = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPortOrBuilder getServerOrBuilder() { + return server_; + } + + private void initFields() { + server_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasServer()) { + memoizedIsInitialized = 0; + return false; + } + if (!getServer().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, server_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, server_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerRequest other = (org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerRequest) obj; + + boolean result = true; + result = result && (hasServer() == other.hasServer()); + if (hasServer()) { + result = result && getServer() + .equals(other.getServer()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasServer()) { + hash = (37 * hash) + SERVER_FIELD_NUMBER; + hash = (53 * hash) + getServer().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerRequest parseFrom( + com.google.protobuf.ByteString data, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.GetGroupInfoOfServerRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_GetGroupInfoOfServerRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return 
org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_GetGroupInfoOfServerRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerRequest.class, org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerRequest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getServerFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (serverBuilder_ == null) { + server_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.getDefaultInstance(); + } else { + serverBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_GetGroupInfoOfServerRequest_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerRequest build() { + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerRequest result = new org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (serverBuilder_ == null) { + result.server_ = server_; + } else { + result.server_ = serverBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerRequest.getDefaultInstance()) return this; + if (other.hasServer()) { + mergeServer(other.getServer()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if 
(!hasServer()) { + + return false; + } + if (!getServer().isInitialized()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .hbase.pb.HostPort server = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort server_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPortOrBuilder> serverBuilder_; + /** + * required .hbase.pb.HostPort server = 1; + */ + public boolean hasServer() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .hbase.pb.HostPort server = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort getServer() { + if (serverBuilder_ == null) { + return server_; + } else { + return serverBuilder_.getMessage(); + } + } + /** + * required .hbase.pb.HostPort server = 1; + */ + public Builder setServer(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort value) { + if (serverBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + server_ = value; + onChanged(); + } else { + serverBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.HostPort server = 1; + */ + public Builder setServer( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.Builder builderForValue) { + if (serverBuilder_ == null) { + server_ = builderForValue.build(); + onChanged(); + } else { + serverBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.HostPort server = 1; + */ + public Builder mergeServer(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort value) { + if (serverBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + server_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.getDefaultInstance()) { + server_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.newBuilder(server_).mergeFrom(value).buildPartial(); + } else { + server_ = value; + } + onChanged(); + } else { + serverBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.HostPort server = 1; + */ + public Builder clearServer() { + if (serverBuilder_ == null) { + server_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.getDefaultInstance(); + onChanged(); + } else { + serverBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * required .hbase.pb.HostPort server = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.Builder 
getServerBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getServerFieldBuilder().getBuilder(); + } + /** + * required .hbase.pb.HostPort server = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPortOrBuilder getServerOrBuilder() { + if (serverBuilder_ != null) { + return serverBuilder_.getMessageOrBuilder(); + } else { + return server_; + } + } + /** + * required .hbase.pb.HostPort server = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPortOrBuilder> + getServerFieldBuilder() { + if (serverBuilder_ == null) { + serverBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPortOrBuilder>( + server_, + getParentForChildren(), + isClean()); + server_ = null; + } + return serverBuilder_; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.GetGroupInfoOfServerRequest) + } + + static { + defaultInstance = new GetGroupInfoOfServerRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.GetGroupInfoOfServerRequest) + } + + public interface GetGroupInfoOfServerResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // optional .hbase.pb.GroupInfo group_info = 1; + /** + * optional .hbase.pb.GroupInfo group_info = 1; + */ + boolean hasGroupInfo(); + /** + * optional .hbase.pb.GroupInfo group_info = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo getGroupInfo(); + /** + * optional .hbase.pb.GroupInfo group_info = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfoOrBuilder getGroupInfoOrBuilder(); + } + /** + * Protobuf type {@code hbase.pb.GetGroupInfoOfServerResponse} + */ + public static final class GetGroupInfoOfServerResponse extends + com.google.protobuf.GeneratedMessage + implements GetGroupInfoOfServerResponseOrBuilder { + // Use GetGroupInfoOfServerResponse.newBuilder() to construct. 
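+    /*
+     * Consumer-side sketch, not protoc output: group_info is optional, so a
+     * server that belongs to no group simply leaves the field unset and callers
+     * should guard on presence. getName() assumes GroupInfo declares a `name`
+     * field, as the group-by-name client API in this patch suggests.
+     *
+     *   GetGroupInfoOfServerResponse response = ...;  // returned by a GroupAdminService call
+     *   if (response.hasGroupInfo()) {
+     *     String group = response.getGroupInfo().getName();
+     *   }
+     */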
+ private GetGroupInfoOfServerResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private GetGroupInfoOfServerResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final GetGroupInfoOfServerResponse defaultInstance; + public static GetGroupInfoOfServerResponse getDefaultInstance() { + return defaultInstance; + } + + public GetGroupInfoOfServerResponse getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private GetGroupInfoOfServerResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = groupInfo_.toBuilder(); + } + groupInfo_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(groupInfo_); + groupInfo_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_GetGroupInfoOfServerResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_GetGroupInfoOfServerResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerResponse.class, org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerResponse.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public GetGroupInfoOfServerResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new GetGroupInfoOfServerResponse(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // optional .hbase.pb.GroupInfo group_info = 1; + public static final int 
GROUP_INFO_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo groupInfo_; + /** + * optional .hbase.pb.GroupInfo group_info = 1; + */ + public boolean hasGroupInfo() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional .hbase.pb.GroupInfo group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo getGroupInfo() { + return groupInfo_; + } + /** + * optional .hbase.pb.GroupInfo group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfoOrBuilder getGroupInfoOrBuilder() { + return groupInfo_; + } + + private void initFields() { + groupInfo_ = org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (hasGroupInfo()) { + if (!getGroupInfo().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, groupInfo_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, groupInfo_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerResponse other = (org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerResponse) obj; + + boolean result = true; + result = result && (hasGroupInfo() == other.hasGroupInfo()); + if (hasGroupInfo()) { + result = result && getGroupInfo() + .equals(other.getGroupInfo()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasGroupInfo()) { + hash = (37 * hash) + GROUP_INFO_FIELD_NUMBER; + hash = (53 * hash) + getGroupInfo().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerResponse parseFrom( + 
com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.GetGroupInfoOfServerResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_GetGroupInfoOfServerResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return 
org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_GetGroupInfoOfServerResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerResponse.class, org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerResponse.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getGroupInfoFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (groupInfoBuilder_ == null) { + groupInfo_ = org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo.getDefaultInstance(); + } else { + groupInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.internal_static_hbase_pb_GetGroupInfoOfServerResponse_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerResponse build() { + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerResponse result = new org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerResponse(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (groupInfoBuilder_ == null) { + result.groupInfo_ = groupInfo_; + } else { + result.groupInfo_ = groupInfoBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerResponse.getDefaultInstance()) return this; + if (other.hasGroupInfo()) { + mergeGroupInfo(other.getGroupInfo()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + 
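+      // Descriptive note, not protoc output: mergeFrom above delegates to
+      // mergeGroupInfo(), so when group_info is already set the two submessages
+      // are combined with standard protobuf merge semantics (singular fields
+      // present in `other` overwrite, repeated fields concatenate) rather than
+      // the incoming message replacing the current one wholesale.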
public final boolean isInitialized() { + if (hasGroupInfo()) { + if (!getGroupInfo().isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerResponse) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // optional .hbase.pb.GroupInfo group_info = 1; + private org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo groupInfo_ = org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo, org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfoOrBuilder> groupInfoBuilder_; + /** + * optional .hbase.pb.GroupInfo group_info = 1; + */ + public boolean hasGroupInfo() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional .hbase.pb.GroupInfo group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo getGroupInfo() { + if (groupInfoBuilder_ == null) { + return groupInfo_; + } else { + return groupInfoBuilder_.getMessage(); + } + } + /** + * optional .hbase.pb.GroupInfo group_info = 1; + */ + public Builder setGroupInfo(org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo value) { + if (groupInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + groupInfo_ = value; + onChanged(); + } else { + groupInfoBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * optional .hbase.pb.GroupInfo group_info = 1; + */ + public Builder setGroupInfo( + org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo.Builder builderForValue) { + if (groupInfoBuilder_ == null) { + groupInfo_ = builderForValue.build(); + onChanged(); + } else { + groupInfoBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * optional .hbase.pb.GroupInfo group_info = 1; + */ + public Builder mergeGroupInfo(org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo value) { + if (groupInfoBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + groupInfo_ != org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo.getDefaultInstance()) { + groupInfo_ = + org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo.newBuilder(groupInfo_).mergeFrom(value).buildPartial(); + } else { + groupInfo_ = value; + } + onChanged(); + } else { + groupInfoBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * optional .hbase.pb.GroupInfo group_info = 1; + */ + public Builder clearGroupInfo() { + if (groupInfoBuilder_ == null) { + groupInfo_ = org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo.getDefaultInstance(); + onChanged(); + } else { + groupInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); 
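+        // Descriptive comment: clearing the low presence bit above makes
+        // hasGroupInfo() report false again until setGroupInfo() or
+        // mergeGroupInfo() re-sets it.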
+ return this; + } + /** + * optional .hbase.pb.GroupInfo group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo.Builder getGroupInfoBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getGroupInfoFieldBuilder().getBuilder(); + } + /** + * optional .hbase.pb.GroupInfo group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfoOrBuilder getGroupInfoOrBuilder() { + if (groupInfoBuilder_ != null) { + return groupInfoBuilder_.getMessageOrBuilder(); + } else { + return groupInfo_; + } + } + /** + * optional .hbase.pb.GroupInfo group_info = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo, org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfoOrBuilder> + getGroupInfoFieldBuilder() { + if (groupInfoBuilder_ == null) { + groupInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo, org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfoOrBuilder>( + groupInfo_, + getParentForChildren(), + isClean()); + groupInfo_ = null; + } + return groupInfoBuilder_; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.GetGroupInfoOfServerResponse) + } + + static { + defaultInstance = new GetGroupInfoOfServerResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.GetGroupInfoOfServerResponse) + } + + /** + * Protobuf service {@code hbase.pb.GroupAdminService} + */ + public static abstract class GroupAdminService + implements com.google.protobuf.Service { + protected GroupAdminService() {} + + public interface Interface { + /** + * rpc GetGroupInfo(.hbase.pb.GetGroupInfoRequest) returns (.hbase.pb.GetGroupInfoResponse); + */ + public abstract void getGroupInfo( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc GetGroupInfoOfTable(.hbase.pb.GetGroupInfoOfTableRequest) returns (.hbase.pb.GetGroupInfoOfTableResponse); + */ + public abstract void getGroupInfoOfTable( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc GetGroupInfoOfServer(.hbase.pb.GetGroupInfoOfServerRequest) returns (.hbase.pb.GetGroupInfoOfServerResponse); + */ + public abstract void getGroupInfoOfServer( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc MoveServers(.hbase.pb.MoveServersRequest) returns (.hbase.pb.MoveServersResponse); + */ + public abstract void moveServers( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc MoveTables(.hbase.pb.MoveTablesRequest) returns (.hbase.pb.MoveTablesResponse); + */ + public abstract void moveTables( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesRequest request, + com.google.protobuf.RpcCallback 
done); + + /** + * rpc AddGroup(.hbase.pb.AddGroupRequest) returns (.hbase.pb.AddGroupResponse); + */ + public abstract void addGroup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc RemoveGroup(.hbase.pb.RemoveGroupRequest) returns (.hbase.pb.RemoveGroupResponse); + */ + public abstract void removeGroup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc BalanceGroup(.hbase.pb.BalanceGroupRequest) returns (.hbase.pb.BalanceGroupResponse); + */ + public abstract void balanceGroup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc ListGroupInfos(.hbase.pb.ListGroupInfosRequest) returns (.hbase.pb.ListGroupInfosResponse); + */ + public abstract void listGroupInfos( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosRequest request, + com.google.protobuf.RpcCallback done); + + } + + public static com.google.protobuf.Service newReflectiveService( + final Interface impl) { + return new GroupAdminService() { + @java.lang.Override + public void getGroupInfo( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoRequest request, + com.google.protobuf.RpcCallback done) { + impl.getGroupInfo(controller, request, done); + } + + @java.lang.Override + public void getGroupInfoOfTable( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableRequest request, + com.google.protobuf.RpcCallback done) { + impl.getGroupInfoOfTable(controller, request, done); + } + + @java.lang.Override + public void getGroupInfoOfServer( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerRequest request, + com.google.protobuf.RpcCallback done) { + impl.getGroupInfoOfServer(controller, request, done); + } + + @java.lang.Override + public void moveServers( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersRequest request, + com.google.protobuf.RpcCallback done) { + impl.moveServers(controller, request, done); + } + + @java.lang.Override + public void moveTables( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesRequest request, + com.google.protobuf.RpcCallback done) { + impl.moveTables(controller, request, done); + } + + @java.lang.Override + public void addGroup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupRequest request, + com.google.protobuf.RpcCallback done) { + impl.addGroup(controller, request, done); + } + + @java.lang.Override + public void removeGroup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupRequest request, + com.google.protobuf.RpcCallback done) { + impl.removeGroup(controller, request, done); + } + + @java.lang.Override + public void balanceGroup( + com.google.protobuf.RpcController controller, + 
org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupRequest request, + com.google.protobuf.RpcCallback done) { + impl.balanceGroup(controller, request, done); + } + + @java.lang.Override + public void listGroupInfos( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosRequest request, + com.google.protobuf.RpcCallback done) { + impl.listGroupInfos(controller, request, done); + } + + }; + } + + public static com.google.protobuf.BlockingService + newReflectiveBlockingService(final BlockingInterface impl) { + return new com.google.protobuf.BlockingService() { + public final com.google.protobuf.Descriptors.ServiceDescriptor + getDescriptorForType() { + return getDescriptor(); + } + + public final com.google.protobuf.Message callBlockingMethod( + com.google.protobuf.Descriptors.MethodDescriptor method, + com.google.protobuf.RpcController controller, + com.google.protobuf.Message request) + throws com.google.protobuf.ServiceException { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.callBlockingMethod() given method descriptor for " + + "wrong service type."); + } + switch(method.getIndex()) { + case 0: + return impl.getGroupInfo(controller, (org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoRequest)request); + case 1: + return impl.getGroupInfoOfTable(controller, (org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableRequest)request); + case 2: + return impl.getGroupInfoOfServer(controller, (org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerRequest)request); + case 3: + return impl.moveServers(controller, (org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersRequest)request); + case 4: + return impl.moveTables(controller, (org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesRequest)request); + case 5: + return impl.addGroup(controller, (org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupRequest)request); + case 6: + return impl.removeGroup(controller, (org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupRequest)request); + case 7: + return impl.balanceGroup(controller, (org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupRequest)request); + case 8: + return impl.listGroupInfos(controller, (org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosRequest)request); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public final com.google.protobuf.Message + getRequestPrototype( + com.google.protobuf.Descriptors.MethodDescriptor method) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.getRequestPrototype() given method " + + "descriptor for wrong service type."); + } + switch(method.getIndex()) { + case 0: + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoRequest.getDefaultInstance(); + case 1: + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableRequest.getDefaultInstance(); + case 2: + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerRequest.getDefaultInstance(); + case 3: + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersRequest.getDefaultInstance(); + case 4: + return 
org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesRequest.getDefaultInstance(); + case 5: + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupRequest.getDefaultInstance(); + case 6: + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupRequest.getDefaultInstance(); + case 7: + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupRequest.getDefaultInstance(); + case 8: + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosRequest.getDefaultInstance(); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public final com.google.protobuf.Message + getResponsePrototype( + com.google.protobuf.Descriptors.MethodDescriptor method) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.getResponsePrototype() given method " + + "descriptor for wrong service type."); + } + switch(method.getIndex()) { + case 0: + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoResponse.getDefaultInstance(); + case 1: + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableResponse.getDefaultInstance(); + case 2: + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerResponse.getDefaultInstance(); + case 3: + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersResponse.getDefaultInstance(); + case 4: + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesResponse.getDefaultInstance(); + case 5: + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupResponse.getDefaultInstance(); + case 6: + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupResponse.getDefaultInstance(); + case 7: + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupResponse.getDefaultInstance(); + case 8: + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosResponse.getDefaultInstance(); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + }; + } + + /** + * rpc GetGroupInfo(.hbase.pb.GetGroupInfoRequest) returns (.hbase.pb.GetGroupInfoResponse); + */ + public abstract void getGroupInfo( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc GetGroupInfoOfTable(.hbase.pb.GetGroupInfoOfTableRequest) returns (.hbase.pb.GetGroupInfoOfTableResponse); + */ + public abstract void getGroupInfoOfTable( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc GetGroupInfoOfServer(.hbase.pb.GetGroupInfoOfServerRequest) returns (.hbase.pb.GetGroupInfoOfServerResponse); + */ + public abstract void getGroupInfoOfServer( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc MoveServers(.hbase.pb.MoveServersRequest) returns (.hbase.pb.MoveServersResponse); + */ + public abstract void moveServers( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersRequest request, + 
com.google.protobuf.RpcCallback done); + + /** + * rpc MoveTables(.hbase.pb.MoveTablesRequest) returns (.hbase.pb.MoveTablesResponse); + */ + public abstract void moveTables( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc AddGroup(.hbase.pb.AddGroupRequest) returns (.hbase.pb.AddGroupResponse); + */ + public abstract void addGroup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc RemoveGroup(.hbase.pb.RemoveGroupRequest) returns (.hbase.pb.RemoveGroupResponse); + */ + public abstract void removeGroup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc BalanceGroup(.hbase.pb.BalanceGroupRequest) returns (.hbase.pb.BalanceGroupResponse); + */ + public abstract void balanceGroup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc ListGroupInfos(.hbase.pb.ListGroupInfosRequest) returns (.hbase.pb.ListGroupInfosResponse); + */ + public abstract void listGroupInfos( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosRequest request, + com.google.protobuf.RpcCallback done); + + public static final + com.google.protobuf.Descriptors.ServiceDescriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.getDescriptor().getServices().get(0); + } + public final com.google.protobuf.Descriptors.ServiceDescriptor + getDescriptorForType() { + return getDescriptor(); + } + + public final void callMethod( + com.google.protobuf.Descriptors.MethodDescriptor method, + com.google.protobuf.RpcController controller, + com.google.protobuf.Message request, + com.google.protobuf.RpcCallback< + com.google.protobuf.Message> done) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.callMethod() given method descriptor for wrong " + + "service type."); + } + switch(method.getIndex()) { + case 0: + this.getGroupInfo(controller, (org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 1: + this.getGroupInfoOfTable(controller, (org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 2: + this.getGroupInfoOfServer(controller, (org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 3: + this.moveServers(controller, (org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 4: + this.moveTables(controller, (org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 5: + this.addGroup(controller, 
(org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 6: + this.removeGroup(controller, (org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 7: + this.balanceGroup(controller, (org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 8: + this.listGroupInfos(controller, (org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public final com.google.protobuf.Message + getRequestPrototype( + com.google.protobuf.Descriptors.MethodDescriptor method) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.getRequestPrototype() given method " + + "descriptor for wrong service type."); + } + switch(method.getIndex()) { + case 0: + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoRequest.getDefaultInstance(); + case 1: + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableRequest.getDefaultInstance(); + case 2: + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerRequest.getDefaultInstance(); + case 3: + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersRequest.getDefaultInstance(); + case 4: + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesRequest.getDefaultInstance(); + case 5: + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupRequest.getDefaultInstance(); + case 6: + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupRequest.getDefaultInstance(); + case 7: + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupRequest.getDefaultInstance(); + case 8: + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosRequest.getDefaultInstance(); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public final com.google.protobuf.Message + getResponsePrototype( + com.google.protobuf.Descriptors.MethodDescriptor method) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.getResponsePrototype() given method " + + "descriptor for wrong service type."); + } + switch(method.getIndex()) { + case 0: + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoResponse.getDefaultInstance(); + case 1: + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableResponse.getDefaultInstance(); + case 2: + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerResponse.getDefaultInstance(); + case 3: + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersResponse.getDefaultInstance(); + case 4: + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesResponse.getDefaultInstance(); + case 5: + return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupResponse.getDefaultInstance(); + case 6: + return 
org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupResponse.getDefaultInstance();
+      case 7:
+        return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupResponse.getDefaultInstance();
+      case 8:
+        return org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosResponse.getDefaultInstance();
+      default:
+        throw new java.lang.AssertionError("Can't get here.");
+    }
+  }
+
+  public static Stub newStub(
+      com.google.protobuf.RpcChannel channel) {
+    return new Stub(channel);
+  }
+
+  public static final class Stub extends org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GroupAdminService implements Interface {
+    private Stub(com.google.protobuf.RpcChannel channel) {
+      this.channel = channel;
+    }
+
+    private final com.google.protobuf.RpcChannel channel;
+
+    public com.google.protobuf.RpcChannel getChannel() {
+      return channel;
+    }
+
+    public void getGroupInfo(
+        com.google.protobuf.RpcController controller,
+        org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoRequest request,
+        com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoResponse> done) {
+      channel.callMethod(
+        getDescriptor().getMethods().get(0),
+        controller,
+        request,
+        org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoResponse.getDefaultInstance(),
+        com.google.protobuf.RpcUtil.generalizeCallback(
+          done,
+          org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoResponse.class,
+          org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoResponse.getDefaultInstance()));
+    }
+
+    public void getGroupInfoOfTable(
+        com.google.protobuf.RpcController controller,
+        org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableRequest request,
+        com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableResponse> done) {
+      channel.callMethod(
+        getDescriptor().getMethods().get(1),
+        controller,
+        request,
+        org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableResponse.getDefaultInstance(),
+        com.google.protobuf.RpcUtil.generalizeCallback(
+          done,
+          org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableResponse.class,
+          org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableResponse.getDefaultInstance()));
+    }
+
+    public void getGroupInfoOfServer(
+        com.google.protobuf.RpcController controller,
+        org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerRequest request,
+        com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerResponse> done) {
+      channel.callMethod(
+        getDescriptor().getMethods().get(2),
+        controller,
+        request,
+        org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerResponse.getDefaultInstance(),
+        com.google.protobuf.RpcUtil.generalizeCallback(
+          done,
+          org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerResponse.class,
+          org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerResponse.getDefaultInstance()));
+    }
+
+    public void moveServers(
+        com.google.protobuf.RpcController controller,
+        org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersRequest request,
+        com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersResponse> done) {
+      channel.callMethod(
+        getDescriptor().getMethods().get(3),
+        controller,
+        request,
+        org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersResponse.getDefaultInstance(),
+        com.google.protobuf.RpcUtil.generalizeCallback(
+          done,
+          org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersResponse.class,
+          org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersResponse.getDefaultInstance()));
+    }
+
+    public void moveTables(
+        com.google.protobuf.RpcController controller,
+        org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesRequest request,
+        com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesResponse> done) {
+      channel.callMethod(
+        getDescriptor().getMethods().get(4),
+        controller,
+        request,
+        org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesResponse.getDefaultInstance(),
+        com.google.protobuf.RpcUtil.generalizeCallback(
+          done,
+          org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesResponse.class,
+          org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesResponse.getDefaultInstance()));
+    }
+
+    public void addGroup(
+        com.google.protobuf.RpcController controller,
+        org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupRequest request,
+        com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupResponse> done) {
+      channel.callMethod(
+        getDescriptor().getMethods().get(5),
+        controller,
+        request,
+        org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupResponse.getDefaultInstance(),
+        com.google.protobuf.RpcUtil.generalizeCallback(
+          done,
+          org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupResponse.class,
+          org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupResponse.getDefaultInstance()));
+    }
+
+    public void removeGroup(
+        com.google.protobuf.RpcController controller,
+        org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupRequest request,
+        com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupResponse> done) {
+      channel.callMethod(
+        getDescriptor().getMethods().get(6),
+        controller,
+        request,
+        org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupResponse.getDefaultInstance(),
+        com.google.protobuf.RpcUtil.generalizeCallback(
+          done,
+          org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupResponse.class,
+          org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupResponse.getDefaultInstance()));
+    }
+
+    public void balanceGroup(
+        com.google.protobuf.RpcController controller,
+        org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupRequest request,
+        com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupResponse> done) {
+      channel.callMethod(
+        getDescriptor().getMethods().get(7),
+        controller,
+        request,
+        org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupResponse.getDefaultInstance(),
+        com.google.protobuf.RpcUtil.generalizeCallback(
+          done,
+          org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupResponse.class,
+          org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupResponse.getDefaultInstance()));
+    }
+
+    public void listGroupInfos(
+        com.google.protobuf.RpcController controller,
+        org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosRequest request,
+        com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosResponse> done) {
+      channel.callMethod(
+        getDescriptor().getMethods().get(8),
+        controller,
+        request,
+        org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosResponse.getDefaultInstance(),
+        com.google.protobuf.RpcUtil.generalizeCallback(
+          done,
+          org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosResponse.class,
+          org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosResponse.getDefaultInstance()));
+    }
+  }
+
+  public static BlockingInterface newBlockingStub(
+      com.google.protobuf.BlockingRpcChannel channel) {
+    return new BlockingStub(channel);
+  }
+
+  public interface BlockingInterface {
+    public org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoResponse getGroupInfo(
+        com.google.protobuf.RpcController controller,
+        org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoRequest request)
+        throws com.google.protobuf.ServiceException;
+
+    public org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableResponse getGroupInfoOfTable(
+        com.google.protobuf.RpcController controller,
+        org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableRequest request)
+        throws com.google.protobuf.ServiceException;
+
+    public org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerResponse getGroupInfoOfServer(
+        com.google.protobuf.RpcController controller,
+        org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerRequest request)
+        throws com.google.protobuf.ServiceException;
+
+    public org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersResponse moveServers(
+        com.google.protobuf.RpcController controller,
+        org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersRequest request)
+        throws com.google.protobuf.ServiceException;
+
+    public org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesResponse moveTables(
+        com.google.protobuf.RpcController controller,
+        org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesRequest request)
+        throws com.google.protobuf.ServiceException;
+
+    public org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupResponse addGroup(
+        com.google.protobuf.RpcController controller,
+        org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupRequest request)
+        throws com.google.protobuf.ServiceException;
+
+    public org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupResponse removeGroup(
+        com.google.protobuf.RpcController controller,
+        org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupRequest request)
+        throws com.google.protobuf.ServiceException;
+
+    public org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupResponse balanceGroup(
+        com.google.protobuf.RpcController controller,
+        org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupRequest request)
+        throws com.google.protobuf.ServiceException;
+
+    public org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosResponse listGroupInfos(
+        com.google.protobuf.RpcController controller,
+        org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosRequest request)
+        throws com.google.protobuf.ServiceException;
+  }
+
+  private static final class BlockingStub implements BlockingInterface {
+    private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) {
+      this.channel = channel;
+    }
+
+    private final com.google.protobuf.BlockingRpcChannel channel;
+
+    public org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoResponse getGroupInfo(
+        com.google.protobuf.RpcController controller,
+        org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoRequest request)
+        throws com.google.protobuf.ServiceException {
+      return (org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoResponse) channel.callBlockingMethod(
+        getDescriptor().getMethods().get(0),
+        controller,
+        request,
+        org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoResponse.getDefaultInstance());
+    }
+
+
+    public
org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableResponse getGroupInfoOfTable( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(1), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableResponse.getDefaultInstance()); + } + + + public org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerResponse getGroupInfoOfServer( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(2), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerResponse.getDefaultInstance()); + } + + + public org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersResponse moveServers( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(3), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersResponse.getDefaultInstance()); + } + + + public org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesResponse moveTables( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(4), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesResponse.getDefaultInstance()); + } + + + public org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupResponse addGroup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(5), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupResponse.getDefaultInstance()); + } + + + public org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupResponse removeGroup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(6), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupResponse.getDefaultInstance()); + 
} + + + public org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupResponse balanceGroup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(7), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupResponse.getDefaultInstance()); + } + + + public org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosResponse listGroupInfos( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(8), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosResponse.getDefaultInstance()); + } + + } + + // @@protoc_insertion_point(class_scope:hbase.pb.GroupAdminService) + } + + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_ListTablesOfGroupRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_ListTablesOfGroupRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_ListTablesOfGroupResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_ListTablesOfGroupResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_GetGroupInfoRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_GetGroupInfoRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_GetGroupInfoResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_GetGroupInfoResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_GetGroupInfoOfTableRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_GetGroupInfoOfTableRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_GetGroupInfoOfTableResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_GetGroupInfoOfTableResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_MoveServersRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_MoveServersRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_MoveServersResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_MoveServersResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_MoveTablesRequest_descriptor; + private static + 
com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_MoveTablesRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_MoveTablesResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_MoveTablesResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_AddGroupRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_AddGroupRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_AddGroupResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_AddGroupResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_RemoveGroupRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_RemoveGroupRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_RemoveGroupResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_RemoveGroupResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_BalanceGroupRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_BalanceGroupRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_BalanceGroupResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_BalanceGroupResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_ListGroupInfosRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_ListGroupInfosRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_ListGroupInfosResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_ListGroupInfosResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_GetGroupInfoOfServerRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_GetGroupInfoOfServerRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_GetGroupInfoOfServerResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_GetGroupInfoOfServerResponse_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor + getDescriptor() { + return descriptor; + } + private static com.google.protobuf.Descriptors.FileDescriptor + descriptor; + static { + java.lang.String[] descriptorData = { + "\n\020GroupAdmin.proto\022\010hbase.pb\032\013HBase.prot" + + "o\032\013Group.proto\".\n\030ListTablesOfGroupReque" + + "st\022\022\n\ngroup_name\030\001 \002(\t\"D\n\031ListTablesOfGr" + + "oupResponse\022\'\n\ntable_name\030\001 \003(\0132\023.hbase." 
+ + "pb.TableName\")\n\023GetGroupInfoRequest\022\022\n\ng" + + "roup_name\030\001 \002(\t\"?\n\024GetGroupInfoResponse\022" + + "\'\n\ngroup_info\030\001 \001(\0132\023.hbase.pb.GroupInfo" + + "\"E\n\032GetGroupInfoOfTableRequest\022\'\n\ntable_" + + "name\030\001 \002(\0132\023.hbase.pb.TableName\"F\n\033GetGr" + + "oupInfoOfTableResponse\022\'\n\ngroup_info\030\001 \001", + "(\0132\023.hbase.pb.GroupInfo\"O\n\022MoveServersRe" + + "quest\022\024\n\014target_group\030\001 \002(\t\022#\n\007servers\030\002" + + " \003(\0132\022.hbase.pb.HostPort\"\025\n\023MoveServersR" + + "esponse\"R\n\021MoveTablesRequest\022\024\n\014target_g" + + "roup\030\001 \002(\t\022\'\n\ntable_name\030\002 \003(\0132\023.hbase.p" + + "b.TableName\"\024\n\022MoveTablesResponse\"%\n\017Add" + + "GroupRequest\022\022\n\ngroup_name\030\001 \002(\t\"\022\n\020AddG" + + "roupResponse\"(\n\022RemoveGroupRequest\022\022\n\ngr" + + "oup_name\030\001 \002(\t\"\025\n\023RemoveGroupResponse\")\n" + + "\023BalanceGroupRequest\022\022\n\ngroup_name\030\001 \002(\t", + "\"*\n\024BalanceGroupResponse\022\022\n\nbalanceRan\030\001" + + " \002(\010\"\027\n\025ListGroupInfosRequest\"A\n\026ListGro" + + "upInfosResponse\022\'\n\ngroup_info\030\001 \003(\0132\023.hb" + + "ase.pb.GroupInfo\"A\n\033GetGroupInfoOfServer" + + "Request\022\"\n\006server\030\001 \002(\0132\022.hbase.pb.HostP" + + "ort\"G\n\034GetGroupInfoOfServerResponse\022\'\n\ng" + + "roup_info\030\001 \001(\0132\023.hbase.pb.GroupInfo2\365\005\n" + + "\021GroupAdminService\022M\n\014GetGroupInfo\022\035.hba" + + "se.pb.GetGroupInfoRequest\032\036.hbase.pb.Get" + + "GroupInfoResponse\022b\n\023GetGroupInfoOfTable", + "\022$.hbase.pb.GetGroupInfoOfTableRequest\032%" + + ".hbase.pb.GetGroupInfoOfTableResponse\022e\n" + + "\024GetGroupInfoOfServer\022%.hbase.pb.GetGrou" + + "pInfoOfServerRequest\032&.hbase.pb.GetGroup" + + "InfoOfServerResponse\022J\n\013MoveServers\022\034.hb" + + "ase.pb.MoveServersRequest\032\035.hbase.pb.Mov" + + "eServersResponse\022G\n\nMoveTables\022\033.hbase.p" + + "b.MoveTablesRequest\032\034.hbase.pb.MoveTable" + + "sResponse\022A\n\010AddGroup\022\031.hbase.pb.AddGrou" + + "pRequest\032\032.hbase.pb.AddGroupResponse\022J\n\013", + "RemoveGroup\022\034.hbase.pb.RemoveGroupReques" + + "t\032\035.hbase.pb.RemoveGroupResponse\022M\n\014Bala" + + "nceGroup\022\035.hbase.pb.BalanceGroupRequest\032" + + "\036.hbase.pb.BalanceGroupResponse\022S\n\016ListG" + + "roupInfos\022\037.hbase.pb.ListGroupInfosReque" + + "st\032 .hbase.pb.ListGroupInfosResponseBF\n*" + + "org.apache.hadoop.hbase.protobuf.generat" + + "edB\020GroupAdminProtosH\001\210\001\001\240\001\001" + }; + com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = + new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { + public com.google.protobuf.ExtensionRegistry assignDescriptors( + com.google.protobuf.Descriptors.FileDescriptor root) { + descriptor = root; + internal_static_hbase_pb_ListTablesOfGroupRequest_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_hbase_pb_ListTablesOfGroupRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_ListTablesOfGroupRequest_descriptor, + new java.lang.String[] { "GroupName", }); + internal_static_hbase_pb_ListTablesOfGroupResponse_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_hbase_pb_ListTablesOfGroupResponse_fieldAccessorTable = new + 
com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_ListTablesOfGroupResponse_descriptor, + new java.lang.String[] { "TableName", }); + internal_static_hbase_pb_GetGroupInfoRequest_descriptor = + getDescriptor().getMessageTypes().get(2); + internal_static_hbase_pb_GetGroupInfoRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_GetGroupInfoRequest_descriptor, + new java.lang.String[] { "GroupName", }); + internal_static_hbase_pb_GetGroupInfoResponse_descriptor = + getDescriptor().getMessageTypes().get(3); + internal_static_hbase_pb_GetGroupInfoResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_GetGroupInfoResponse_descriptor, + new java.lang.String[] { "GroupInfo", }); + internal_static_hbase_pb_GetGroupInfoOfTableRequest_descriptor = + getDescriptor().getMessageTypes().get(4); + internal_static_hbase_pb_GetGroupInfoOfTableRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_GetGroupInfoOfTableRequest_descriptor, + new java.lang.String[] { "TableName", }); + internal_static_hbase_pb_GetGroupInfoOfTableResponse_descriptor = + getDescriptor().getMessageTypes().get(5); + internal_static_hbase_pb_GetGroupInfoOfTableResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_GetGroupInfoOfTableResponse_descriptor, + new java.lang.String[] { "GroupInfo", }); + internal_static_hbase_pb_MoveServersRequest_descriptor = + getDescriptor().getMessageTypes().get(6); + internal_static_hbase_pb_MoveServersRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_MoveServersRequest_descriptor, + new java.lang.String[] { "TargetGroup", "Servers", }); + internal_static_hbase_pb_MoveServersResponse_descriptor = + getDescriptor().getMessageTypes().get(7); + internal_static_hbase_pb_MoveServersResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_MoveServersResponse_descriptor, + new java.lang.String[] { }); + internal_static_hbase_pb_MoveTablesRequest_descriptor = + getDescriptor().getMessageTypes().get(8); + internal_static_hbase_pb_MoveTablesRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_MoveTablesRequest_descriptor, + new java.lang.String[] { "TargetGroup", "TableName", }); + internal_static_hbase_pb_MoveTablesResponse_descriptor = + getDescriptor().getMessageTypes().get(9); + internal_static_hbase_pb_MoveTablesResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_MoveTablesResponse_descriptor, + new java.lang.String[] { }); + internal_static_hbase_pb_AddGroupRequest_descriptor = + getDescriptor().getMessageTypes().get(10); + internal_static_hbase_pb_AddGroupRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_AddGroupRequest_descriptor, + new java.lang.String[] { "GroupName", }); + internal_static_hbase_pb_AddGroupResponse_descriptor = + getDescriptor().getMessageTypes().get(11); + internal_static_hbase_pb_AddGroupResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_AddGroupResponse_descriptor, + new java.lang.String[] { }); + 
internal_static_hbase_pb_RemoveGroupRequest_descriptor = + getDescriptor().getMessageTypes().get(12); + internal_static_hbase_pb_RemoveGroupRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_RemoveGroupRequest_descriptor, + new java.lang.String[] { "GroupName", }); + internal_static_hbase_pb_RemoveGroupResponse_descriptor = + getDescriptor().getMessageTypes().get(13); + internal_static_hbase_pb_RemoveGroupResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_RemoveGroupResponse_descriptor, + new java.lang.String[] { }); + internal_static_hbase_pb_BalanceGroupRequest_descriptor = + getDescriptor().getMessageTypes().get(14); + internal_static_hbase_pb_BalanceGroupRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_BalanceGroupRequest_descriptor, + new java.lang.String[] { "GroupName", }); + internal_static_hbase_pb_BalanceGroupResponse_descriptor = + getDescriptor().getMessageTypes().get(15); + internal_static_hbase_pb_BalanceGroupResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_BalanceGroupResponse_descriptor, + new java.lang.String[] { "BalanceRan", }); + internal_static_hbase_pb_ListGroupInfosRequest_descriptor = + getDescriptor().getMessageTypes().get(16); + internal_static_hbase_pb_ListGroupInfosRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_ListGroupInfosRequest_descriptor, + new java.lang.String[] { }); + internal_static_hbase_pb_ListGroupInfosResponse_descriptor = + getDescriptor().getMessageTypes().get(17); + internal_static_hbase_pb_ListGroupInfosResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_ListGroupInfosResponse_descriptor, + new java.lang.String[] { "GroupInfo", }); + internal_static_hbase_pb_GetGroupInfoOfServerRequest_descriptor = + getDescriptor().getMessageTypes().get(18); + internal_static_hbase_pb_GetGroupInfoOfServerRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_GetGroupInfoOfServerRequest_descriptor, + new java.lang.String[] { "Server", }); + internal_static_hbase_pb_GetGroupInfoOfServerResponse_descriptor = + getDescriptor().getMessageTypes().get(19); + internal_static_hbase_pb_GetGroupInfoOfServerResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_GetGroupInfoOfServerResponse_descriptor, + new java.lang.String[] { "GroupInfo", }); + return null; + } + }; + com.google.protobuf.Descriptors.FileDescriptor + .internalBuildGeneratedFileFrom(descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.getDescriptor(), + org.apache.hadoop.hbase.protobuf.generated.GroupProtos.getDescriptor(), + }, assigner); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/GroupProtos.java hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/GroupProtos.java new file mode 100644 index 0000000..a786424 --- /dev/null +++ hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/GroupProtos.java @@ -0,0 +1,1331 @@ +// Generated by the protocol buffer 
compiler. DO NOT EDIT!
+// source: Group.proto
+
+package org.apache.hadoop.hbase.protobuf.generated;
+
+public final class GroupProtos {
+  private GroupProtos() {}
+  public static void registerAllExtensions(
+      com.google.protobuf.ExtensionRegistry registry) {
+  }
+  public interface GroupInfoOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+
+    // required string name = 1;
+    /**
+     * required string name = 1;
+     */
+    boolean hasName();
+    /**
+     * required string name = 1;
+     */
+    java.lang.String getName();
+    /**
+     * required string name = 1;
+     */
+    com.google.protobuf.ByteString
+        getNameBytes();
+
+    // repeated .hbase.pb.HostPort servers = 4;
+    /**
+     * repeated .hbase.pb.HostPort servers = 4;
+     */
+    java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort>
+        getServersList();
+    /**
+     * repeated .hbase.pb.HostPort servers = 4;
+     */
+    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort getServers(int index);
+    /**
+     * repeated .hbase.pb.HostPort servers = 4;
+     */
+    int getServersCount();
+    /**
+     * repeated .hbase.pb.HostPort servers = 4;
+     */
+    java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPortOrBuilder>
+        getServersOrBuilderList();
+    /**
+     * repeated .hbase.pb.HostPort servers = 4;
+     */
+    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPortOrBuilder getServersOrBuilder(
+        int index);
+
+    // repeated .hbase.pb.TableName tables = 3;
+    /**
+     * repeated .hbase.pb.TableName tables = 3;
+     */
+    java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName>
+        getTablesList();
+    /**
+     * repeated .hbase.pb.TableName tables = 3;
+     */
+    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTables(int index);
+    /**
+     * repeated .hbase.pb.TableName tables = 3;
+     */
+    int getTablesCount();
+    /**
+     * repeated .hbase.pb.TableName tables = 3;
+     */
+    java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>
+        getTablesOrBuilderList();
+    /**
+     * repeated .hbase.pb.TableName tables = 3;
+     */
+    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTablesOrBuilder(
+        int index);
+  }
+  /**
+   * Protobuf type {@code hbase.pb.GroupInfo}
+   */
+  public static final class GroupInfo extends
+      com.google.protobuf.GeneratedMessage
+      implements GroupInfoOrBuilder {
+    // Use GroupInfo.newBuilder() to construct.
+ private GroupInfo(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private GroupInfo(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final GroupInfo defaultInstance; + public static GroupInfo getDefaultInstance() { + return defaultInstance; + } + + public GroupInfo getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private GroupInfo( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + name_ = input.readBytes(); + break; + } + case 26: { + if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) { + tables_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000004; + } + tables_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry)); + break; + } + case 34: { + if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + servers_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000002; + } + servers_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.PARSER, extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) { + tables_ = java.util.Collections.unmodifiableList(tables_); + } + if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + servers_ = java.util.Collections.unmodifiableList(servers_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.GroupProtos.internal_static_hbase_pb_GroupInfo_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.GroupProtos.internal_static_hbase_pb_GroupInfo_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo.class, org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public GroupInfo parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new GroupInfo(input, extensionRegistry); + } + }; + + 
@java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string name = 1; + public static final int NAME_FIELD_NUMBER = 1; + private java.lang.Object name_; + /** + * required string name = 1; + */ + public boolean hasName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string name = 1; + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + name_ = s; + } + return s; + } + } + /** + * required string name = 1; + */ + public com.google.protobuf.ByteString + getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // repeated .hbase.pb.HostPort servers = 4; + public static final int SERVERS_FIELD_NUMBER = 4; + private java.util.List servers_; + /** + * repeated .hbase.pb.HostPort servers = 4; + */ + public java.util.List getServersList() { + return servers_; + } + /** + * repeated .hbase.pb.HostPort servers = 4; + */ + public java.util.List + getServersOrBuilderList() { + return servers_; + } + /** + * repeated .hbase.pb.HostPort servers = 4; + */ + public int getServersCount() { + return servers_.size(); + } + /** + * repeated .hbase.pb.HostPort servers = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort getServers(int index) { + return servers_.get(index); + } + /** + * repeated .hbase.pb.HostPort servers = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPortOrBuilder getServersOrBuilder( + int index) { + return servers_.get(index); + } + + // repeated .hbase.pb.TableName tables = 3; + public static final int TABLES_FIELD_NUMBER = 3; + private java.util.List tables_; + /** + * repeated .hbase.pb.TableName tables = 3; + */ + public java.util.List getTablesList() { + return tables_; + } + /** + * repeated .hbase.pb.TableName tables = 3; + */ + public java.util.List + getTablesOrBuilderList() { + return tables_; + } + /** + * repeated .hbase.pb.TableName tables = 3; + */ + public int getTablesCount() { + return tables_.size(); + } + /** + * repeated .hbase.pb.TableName tables = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTables(int index) { + return tables_.get(index); + } + /** + * repeated .hbase.pb.TableName tables = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTablesOrBuilder( + int index) { + return tables_.get(index); + } + + private void initFields() { + name_ = ""; + servers_ = java.util.Collections.emptyList(); + tables_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasName()) { + memoizedIsInitialized = 0; + return false; + } + for (int i = 0; i < getServersCount(); i++) { + if (!getServers(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + for (int i = 0; i < getTablesCount(); i++) { + if (!getTables(i).isInitialized()) { + 
memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getNameBytes()); + } + for (int i = 0; i < tables_.size(); i++) { + output.writeMessage(3, tables_.get(i)); + } + for (int i = 0; i < servers_.size(); i++) { + output.writeMessage(4, servers_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getNameBytes()); + } + for (int i = 0; i < tables_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(3, tables_.get(i)); + } + for (int i = 0; i < servers_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(4, servers_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo other = (org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo) obj; + + boolean result = true; + result = result && (hasName() == other.hasName()); + if (hasName()) { + result = result && getName() + .equals(other.getName()); + } + result = result && getServersList() + .equals(other.getServersList()); + result = result && getTablesList() + .equals(other.getTablesList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasName()) { + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + } + if (getServersCount() > 0) { + hash = (37 * hash) + SERVERS_FIELD_NUMBER; + hash = (53 * hash) + getServersList().hashCode(); + } + if (getTablesCount() > 0) { + hash = (37 * hash) + TABLES_FIELD_NUMBER; + hash = (53 * hash) + getTablesList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo parseFrom(byte[] 
data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.GroupInfo} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.GroupProtos.internal_static_hbase_pb_GroupInfo_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.GroupProtos.internal_static_hbase_pb_GroupInfo_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo.class, org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private 
void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getServersFieldBuilder(); + getTablesFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + name_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + if (serversBuilder_ == null) { + servers_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + } else { + serversBuilder_.clear(); + } + if (tablesBuilder_ == null) { + tables_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + } else { + tablesBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.GroupProtos.internal_static_hbase_pb_GroupInfo_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo build() { + org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo result = new org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.name_ = name_; + if (serversBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { + servers_ = java.util.Collections.unmodifiableList(servers_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.servers_ = servers_; + } else { + result.servers_ = serversBuilder_.build(); + } + if (tablesBuilder_ == null) { + if (((bitField0_ & 0x00000004) == 0x00000004)) { + tables_ = java.util.Collections.unmodifiableList(tables_); + bitField0_ = (bitField0_ & ~0x00000004); + } + result.tables_ = tables_; + } else { + result.tables_ = tablesBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo.getDefaultInstance()) return this; + if (other.hasName()) { + bitField0_ |= 0x00000001; + name_ = other.name_; + onChanged(); + } + if (serversBuilder_ == null) { + if (!other.servers_.isEmpty()) { + if (servers_.isEmpty()) { + servers_ = other.servers_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureServersIsMutable(); + servers_.addAll(other.servers_); + } + onChanged(); + } + } else { + if (!other.servers_.isEmpty()) { + if (serversBuilder_.isEmpty()) { + serversBuilder_.dispose(); + serversBuilder_ = null; + servers_ = 
other.servers_; + bitField0_ = (bitField0_ & ~0x00000002); + serversBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getServersFieldBuilder() : null; + } else { + serversBuilder_.addAllMessages(other.servers_); + } + } + } + if (tablesBuilder_ == null) { + if (!other.tables_.isEmpty()) { + if (tables_.isEmpty()) { + tables_ = other.tables_; + bitField0_ = (bitField0_ & ~0x00000004); + } else { + ensureTablesIsMutable(); + tables_.addAll(other.tables_); + } + onChanged(); + } + } else { + if (!other.tables_.isEmpty()) { + if (tablesBuilder_.isEmpty()) { + tablesBuilder_.dispose(); + tablesBuilder_ = null; + tables_ = other.tables_; + bitField0_ = (bitField0_ & ~0x00000004); + tablesBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getTablesFieldBuilder() : null; + } else { + tablesBuilder_.addAllMessages(other.tables_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasName()) { + + return false; + } + for (int i = 0; i < getServersCount(); i++) { + if (!getServers(i).isInitialized()) { + + return false; + } + } + for (int i = 0; i < getTablesCount(); i++) { + if (!getTables(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.GroupProtos.GroupInfo) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string name = 1; + private java.lang.Object name_ = ""; + /** + * required string name = 1; + */ + public boolean hasName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string name = 1; + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string name = 1; + */ + public com.google.protobuf.ByteString + getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string name = 1; + */ + public Builder setName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + name_ = value; + onChanged(); + return this; + } + /** + * required string name = 1; + */ + public Builder clearName() { + bitField0_ = (bitField0_ & ~0x00000001); + name_ = getDefaultInstance().getName(); + onChanged(); + return this; + } + /** + * required string name = 1; + */ + public Builder setNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + name_ = value; + onChanged(); + return this; + } + + // repeated .hbase.pb.HostPort servers 
= 4; + private java.util.List servers_ = + java.util.Collections.emptyList(); + private void ensureServersIsMutable() { + if (!((bitField0_ & 0x00000002) == 0x00000002)) { + servers_ = new java.util.ArrayList(servers_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPortOrBuilder> serversBuilder_; + + /** + * repeated .hbase.pb.HostPort servers = 4; + */ + public java.util.List getServersList() { + if (serversBuilder_ == null) { + return java.util.Collections.unmodifiableList(servers_); + } else { + return serversBuilder_.getMessageList(); + } + } + /** + * repeated .hbase.pb.HostPort servers = 4; + */ + public int getServersCount() { + if (serversBuilder_ == null) { + return servers_.size(); + } else { + return serversBuilder_.getCount(); + } + } + /** + * repeated .hbase.pb.HostPort servers = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort getServers(int index) { + if (serversBuilder_ == null) { + return servers_.get(index); + } else { + return serversBuilder_.getMessage(index); + } + } + /** + * repeated .hbase.pb.HostPort servers = 4; + */ + public Builder setServers( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort value) { + if (serversBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureServersIsMutable(); + servers_.set(index, value); + onChanged(); + } else { + serversBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.HostPort servers = 4; + */ + public Builder setServers( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.Builder builderForValue) { + if (serversBuilder_ == null) { + ensureServersIsMutable(); + servers_.set(index, builderForValue.build()); + onChanged(); + } else { + serversBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.HostPort servers = 4; + */ + public Builder addServers(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort value) { + if (serversBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureServersIsMutable(); + servers_.add(value); + onChanged(); + } else { + serversBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .hbase.pb.HostPort servers = 4; + */ + public Builder addServers( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort value) { + if (serversBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureServersIsMutable(); + servers_.add(index, value); + onChanged(); + } else { + serversBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.HostPort servers = 4; + */ + public Builder addServers( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.Builder builderForValue) { + if (serversBuilder_ == null) { + ensureServersIsMutable(); + servers_.add(builderForValue.build()); + onChanged(); + } else { + serversBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.HostPort servers = 4; + */ + public Builder addServers( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.Builder builderForValue) { + if (serversBuilder_ == null) { + ensureServersIsMutable(); + 
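/*
 * A usage sketch (illustrative, not generated code): the repeated-field overloads in this
 * region (addServers(HostPort), addServers(int, HostPort), addServers(HostPort.Builder),
 * addAllServers) are how callers assemble the servers list. Assuming these generated
 * classes, plus HBase.proto's TableName message (bytes namespace and qualifier fields),
 * a complete GroupInfo can be built like:
 *
 *   GroupProtos.GroupInfo info = GroupProtos.GroupInfo.newBuilder()
 *       .setName("my_group")                           // required field 1
 *       .addServers(HBaseProtos.HostPort.newBuilder()  // repeated field 4
 *           .setHostName("rs1.example.com")
 *           .setPort(16020))
 *       .addTables(HBaseProtos.TableName.newBuilder()  // repeated field 3
 *           .setNamespace(com.google.protobuf.ByteString.copyFromUtf8("default"))
 *           .setQualifier(com.google.protobuf.ByteString.copyFromUtf8("t1")))
 *       .build();
 *
 * The Builder-taking overloads call build() on the sub-builder, so required sub-fields
 * (host_name, port) must be set first or build() throws UninitializedMessageException.
 */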
servers_.add(index, builderForValue.build()); + onChanged(); + } else { + serversBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.HostPort servers = 4; + */ + public Builder addAllServers( + java.lang.Iterable values) { + if (serversBuilder_ == null) { + ensureServersIsMutable(); + super.addAll(values, servers_); + onChanged(); + } else { + serversBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .hbase.pb.HostPort servers = 4; + */ + public Builder clearServers() { + if (serversBuilder_ == null) { + servers_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + serversBuilder_.clear(); + } + return this; + } + /** + * repeated .hbase.pb.HostPort servers = 4; + */ + public Builder removeServers(int index) { + if (serversBuilder_ == null) { + ensureServersIsMutable(); + servers_.remove(index); + onChanged(); + } else { + serversBuilder_.remove(index); + } + return this; + } + /** + * repeated .hbase.pb.HostPort servers = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.Builder getServersBuilder( + int index) { + return getServersFieldBuilder().getBuilder(index); + } + /** + * repeated .hbase.pb.HostPort servers = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPortOrBuilder getServersOrBuilder( + int index) { + if (serversBuilder_ == null) { + return servers_.get(index); } else { + return serversBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .hbase.pb.HostPort servers = 4; + */ + public java.util.List + getServersOrBuilderList() { + if (serversBuilder_ != null) { + return serversBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(servers_); + } + } + /** + * repeated .hbase.pb.HostPort servers = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.Builder addServersBuilder() { + return getServersFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.getDefaultInstance()); + } + /** + * repeated .hbase.pb.HostPort servers = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.Builder addServersBuilder( + int index) { + return getServersFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.getDefaultInstance()); + } + /** + * repeated .hbase.pb.HostPort servers = 4; + */ + public java.util.List + getServersBuilderList() { + return getServersFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPortOrBuilder> + getServersFieldBuilder() { + if (serversBuilder_ == null) { + serversBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPortOrBuilder>( + servers_, + ((bitField0_ & 0x00000002) == 0x00000002), + getParentForChildren(), + isClean()); + servers_ = null; + } + return serversBuilder_; + } + + // repeated .hbase.pb.TableName tables = 3; + private java.util.List tables_ = + java.util.Collections.emptyList(); + private void ensureTablesIsMutable() { + if (!((bitField0_ & 
0x00000004) == 0x00000004)) { + tables_ = new java.util.ArrayList(tables_); + bitField0_ |= 0x00000004; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tablesBuilder_; + + /** + * repeated .hbase.pb.TableName tables = 3; + */ + public java.util.List getTablesList() { + if (tablesBuilder_ == null) { + return java.util.Collections.unmodifiableList(tables_); + } else { + return tablesBuilder_.getMessageList(); + } + } + /** + * repeated .hbase.pb.TableName tables = 3; + */ + public int getTablesCount() { + if (tablesBuilder_ == null) { + return tables_.size(); + } else { + return tablesBuilder_.getCount(); + } + } + /** + * repeated .hbase.pb.TableName tables = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTables(int index) { + if (tablesBuilder_ == null) { + return tables_.get(index); + } else { + return tablesBuilder_.getMessage(index); + } + } + /** + * repeated .hbase.pb.TableName tables = 3; + */ + public Builder setTables( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tablesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTablesIsMutable(); + tables_.set(index, value); + onChanged(); + } else { + tablesBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.TableName tables = 3; + */ + public Builder setTables( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tablesBuilder_ == null) { + ensureTablesIsMutable(); + tables_.set(index, builderForValue.build()); + onChanged(); + } else { + tablesBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableName tables = 3; + */ + public Builder addTables(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tablesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTablesIsMutable(); + tables_.add(value); + onChanged(); + } else { + tablesBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .hbase.pb.TableName tables = 3; + */ + public Builder addTables( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tablesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTablesIsMutable(); + tables_.add(index, value); + onChanged(); + } else { + tablesBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.TableName tables = 3; + */ + public Builder addTables( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tablesBuilder_ == null) { + ensureTablesIsMutable(); + tables_.add(builderForValue.build()); + onChanged(); + } else { + tablesBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableName tables = 3; + */ + public Builder addTables( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tablesBuilder_ == null) { + ensureTablesIsMutable(); + tables_.add(index, builderForValue.build()); + onChanged(); + } else { + tablesBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated 
.hbase.pb.TableName tables = 3; + */ + public Builder addAllTables( + java.lang.Iterable values) { + if (tablesBuilder_ == null) { + ensureTablesIsMutable(); + super.addAll(values, tables_); + onChanged(); + } else { + tablesBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .hbase.pb.TableName tables = 3; + */ + public Builder clearTables() { + if (tablesBuilder_ == null) { + tables_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + } else { + tablesBuilder_.clear(); + } + return this; + } + /** + * repeated .hbase.pb.TableName tables = 3; + */ + public Builder removeTables(int index) { + if (tablesBuilder_ == null) { + ensureTablesIsMutable(); + tables_.remove(index); + onChanged(); + } else { + tablesBuilder_.remove(index); + } + return this; + } + /** + * repeated .hbase.pb.TableName tables = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTablesBuilder( + int index) { + return getTablesFieldBuilder().getBuilder(index); + } + /** + * repeated .hbase.pb.TableName tables = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTablesOrBuilder( + int index) { + if (tablesBuilder_ == null) { + return tables_.get(index); } else { + return tablesBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .hbase.pb.TableName tables = 3; + */ + public java.util.List + getTablesOrBuilderList() { + if (tablesBuilder_ != null) { + return tablesBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(tables_); + } + } + /** + * repeated .hbase.pb.TableName tables = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder addTablesBuilder() { + return getTablesFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()); + } + /** + * repeated .hbase.pb.TableName tables = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder addTablesBuilder( + int index) { + return getTablesFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()); + } + /** + * repeated .hbase.pb.TableName tables = 3; + */ + public java.util.List + getTablesBuilderList() { + return getTablesFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> + getTablesFieldBuilder() { + if (tablesBuilder_ == null) { + tablesBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>( + tables_, + ((bitField0_ & 0x00000004) == 0x00000004), + getParentForChildren(), + isClean()); + tables_ = null; + } + return tablesBuilder_; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.GroupInfo) + } + + static { + defaultInstance = new GroupInfo(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.GroupInfo) + } + + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_GroupInfo_descriptor; + private static + 
com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_GroupInfo_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor + getDescriptor() { + return descriptor; + } + private static com.google.protobuf.Descriptors.FileDescriptor + descriptor; + static { + java.lang.String[] descriptorData = { + "\n\013Group.proto\022\010hbase.pb\032\013HBase.proto\"c\n\t" + + "GroupInfo\022\014\n\004name\030\001 \002(\t\022#\n\007servers\030\004 \003(\013" + + "2\022.hbase.pb.HostPort\022#\n\006tables\030\003 \003(\0132\023.h" + + "base.pb.TableNameBA\n*org.apache.hadoop.h" + + "base.protobuf.generatedB\013GroupProtosH\001\210\001" + + "\001\240\001\001" + }; + com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = + new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { + public com.google.protobuf.ExtensionRegistry assignDescriptors( + com.google.protobuf.Descriptors.FileDescriptor root) { + descriptor = root; + internal_static_hbase_pb_GroupInfo_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_hbase_pb_GroupInfo_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_GroupInfo_descriptor, + new java.lang.String[] { "Name", "Servers", "Tables", }); + return null; + } + }; + com.google.protobuf.Descriptors.FileDescriptor + .internalBuildGeneratedFileFrom(descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.getDescriptor(), + }, assigner); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java index 623da6c..fb273ff 100644 --- hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java +++ hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java @@ -19255,6 +19255,621 @@ public final class HBaseProtos { // @@protoc_insertion_point(class_scope:hbase.pb.RegionServerInfo) } + public interface HostPortOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string host_name = 1; + /** + * required string host_name = 1; + */ + boolean hasHostName(); + /** + * required string host_name = 1; + */ + java.lang.String getHostName(); + /** + * required string host_name = 1; + */ + com.google.protobuf.ByteString + getHostNameBytes(); + + // required uint32 port = 2; + /** + * required uint32 port = 2; + */ + boolean hasPort(); + /** + * required uint32 port = 2; + */ + int getPort(); + } + /** + * Protobuf type {@code hbase.pb.HostPort} + */ + public static final class HostPort extends + com.google.protobuf.GeneratedMessage + implements HostPortOrBuilder { + // Use HostPort.newBuilder() to construct. 
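/*
 * Context sketch (illustrative, not part of the generated code): the group APIs pass
 * servers around as Guava HostAndPort, while this message is the wire form. A round
 * trip between the two looks like the following; these helper names are hypothetical,
 * not defined by this patch:
 *
 *   static HBaseProtos.HostPort toProto(HostAndPort hp) {
 *     return HBaseProtos.HostPort.newBuilder()
 *         .setHostName(hp.getHostText())   // Guava's accessor for the host part
 *         .setPort(hp.getPort())
 *         .build();
 *   }
 *
 *   static HostAndPort fromProto(HBaseProtos.HostPort proto) {
 *     return HostAndPort.fromParts(proto.getHostName(), proto.getPort());
 *   }
 */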
+ private HostPort(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private HostPort(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final HostPort defaultInstance; + public static HostPort getDefaultInstance() { + return defaultInstance; + } + + public HostPort getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private HostPort( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + hostName_ = input.readBytes(); + break; + } + case 16: { + bitField0_ |= 0x00000002; + port_ = input.readUInt32(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_HostPort_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_HostPort_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public HostPort parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new HostPort(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string host_name = 1; + public static final int HOST_NAME_FIELD_NUMBER = 1; + private java.lang.Object hostName_; + /** + * required string host_name = 1; + */ + public boolean hasHostName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string host_name = 1; + */ + public java.lang.String getHostName() { + java.lang.Object ref = hostName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + hostName_ = s; + } + 
return s; + } + } + /** + * required string host_name = 1; + */ + public com.google.protobuf.ByteString + getHostNameBytes() { + java.lang.Object ref = hostName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + hostName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // required uint32 port = 2; + public static final int PORT_FIELD_NUMBER = 2; + private int port_; + /** + * required uint32 port = 2; + */ + public boolean hasPort() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required uint32 port = 2; + */ + public int getPort() { + return port_; + } + + private void initFields() { + hostName_ = ""; + port_ = 0; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasHostName()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasPort()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getHostNameBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeUInt32(2, port_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getHostNameBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt32Size(2, port_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort) obj; + + boolean result = true; + result = result && (hasHostName() == other.hasHostName()); + if (hasHostName()) { + result = result && getHostName() + .equals(other.getHostName()); + } + result = result && (hasPort() == other.hasPort()); + if (hasPort()) { + result = result && (getPort() + == other.getPort()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasHostName()) { + hash = (37 * hash) + HOST_NAME_FIELD_NUMBER; + hash = (53 * hash) + getHostName().hashCode(); + } + if (hasPort()) { + hash = (37 * hash) + PORT_FIELD_NUMBER; + hash = (53 * hash) + getPort(); + } + hash = (29 * hash) + 
getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.HostPort} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPortOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_HostPort_descriptor; + } + + protected 
com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_HostPort_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + hostName_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + port_ = 0; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_hbase_pb_HostPort_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort build() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.hostName_ = hostName_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.port_ = port_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.getDefaultInstance()) return this; + if (other.hasHostName()) { + bitField0_ |= 0x00000001; + hostName_ = other.hostName_; + onChanged(); + } + if (other.hasPort()) { + setPort(other.getPort()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasHostName()) { + + return false; + } + if (!hasPort()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + 
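/*
 * The pattern below is standard protobuf 2.x generated code: parse into a separate
 * message and merge it back in the finally block, so that any fields decoded before
 * an InvalidProtocolBufferException was thrown are still retained on this builder
 * via e.getUnfinishedMessage().
 */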
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string host_name = 1; + private java.lang.Object hostName_ = ""; + /** + * required string host_name = 1; + */ + public boolean hasHostName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string host_name = 1; + */ + public java.lang.String getHostName() { + java.lang.Object ref = hostName_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + hostName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string host_name = 1; + */ + public com.google.protobuf.ByteString + getHostNameBytes() { + java.lang.Object ref = hostName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + hostName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string host_name = 1; + */ + public Builder setHostName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + hostName_ = value; + onChanged(); + return this; + } + /** + * required string host_name = 1; + */ + public Builder clearHostName() { + bitField0_ = (bitField0_ & ~0x00000001); + hostName_ = getDefaultInstance().getHostName(); + onChanged(); + return this; + } + /** + * required string host_name = 1; + */ + public Builder setHostNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + hostName_ = value; + onChanged(); + return this; + } + + // required uint32 port = 2; + private int port_ ; + /** + * required uint32 port = 2; + */ + public boolean hasPort() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required uint32 port = 2; + */ + public int getPort() { + return port_; + } + /** + * required uint32 port = 2; + */ + public Builder setPort(int value) { + bitField0_ |= 0x00000002; + port_ = value; + onChanged(); + return this; + } + /** + * required uint32 port = 2; + */ + public Builder clearPort() { + bitField0_ = (bitField0_ & ~0x00000002); + port_ = 0; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.HostPort) + } + + static { + defaultInstance = new HostPort(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.HostPort) + } + private static com.google.protobuf.Descriptors.Descriptor internal_static_hbase_pb_TableName_descriptor; private static @@ -19380,6 +19995,11 @@ public final class HBaseProtos { private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hbase_pb_RegionServerInfo_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_HostPort_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_HostPort_fieldAccessorTable; public static 
com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { @@ -19442,14 +20062,15 @@ public final class HBaseProtos { "\t\022\014\n\004date\030\005 \002(\t\022\024\n\014src_checksum\030\006 \002(\t\"Q\n" + "\020RegionServerInfo\022\020\n\010infoPort\030\001 \001(\005\022+\n\014v" + "ersion_info\030\002 \001(\0132\025.hbase.pb.VersionInfo" + - "*r\n\013CompareType\022\010\n\004LESS\020\000\022\021\n\rLESS_OR_EQU" + - "AL\020\001\022\t\n\005EQUAL\020\002\022\r\n\tNOT_EQUAL\020\003\022\024\n\020GREATE" + - "R_OR_EQUAL\020\004\022\013\n\007GREATER\020\005\022\t\n\005NO_OP\020\006*n\n\010" + - "TimeUnit\022\017\n\013NANOSECONDS\020\001\022\020\n\014MICROSECOND" + - "S\020\002\022\020\n\014MILLISECONDS\020\003\022\013\n\007SECONDS\020\004\022\013\n\007MI" + - "NUTES\020\005\022\t\n\005HOURS\020\006\022\010\n\004DAYS\020\007B>\n*org.apac" + - "he.hadoop.hbase.protobuf.generatedB\013HBas", - "eProtosH\001\240\001\001" + "\"+\n\010HostPort\022\021\n\thost_name\030\001 \002(\t\022\014\n\004port\030" + + "\002 \002(\r*r\n\013CompareType\022\010\n\004LESS\020\000\022\021\n\rLESS_O" + + "R_EQUAL\020\001\022\t\n\005EQUAL\020\002\022\r\n\tNOT_EQUAL\020\003\022\024\n\020G" + + "REATER_OR_EQUAL\020\004\022\013\n\007GREATER\020\005\022\t\n\005NO_OP\020" + + "\006*n\n\010TimeUnit\022\017\n\013NANOSECONDS\020\001\022\020\n\014MICROS" + + "ECONDS\020\002\022\020\n\014MILLISECONDS\020\003\022\013\n\007SECONDS\020\004\022" + + "\013\n\007MINUTES\020\005\022\t\n\005HOURS\020\006\022\010\n\004DAYS\020\007B>\n*org", + ".apache.hadoop.hbase.protobuf.generatedB" + + "\013HBaseProtosH\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -19606,6 +20227,12 @@ public final class HBaseProtos { com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_RegionServerInfo_descriptor, new java.lang.String[] { "InfoPort", "VersionInfo", }); + internal_static_hbase_pb_HostPort_descriptor = + getDescriptor().getMessageTypes().get(25); + internal_static_hbase_pb_HostPort_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_HostPort_descriptor, + new java.lang.String[] { "HostName", "Port", }); return null; } }; diff --git hbase-protocol/src/main/protobuf/Group.proto hbase-protocol/src/main/protobuf/Group.proto new file mode 100644 index 0000000..10e3ab8 --- /dev/null +++ hbase-protocol/src/main/protobuf/Group.proto @@ -0,0 +1,34 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package hbase.pb; + +option java_package = "org.apache.hadoop.hbase.protobuf.generated"; +option java_outer_classname = "GroupProtos"; +option java_generic_services = true; +option java_generate_equals_and_hash = true; +option optimize_for = SPEED; + +import "HBase.proto"; + +message GroupInfo { + required string name = 1; + repeated HostPort servers = 4; + repeated TableName tables = 3; +} + diff --git hbase-protocol/src/main/protobuf/GroupAdmin.proto hbase-protocol/src/main/protobuf/GroupAdmin.proto new file mode 100644 index 0000000..b6a1feb --- /dev/null +++ hbase-protocol/src/main/protobuf/GroupAdmin.proto @@ -0,0 +1,136 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package hbase.pb; + +option java_package = "org.apache.hadoop.hbase.protobuf.generated"; +option java_outer_classname = "GroupAdminProtos"; +option java_generic_services = true; +option java_generate_equals_and_hash = true; +option optimize_for = SPEED; + +import "HBase.proto"; +import "Group.proto"; + +/** Group level protobufs */ + +message ListTablesOfGroupRequest { + required string group_name = 1; +} + +message ListTablesOfGroupResponse { + repeated TableName table_name = 1; +} + +message GetGroupInfoRequest { + required string group_name = 1; +} + +message GetGroupInfoResponse { + optional GroupInfo group_info = 1; +} + +message GetGroupInfoOfTableRequest { + required TableName table_name = 1; +} + +message GetGroupInfoOfTableResponse { + optional GroupInfo group_info = 1; +} + +message MoveServersRequest { + required string target_group = 1; + repeated HostPort servers = 2; +} + +message MoveServersResponse { +} + +message MoveTablesRequest { + required string target_group = 1; + repeated TableName table_name = 2; +} + +message MoveTablesResponse { +} + +message AddGroupRequest { + required string group_name = 1; +} + +message AddGroupResponse { +} + +message RemoveGroupRequest { + required string group_name = 1; +} + +message RemoveGroupResponse { +} + +message BalanceGroupRequest { + required string group_name = 1; +} + +message BalanceGroupResponse { + required bool balanceRan = 1; +} + +message ListGroupInfosRequest { +} + +message ListGroupInfosResponse { + repeated GroupInfo group_info = 1; +} + +message GetGroupInfoOfServerRequest { + required HostPort server = 1; +} + +message GetGroupInfoOfServerResponse { + optional GroupInfo group_info = 1; +} + +service GroupAdminService { + rpc GetGroupInfo(GetGroupInfoRequest) + returns (GetGroupInfoResponse); + + rpc GetGroupInfoOfTable(GetGroupInfoOfTableRequest) + returns (GetGroupInfoOfTableResponse); + + rpc GetGroupInfoOfServer(GetGroupInfoOfServerRequest) + returns (GetGroupInfoOfServerResponse); + + rpc MoveServers(MoveServersRequest) + returns (MoveServersResponse); + + rpc 
MoveTables(MoveTablesRequest) + returns (MoveTablesResponse); + + rpc AddGroup(AddGroupRequest) + returns (AddGroupResponse); + + rpc RemoveGroup(RemoveGroupRequest) + returns (RemoveGroupResponse); + + rpc BalanceGroup(BalanceGroupRequest) + returns (BalanceGroupResponse); + + rpc ListGroupInfos(ListGroupInfosRequest) + returns (ListGroupInfosResponse); +} diff --git hbase-protocol/src/main/protobuf/HBase.proto hbase-protocol/src/main/protobuf/HBase.proto index 2603efa..683ccc2 100644 --- hbase-protocol/src/main/protobuf/HBase.proto +++ hbase-protocol/src/main/protobuf/HBase.proto @@ -248,3 +248,9 @@ message RegionServerInfo { optional int32 infoPort = 1; optional VersionInfo version_info = 2; } + +message HostPort { + required string host_name = 1; + required uint32 port = 2; +} + diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterAndRegionObserver.java hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterAndRegionObserver.java index f28ef94..508ee35 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterAndRegionObserver.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterAndRegionObserver.java @@ -21,7 +21,9 @@ package org.apache.hadoop.hbase.coprocessor; import java.io.IOException; import java.util.List; +import java.util.Set; +import com.google.common.net.HostAndPort; import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.HColumnDescriptor; @@ -621,4 +623,54 @@ public abstract class BaseMasterAndRegionObserver extends BaseRegionObserver public void postSetNamespaceQuota(final ObserverContext ctx, final String namespace, final Quotas quotas) throws IOException { } + + @Override + public void postAddGroup(ObserverContext ctx, String name) + throws IOException { + } + + @Override + public void postBalanceGroup(ObserverContext ctx, + String groupName, boolean balancerRan) throws IOException { + } + + @Override + public void postMoveServers(ObserverContext ctx, Set + servers, String targetGroup) throws IOException { + } + + @Override + public void postMoveTables(ObserverContext ctx, Set + tables, String targetGroup) throws IOException { + } + + @Override + public void postRemoveGroup(ObserverContext ctx, String name) + throws IOException { + } + + @Override + public void preAddGroup(ObserverContext ctx, String name) + throws IOException { + } + + @Override + public void preBalanceGroup(ObserverContext ctx, String groupName) + throws IOException { + } + + @Override + public void preMoveServers(ObserverContext ctx, + Set servers, String targetGroup) throws IOException { + } + + @Override + public void preMoveTables(ObserverContext ctx, + Set tables, String targetGroup) throws IOException { + } + + @Override + public void preRemoveGroup(ObserverContext ctx, String name) + throws IOException { + } } diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java index d005389..43ee075 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java @@ -21,7 +21,9 @@ package org.apache.hadoop.hbase.coprocessor; import java.io.IOException; import java.util.List; +import java.util.Set; +import com.google.common.net.HostAndPort; import 
org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.HColumnDescriptor; @@ -624,4 +626,55 @@ public class BaseMasterObserver implements MasterObserver { public void postSetNamespaceQuota(final ObserverContext ctx, final String namespace, final Quotas quotas) throws IOException { } + + @Override + public void preMoveServers(ObserverContext ctx, Set + servers, String targetGroup) throws IOException { + } + + @Override + public void postMoveServers(ObserverContext ctx, Set + servers, String targetGroup) throws IOException { + } + + @Override + public void preMoveTables(ObserverContext ctx, Set + tables, String targetGroup) throws IOException { + } + + @Override + public void postMoveTables(ObserverContext ctx, + Set tables, String targetGroup) throws IOException { + } + + @Override + public void preAddGroup(ObserverContext ctx, String name) + throws IOException { + } + + @Override + public void postAddGroup(ObserverContext ctx, String name) + throws IOException { + } + + @Override + public void preRemoveGroup(ObserverContext ctx, String name) + throws IOException { + + } + + @Override + public void postRemoveGroup(ObserverContext ctx, String name) + throws IOException { + } + + @Override + public void preBalanceGroup(ObserverContext ctx, String groupName) + throws IOException { + } + + @Override + public void postBalanceGroup(ObserverContext ctx, + String groupName, boolean balancerRan) throws IOException { + } } diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java index ede8cd4..75b2ca4 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java @@ -21,7 +21,9 @@ package org.apache.hadoop.hbase.coprocessor; import java.io.IOException; import java.util.List; +import java.util.Set; +import com.google.common.net.HostAndPort; import org.apache.hadoop.hbase.Coprocessor; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.HColumnDescriptor; @@ -1217,4 +1219,98 @@ public interface MasterObserver extends Coprocessor { */ void postSetNamespaceQuota(final ObserverContext ctx, final String namespace, final Quotas quotas) throws IOException; + + /** + * Called before servers are moved to the target region server group + * @param ctx the environment to interact with the framework and master + * @param servers set of servers to move + * @param targetGroup name of the destination group + * @throws IOException + */ + void preMoveServers(final ObserverContext ctx, + Set servers, String targetGroup) throws IOException; + + /** + * Called after servers are moved to the target region server group + * @param ctx the environment to interact with the framework and master + * @param servers set of servers to move + * @param targetGroup name of the destination group + * @throws IOException + */ + void postMoveServers(final ObserverContext ctx, + Set servers, String targetGroup) throws IOException; + + /** + * Called before tables are moved to the target region server group + * @param ctx the environment to interact with the framework and master + * @param tables set of tables to move + * @param targetGroup name of the destination group + * @throws IOException + */ + void preMoveTables(final ObserverContext ctx, + Set tables, String targetGroup) throws IOException; + + /** + * Called after tables are moved to the target region server group + * @param ctx the environment to interact with the framework and master + * @param tables set of tables to move + *
@param targetGroup name of the destination group + * @throws IOException + */ + void postMoveTables(final ObserverContext ctx, + Set tables, String targetGroup) throws IOException; + + /** + * Called before a new region server group is added + * @param ctx the environment to interact with the framework and master + * @param name group name + * @throws IOException + */ + void preAddGroup(final ObserverContext ctx, + String name) throws IOException; + + /** + * Called after a new region server group is added + * @param ctx the environment to interact with the framework and master + * @param name group name + * @throws IOException + */ + void postAddGroup(final ObserverContext ctx, + String name) throws IOException; + + /** + * Called before a region server group is removed + * @param ctx the environment to interact with the framework and master + * @param name group name + * @throws IOException + */ + void preRemoveGroup(final ObserverContext ctx, + String name) throws IOException; + + /** + * Called after a region server group is removed + * @param ctx the environment to interact with the framework and master + * @param name group name + * @throws IOException + */ + void postRemoveGroup(final ObserverContext ctx, + String name) throws IOException; + + /** + * Called before a region server group is balanced + * @param ctx the environment to interact with the framework and master + * @param groupName group name + * @throws IOException + */ + void preBalanceGroup(final ObserverContext ctx, + String groupName) throws IOException; + + /** + * Called after a region server group is balanced + * @param ctx the environment to interact with the framework and master + * @param groupName group name + * @param balancerRan true if the balancer actually ran, false otherwise + * @throws IOException + */ + void postBalanceGroup(final ObserverContext ctx, + String groupName, boolean balancerRan) throws IOException; } diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupAdminEndpoint.java hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupAdminEndpoint.java new file mode 100644 index 0000000..789c8a1 --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupAdminEndpoint.java @@ -0,0 +1,949 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.apache.hadoop.hbase.group; + +import com.google.common.collect.Sets; +import com.google.common.net.HostAndPort; +import com.google.protobuf.RpcCallback; +import com.google.protobuf.RpcController; +import com.google.protobuf.Service; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.Coprocessor; +import org.apache.hadoop.hbase.CoprocessorEnvironment; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.NamespaceDescriptor; +import org.apache.hadoop.hbase.ProcedureInfo; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.constraint.ConstraintException; +import org.apache.hadoop.hbase.coprocessor.CoprocessorService; +import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment; +import org.apache.hadoop.hbase.coprocessor.MasterObserver; +import org.apache.hadoop.hbase.coprocessor.ObserverContext; +import org.apache.hadoop.hbase.master.MasterServices; +import org.apache.hadoop.hbase.master.RegionPlan; +import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; +import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.ResponseConverter; +import org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupRequest; +import org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.AddGroupResponse; +import org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupRequest; +import org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.BalanceGroupResponse; +import org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerRequest; +import org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfServerResponse; +import org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableRequest; +import org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoOfTableResponse; +import org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoRequest; +import org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GetGroupInfoResponse; +import org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.GroupAdminService; +import org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosRequest; +import org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.ListGroupInfosResponse; +import org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersRequest; +import org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveServersResponse; +import org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesRequest; +import org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.MoveTablesResponse; +import org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupRequest; +import org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos.RemoveGroupResponse; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; +import org.apache.hadoop.hbase.protobuf.generated.GroupAdminProtos; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; +import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas; +import java.io.IOException; +import java.util.HashSet; 
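/*
 * Deployment and usage sketch (illustrative, not part of this file): the endpoint
 * below is expected to be registered on the master, e.g. in hbase-site.xml:
 *
 *   <property>
 *     <name>hbase.coprocessor.master.classes</name>
 *     <value>org.apache.hadoop.hbase.group.GroupAdminEndpoint</value>
 *   </property>
 *
 * Clients then reach its GroupAdminService RPCs through the hbase-client wrapper:
 *
 *   Configuration conf = HBaseConfiguration.create();
 *   try (Connection conn = ConnectionFactory.createConnection(conf);
 *        GroupAdmin admin = GroupAdmin.newClient(conn)) {
 *     admin.addGroup("my_group");
 *     admin.moveServers(
 *         Sets.newHashSet(HostAndPort.fromParts("rs1.example.com", 16020)),
 *         "my_group");
 *   }
 */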
+import java.util.List; +import java.util.Set; + +public class GroupAdminEndpoint extends GroupAdminService + implements CoprocessorService, Coprocessor, MasterObserver { + + private static final Log LOG = LogFactory.getLog(GroupAdminEndpoint.class); + private MasterServices master = null; + + private static GroupInfoManagerImpl groupInfoManager; + private GroupAdminServer groupAdminServer; + + @Override + public void start(CoprocessorEnvironment env) throws IOException { + MasterCoprocessorEnvironment menv = (MasterCoprocessorEnvironment)env; + master = menv.getMasterServices(); + groupInfoManager = new GroupInfoManagerImpl(master); + groupAdminServer = new GroupAdminServer(master, groupInfoManager); + Class clazz = + master.getConfiguration().getClass(HConstants.HBASE_MASTER_LOADBALANCER_CLASS, null); + if (!GroupableBalancer.class.isAssignableFrom(clazz)) { + throw new IOException("Configured balancer is not a GroupableBalancer"); + } + } + + @Override + public void stop(CoprocessorEnvironment env) throws IOException { + } + + @Override + public Service getService() { + return this; + } + + public GroupInfoManager getGroupInfoManager() { + return groupInfoManager; + } + + @Override + public void getGroupInfo(RpcController controller, + GetGroupInfoRequest request, + RpcCallback done) { + GetGroupInfoResponse response = null; + try { + GetGroupInfoResponse.Builder builder = + GetGroupInfoResponse.newBuilder(); + GroupInfo groupInfo = groupAdminServer.getGroupInfo(request.getGroupName()); + if(groupInfo != null) { + builder.setGroupInfo(ProtobufUtil.toProtoGroupInfo(groupInfo)); + } + response = builder.build(); + } catch (IOException e) { + ResponseConverter.setControllerException(controller, e); + } + done.run(response); + } + + @Override + public void getGroupInfoOfTable(RpcController controller, + GetGroupInfoOfTableRequest request, + RpcCallback done) { + GetGroupInfoOfTableResponse response = null; + try { + GetGroupInfoOfTableResponse.Builder builder = + GetGroupInfoOfTableResponse.newBuilder(); + GroupInfo groupInfo = + groupAdminServer.getGroupInfoOfTable(ProtobufUtil.toTableName(request.getTableName())); + response = builder.setGroupInfo(ProtobufUtil.toProtoGroupInfo(groupInfo)).build(); + } catch (IOException e) { + ResponseConverter.setControllerException(controller, e); + } + done.run(response); + } + + @Override + public void moveServers(RpcController controller, + MoveServersRequest request, + RpcCallback done) { + GroupAdminProtos.MoveServersResponse response = null; + try { + GroupAdminProtos.MoveServersResponse.Builder builder = + GroupAdminProtos.MoveServersResponse.newBuilder(); + Set hostPorts = Sets.newHashSet(); + for(HBaseProtos.HostPort el: request.getServersList()) { + hostPorts.add(HostAndPort.fromParts(el.getHostName(), el.getPort())); + } + groupAdminServer.moveServers(hostPorts, request.getTargetGroup()); + response = builder.build(); + } catch (IOException e) { + ResponseConverter.setControllerException(controller, e); + } + done.run(response); + } + + @Override + public void moveTables(RpcController controller, + MoveTablesRequest request, + RpcCallback done) { + MoveTablesResponse response = null; + try { + MoveTablesResponse.Builder builder = + MoveTablesResponse.newBuilder(); + Set tables = new HashSet(request.getTableNameList().size()); + for(HBaseProtos.TableName tableName: request.getTableNameList()) { + tables.add(ProtobufUtil.toTableName(tableName)); + } + groupAdminServer.moveTables(tables, request.getTargetGroup()); + response = builder.build(); + 
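/*
 * Note on the error-handling convention used in this method and the RPC methods that
 * follow: on IOException the response reference stays null and the exception is
 * attached to the RpcController via ResponseConverter.setControllerException(...),
 * which is how coprocessor endpoints report failure to the client-side stub;
 * done.run(...) is still invoked so the callback always completes.
 */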
+    } catch (IOException e) {
+      ResponseConverter.setControllerException(controller, e);
+    }
+    done.run(response);
+  }
+
+  @Override
+  public void addGroup(RpcController controller,
+                       AddGroupRequest request,
+                       RpcCallback<AddGroupResponse> done) {
+    AddGroupResponse response = null;
+    try {
+      AddGroupResponse.Builder builder =
+          AddGroupResponse.newBuilder();
+      groupAdminServer.addGroup(request.getGroupName());
+      response = builder.build();
+    } catch (IOException e) {
+      ResponseConverter.setControllerException(controller, e);
+    }
+    done.run(response);
+  }
+
+  @Override
+  public void removeGroup(RpcController controller,
+                          RemoveGroupRequest request,
+                          RpcCallback<RemoveGroupResponse> done) {
+    RemoveGroupResponse response = null;
+    try {
+      RemoveGroupResponse.Builder builder =
+          RemoveGroupResponse.newBuilder();
+      groupAdminServer.removeGroup(request.getGroupName());
+      response = builder.build();
+    } catch (IOException e) {
+      ResponseConverter.setControllerException(controller, e);
+    }
+    done.run(response);
+  }
+
+  @Override
+  public void balanceGroup(RpcController controller,
+                           BalanceGroupRequest request,
+                           RpcCallback<BalanceGroupResponse> done) {
+    BalanceGroupResponse response = null;
+    try {
+      BalanceGroupResponse.Builder builder =
+          BalanceGroupResponse.newBuilder();
+      builder.setBalanceRan(groupAdminServer.balanceGroup(request.getGroupName()));
+      response = builder.build();
+    } catch (IOException e) {
+      ResponseConverter.setControllerException(controller, e);
+    }
+    done.run(response);
+  }
+
+  @Override
+  public void listGroupInfos(RpcController controller,
+                             ListGroupInfosRequest request,
+                             RpcCallback<ListGroupInfosResponse> done) {
+    ListGroupInfosResponse response = null;
+    try {
+      ListGroupInfosResponse.Builder builder =
+          ListGroupInfosResponse.newBuilder();
+      for(GroupInfo groupInfo: groupAdminServer.listGroups()) {
+        builder.addGroupInfo(ProtobufUtil.toProtoGroupInfo(groupInfo));
+      }
+      response = builder.build();
+    } catch (IOException e) {
+      ResponseConverter.setControllerException(controller, e);
+    }
+    done.run(response);
+  }
+
+  @Override
+  public void getGroupInfoOfServer(RpcController controller,
+                                   GetGroupInfoOfServerRequest request,
+                                   RpcCallback<GetGroupInfoOfServerResponse> done) {
+    GetGroupInfoOfServerResponse response = null;
+    try {
+      GetGroupInfoOfServerResponse.Builder builder =
+          GetGroupInfoOfServerResponse.newBuilder();
+      GroupInfo groupInfo = groupAdminServer.getGroupOfServer(
+          HostAndPort.fromParts(request.getServer().getHostName(), request.getServer().getPort()));
+      response = builder.setGroupInfo(ProtobufUtil.toProtoGroupInfo(groupInfo)).build();
+    } catch (IOException e) {
+      ResponseConverter.setControllerException(controller, e);
+    }
+    done.run(response);
+  }
+
+  @Override
+  public void preCreateTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
+      HTableDescriptor desc, HRegionInfo[] regions) throws IOException {
+    groupAdminServer.prepareGroupForTable(desc);
+  }
+
+  @Override
+  public void postDeleteTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
+      TableName tableName) throws IOException {
+    groupAdminServer.cleanupGroupForTable(tableName);
+  }
+
+  @Override
+  public void preCreateNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
+      NamespaceDescriptor ns) throws IOException {
+    String group = ns.getConfigurationValue(GroupInfo.NAMESPACEDESC_PROP_GROUP);
+    if(group != null && groupAdminServer.getGroupInfo(group) == null) {
+      throw new ConstraintException("Region server group "+group+" does not exist");
+    }
+  }
+
+  @Override
+  public void preModifyNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
+      NamespaceDescriptor ns) throws IOException {
+    preCreateNamespace(ctx, ns);
+  }
+
+  // The remaining MasterObserver callbacks are intentional no-ops; only the
+  // table and namespace hooks above carry group-specific logic.
+  @Override
+  public void postCreateTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
HTableDescriptor desc, + HRegionInfo[] regions) throws IOException { + } + + @Override + public void preCreateTableHandler(ObserverContext ctx, + HTableDescriptor desc, + HRegionInfo[] regions) throws IOException { + } + + @Override + public void postCreateTableHandler(ObserverContext ctx, + HTableDescriptor desc, + HRegionInfo[] regions) throws IOException { + } + + @Override + public void preDeleteTable(ObserverContext ctx, + TableName tableName) throws IOException { + } + + @Override + public void preDeleteTableHandler(ObserverContext ctx, + TableName tableName) throws IOException { + } + + @Override + public void postDeleteTableHandler(ObserverContext ctx, + TableName tableName) throws IOException { + } + + @Override + public void preTruncateTable(ObserverContext ctx, + TableName tableName) throws IOException { + } + + @Override + public void postTruncateTable(ObserverContext ctx, + TableName tableName) throws IOException { + } + + @Override + public void preTruncateTableHandler(ObserverContext ctx, + TableName tableName) throws IOException { + } + + @Override + public void postTruncateTableHandler(ObserverContext ctx, + TableName tableName) throws IOException { + } + + @Override + public void preModifyTable(ObserverContext ctx, + TableName tableName, + HTableDescriptor htd) throws IOException { + } + + @Override + public void postModifyTable(ObserverContext ctx, + TableName tableName, + HTableDescriptor htd) throws IOException { + } + + @Override + public void preModifyTableHandler(ObserverContext ctx, + TableName tableName, + HTableDescriptor htd) throws IOException { + } + + @Override + public void postModifyTableHandler(ObserverContext ctx, + TableName tableName, + HTableDescriptor htd) throws IOException { + } + + @Override + public void preAddColumn(ObserverContext ctx, + TableName tableName, + HColumnDescriptor columnFamily) throws IOException { + } + + @Override + public void preAddColumnFamily(ObserverContext ctx, + TableName tableName, + HColumnDescriptor columnFamily) throws IOException { + } + + @Override + public void postAddColumn(ObserverContext ctx, + TableName tableName, + HColumnDescriptor columnFamily) throws IOException { + } + + @Override + public void postAddColumnFamily(ObserverContext ctx, + TableName tableName, + HColumnDescriptor columnFamily) throws IOException { + } + + @Override + public void preAddColumnHandler(ObserverContext ctx, + TableName tableName, + HColumnDescriptor columnFamily) throws IOException { + } + + @Override + public void preAddColumnFamilyHandler(ObserverContext ctx, + TableName tableName, + HColumnDescriptor columnFamily) throws IOException { + } + + @Override + public void postAddColumnHandler(ObserverContext ctx, + TableName tableName, + HColumnDescriptor columnFamily) throws IOException { + } + + @Override + public void postAddColumnFamilyHandler(ObserverContext ctx, + TableName tableName, + HColumnDescriptor columnFamily) throws IOException { + } + + @Override + public void preModifyColumn(ObserverContext ctx, + TableName tableName, + HColumnDescriptor columnFamily) throws IOException { + } + + @Override + public void preModifyColumnFamily(ObserverContext ctx, + TableName tableName, + HColumnDescriptor columnFamily) throws IOException { + } + + @Override + public void postModifyColumn(ObserverContext ctx, + TableName tableName, + HColumnDescriptor columnFamily) throws IOException { + } + + @Override + public void postModifyColumnFamily(ObserverContext ctx, TableName + tableName, HColumnDescriptor columnFamily) throws IOException { + 
+ } + + @Override + public void preModifyColumnHandler(ObserverContext ctx, TableName + tableName, HColumnDescriptor columnFamily) throws IOException { + + } + + @Override + public void preModifyColumnFamilyHandler(ObserverContext ctx, + TableName tableName, HColumnDescriptor columnFamily) + throws IOException { + + } + + @Override + public void postModifyColumnHandler(ObserverContext ctx, + TableName tableName, HColumnDescriptor columnFamily) throws + IOException { + + } + + @Override + public void postModifyColumnFamilyHandler(ObserverContext ctx, + TableName tableName, HColumnDescriptor columnFamily) + throws IOException { + + } + + @Override + public void preDeleteColumn(ObserverContext ctx, TableName + tableName, byte[] columnFamily) throws IOException { + + } + + @Override + public void preDeleteColumnFamily(ObserverContext ctx, TableName + tableName, byte[] columnFamily) throws IOException { + + } + + @Override + public void postDeleteColumn(ObserverContext ctx, TableName + tableName, byte[] columnFamily) throws IOException { + + } + + @Override + public void postDeleteColumnFamily(ObserverContext ctx, TableName + tableName, byte[] columnFamily) throws IOException { + + } + + @Override + public void preDeleteColumnHandler(ObserverContext ctx, TableName + tableName, byte[] columnFamily) throws IOException { + + } + + @Override + public void preDeleteColumnFamilyHandler(ObserverContext ctx, + TableName tableName, byte[] columnFamily) throws + IOException { + + } + + @Override + public void postDeleteColumnHandler(ObserverContext ctx, + TableName tableName, byte[] columnFamily) throws IOException { + + } + + @Override + public void postDeleteColumnFamilyHandler(ObserverContext ctx, + TableName tableName, byte[] columnFamily) throws + IOException { + + } + + @Override + public void preEnableTable(ObserverContext ctx, TableName + tableName) throws IOException { + + } + + @Override + public void postEnableTable(ObserverContext ctx, TableName + tableName) throws IOException { + + } + + @Override + public void preEnableTableHandler(ObserverContext ctx, TableName + tableName) throws IOException { + + } + + @Override + public void postEnableTableHandler(ObserverContext ctx, TableName + tableName) throws IOException { + + } + + @Override + public void preDisableTable(ObserverContext ctx, TableName + tableName) throws IOException { + + } + + @Override + public void postDisableTable(ObserverContext ctx, TableName + tableName) throws IOException { + + } + + @Override + public void preDisableTableHandler(ObserverContext ctx, TableName + tableName) throws IOException { + + } + + @Override + public void postDisableTableHandler(ObserverContext ctx, + TableName tableName) throws IOException { + + } + + @Override + public void preMove(ObserverContext ctx, HRegionInfo region, + ServerName srcServer, ServerName destServer) throws IOException { + + } + + @Override + public void postMove(ObserverContext ctx, HRegionInfo region, + ServerName srcServer, ServerName destServer) throws IOException { + + } + + @Override + public void preAssign(ObserverContext ctx, HRegionInfo + regionInfo) throws IOException { + + } + + @Override + public void postAssign(ObserverContext ctx, HRegionInfo + regionInfo) throws IOException { + + } + + @Override + public void preUnassign(ObserverContext ctx, HRegionInfo + regionInfo, boolean force) throws IOException { + + } + + @Override + public void postUnassign(ObserverContext ctx, HRegionInfo + regionInfo, boolean force) throws IOException { + + } + + @Override + public void 
preRegionOffline(ObserverContext ctx, HRegionInfo + regionInfo) throws IOException { + + } + + @Override + public void postRegionOffline(ObserverContext ctx, HRegionInfo + regionInfo) throws IOException { + + } + + @Override + public void preBalance(ObserverContext ctx) throws IOException { + + } + + @Override + public void postBalance(ObserverContext ctx, List + plans) throws IOException { + + } + + @Override + public boolean preBalanceSwitch(ObserverContext ctx, boolean + newValue) throws IOException { + return newValue; + } + + @Override + public void postBalanceSwitch(ObserverContext ctx, boolean + oldValue, boolean newValue) throws IOException { + + } + + @Override + public void preShutdown(ObserverContext ctx) throws IOException { + + } + + @Override + public void preStopMaster(ObserverContext ctx) throws IOException { + + } + + @Override + public void postStartMaster(ObserverContext ctx) throws + IOException { + + } + + @Override + public void preMasterInitialization(ObserverContext ctx) throws + IOException { + + } + + @Override + public void preSnapshot(ObserverContext ctx, SnapshotDescription + snapshot, HTableDescriptor hTableDescriptor) throws IOException { + + } + + @Override + public void postSnapshot(ObserverContext ctx, SnapshotDescription + snapshot, HTableDescriptor hTableDescriptor) throws IOException { + + } + + @Override + public void preListSnapshot(ObserverContext ctx, + SnapshotDescription snapshot) throws IOException { + + } + + @Override + public void postListSnapshot(ObserverContext ctx, + SnapshotDescription snapshot) throws IOException { + + } + + @Override + public void preCloneSnapshot(ObserverContext ctx, + SnapshotDescription snapshot, HTableDescriptor hTableDescriptor) + throws IOException { + + } + + @Override + public void postCloneSnapshot(ObserverContext ctx, + SnapshotDescription snapshot, HTableDescriptor hTableDescriptor) + throws IOException { + + } + + @Override + public void preRestoreSnapshot(ObserverContext ctx, + SnapshotDescription snapshot, HTableDescriptor hTableDescriptor) + throws IOException { + + } + + @Override + public void postRestoreSnapshot(ObserverContext ctx, + SnapshotDescription snapshot, HTableDescriptor + hTableDescriptor) throws IOException { + + } + + @Override + public void preDeleteSnapshot(ObserverContext ctx, + SnapshotDescription snapshot) throws IOException { + + } + + @Override + public void postDeleteSnapshot(ObserverContext ctx, + SnapshotDescription snapshot) throws IOException { + + } + + @Override + public void preGetTableDescriptors(ObserverContext ctx, + List tableNamesList, List + descriptors, String regex) throws IOException { + + } + + @Override + public void postGetTableDescriptors(ObserverContext ctx, + List tableNamesList, List + descriptors, String regex) throws IOException { + + } + + @Override + public void preGetTableNames(ObserverContext ctx, + List descriptors, String regex) throws + IOException { + + } + + @Override + public void postGetTableNames(ObserverContext ctx, + List descriptors, String regex) throws + IOException { + + } + + @Override + public void postCreateNamespace(ObserverContext ctx, + NamespaceDescriptor ns) throws IOException { + + } + + @Override + public void preDeleteNamespace(ObserverContext ctx, String + namespace) throws IOException { + + } + + @Override + public void postDeleteNamespace(ObserverContext ctx, String + namespace) throws IOException { + + } + + @Override + public void postModifyNamespace(ObserverContext ctx, + NamespaceDescriptor ns) throws IOException { + + } + 
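+  // Illustrative usage sketch (not part of this patch): the preCreateNamespace and
+  // preModifyNamespace hooks earlier in this endpoint validate the group a namespace
+  // references. Assuming the standard Admin API (the admin variable is hypothetical),
+  // a client would pin a namespace to a group roughly like this:
+  //
+  //   admin.createNamespace(NamespaceDescriptor.create("my_ns")
+  //       .addConfiguration(GroupInfo.NAMESPACEDESC_PROP_GROUP, "my_group")
+  //       .build());
+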
+ @Override + public void preGetNamespaceDescriptor(ObserverContext ctx, String + namespace) throws IOException { + + } + + @Override + public void postGetNamespaceDescriptor(ObserverContext ctx, + NamespaceDescriptor ns) throws IOException { + + } + + @Override + public void preListNamespaceDescriptors(ObserverContext ctx, + List descriptors) throws + IOException { + + } + + @Override + public void postListNamespaceDescriptors(ObserverContext ctx, + List descriptors) throws + IOException { + + } + + @Override + public void preTableFlush(ObserverContext ctx, TableName + tableName) throws IOException { + + } + + @Override + public void postTableFlush(ObserverContext ctx, TableName + tableName) throws IOException { + + } + + @Override + public void preSetUserQuota(ObserverContext ctx, String userName, + Quotas quotas) throws IOException { + + } + + @Override + public void postSetUserQuota(ObserverContext ctx, String + userName, Quotas quotas) throws IOException { + + } + + @Override + public void preSetUserQuota(ObserverContext ctx, String userName, + TableName tableName, Quotas quotas) throws IOException { + + } + + @Override + public void postSetUserQuota(ObserverContext ctx, String + userName, TableName tableName, Quotas quotas) throws IOException { + + } + + @Override + public void preSetUserQuota(ObserverContext ctx, String userName, + String namespace, Quotas quotas) throws IOException { + + } + + @Override + public void postSetUserQuota(ObserverContext ctx, String + userName, String namespace, Quotas quotas) throws IOException { + + } + + @Override + public void preSetTableQuota(ObserverContext ctx, TableName + tableName, Quotas quotas) throws IOException { + + } + + @Override + public void postSetTableQuota(ObserverContext ctx, TableName + tableName, Quotas quotas) throws IOException { + + } + + @Override + public void preSetNamespaceQuota(ObserverContext ctx, String + namespace, Quotas quotas) throws IOException { + + } + + @Override + public void postSetNamespaceQuota(ObserverContext ctx, String + namespace, Quotas quotas) throws IOException { + } + + @Override + public void preMoveServers(ObserverContext ctx, Set + servers, String targetGroup) throws IOException { + } + + @Override + public void postMoveServers(ObserverContext ctx, Set + servers, String targetGroup) throws IOException { + } + + @Override + public void preMoveTables(ObserverContext ctx, Set + tables, String targetGroup) throws IOException { + } + + @Override + public void postMoveTables(ObserverContext ctx, + Set tables, String targetGroup) throws IOException { + } + + @Override + public void preAddGroup(ObserverContext ctx, String name) + throws IOException { + } + + @Override + public void postAddGroup(ObserverContext ctx, String name) + throws IOException { + } + + @Override + public void preRemoveGroup(ObserverContext ctx, String name) + throws IOException { + } + + @Override + public void postRemoveGroup(ObserverContext ctx, String name) + throws IOException { + } + + @Override + public void preBalanceGroup(ObserverContext ctx, String groupName) + throws IOException { + } + + @Override + public void postBalanceGroup(ObserverContext ctx, + String groupName, boolean balancerRan) throws IOException { + } + + @Override + public void preAbortProcedure(ObserverContext ctx, + ProcedureExecutor procEnv, long procId) throws IOException { + } + + @Override + public void postAbortProcedure(ObserverContext ctx) + throws IOException { + } + + @Override + public void preListProcedures(ObserverContext ctx) + throws 
IOException { + } + + @Override + public void postListProcedures(ObserverContext ctx, + List procInfoList) throws IOException { + } +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupAdminServer.java hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupAdminServer.java new file mode 100644 index 0000000..de69aec --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupAdminServer.java @@ -0,0 +1,514 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.group; + +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; +import com.google.common.collect.Sets; +import com.google.common.net.HostAndPort; +import org.apache.commons.lang.StringUtils; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.NamespaceDescriptor; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.constraint.ConstraintException; +import org.apache.hadoop.hbase.master.AssignmentManager; +import org.apache.hadoop.hbase.master.LoadBalancer; +import org.apache.hadoop.hbase.master.MasterCoprocessorHost; +import org.apache.hadoop.hbase.master.MasterServices; +import org.apache.hadoop.hbase.master.RegionPlan; +import org.apache.hadoop.hbase.master.RegionState; +import org.apache.hadoop.hbase.master.ServerManager; +import org.apache.hadoop.hbase.master.TableLockManager.TableLock; +import org.apache.hadoop.metrics.util.MBeanUtil; + +import javax.management.InstanceAlreadyExistsException; +import javax.management.MBeanRegistrationException; +import javax.management.MBeanServer; +import javax.management.MalformedObjectNameException; +import javax.management.NotCompliantMBeanException; +import javax.management.ObjectName; +import java.io.IOException; +import java.lang.management.ManagementFactory; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; + +/** + * Service to support Region Server Grouping (HBase-6721) + */ +@InterfaceAudience.Private +public class GroupAdminServer extends GroupAdmin { + private static final Log LOG = LogFactory.getLog(GroupAdminServer.class); + + private MasterServices 
master;
+  //List of servers that are being moved from one group to another
+  //Key=host:port,Value=targetGroup
+  private ConcurrentMap<HostAndPort, String> serversInTransition =
+      new ConcurrentHashMap<HostAndPort, String>();
+  private GroupInfoManager groupInfoManager;
+
+  public GroupAdminServer(MasterServices master,
+      GroupInfoManager groupInfoManager) throws IOException {
+    this.master = master;
+    this.groupInfoManager = groupInfoManager;
+    registerMBean();
+  }
+
+  @Override
+  public GroupInfo getGroupInfo(String groupName) throws IOException {
+    return getGroupInfoManager().getGroup(groupName);
+  }
+
+  @Override
+  public GroupInfo getGroupInfoOfTable(TableName tableName) throws IOException {
+    String groupName = getGroupInfoManager().getGroupOfTable(tableName);
+    if (groupName == null) {
+      if(master.getTableDescriptors().get(tableName) == null) {
+        throw new ConstraintException("Table "+tableName+" does not exist");
+      }
+      throw new ConstraintException("Table "+tableName+" has no group");
+    }
+    return getGroupInfoManager().getGroup(groupName);
+  }
+
+  @Override
+  public void moveServers(Set<HostAndPort> servers, String targetGroupName)
+      throws IOException {
+    if (servers == null) {
+      throw new DoNotRetryIOException(
+          "The list of servers cannot be null.");
+    }
+    if (StringUtils.isEmpty(targetGroupName)) {
+      throw new DoNotRetryIOException("The target group cannot be null.");
+    }
+    if(servers.size() < 1) {
+      return;
+    }
+
+    GroupInfo targetGrp = getGroupInfo(targetGroupName);
+    GroupInfoManager manager = getGroupInfoManager();
+    synchronized (manager) {
+      if (master.getMasterCoprocessorHost() != null) {
+        master.getMasterCoprocessorHost().preMoveServers(servers, targetGroupName);
+      }
+      //we only allow a move from a single source group
+      //so this should be ok
+      GroupInfo srcGrp = manager.getGroupOfServer(servers.iterator().next());
+      //only move online servers (from default)
+      //or servers from other groups
+      //this prevents bogus servers from entering groups
+      if(GroupInfo.DEFAULT_GROUP.equals(srcGrp.getName())) {
+        Set<HostAndPort> onlineServers = new HashSet<HostAndPort>();
+        for(ServerName server: master.getServerManager().getOnlineServers().keySet()) {
+          onlineServers.add(server.getHostPort());
+        }
+        for(HostAndPort el: servers) {
+          if(!onlineServers.contains(el)) {
+            throw new DoNotRetryIOException(
+                "Server "+el+" is not an online server in default group.");
+          }
+        }
+      }
+
+      if(srcGrp.getServers().size() <= servers.size() &&
+          srcGrp.getTables().size() > 0) {
+        throw new DoNotRetryIOException("Cannot leave a group "+srcGrp.getName()+
+            " that contains tables without servers.");
+      }
+
+      String sourceGroupName =
+          getGroupInfoManager().getGroupOfServer(srcGrp.getServers().iterator().next()).getName();
+      if(getGroupInfo(targetGroupName) == null) {
+        throw new ConstraintException("Target group does not exist: "+targetGroupName);
+      }
+
+      for(HostAndPort server: servers) {
+        if (serversInTransition.containsKey(server)) {
+          throw new DoNotRetryIOException(
+              "Server list contains a server that is already being moved: "+server);
+        }
+        String tmpGroup = getGroupInfoManager().getGroupOfServer(server).getName();
+        if (sourceGroupName != null && !tmpGroup.equals(sourceGroupName)) {
+          throw new DoNotRetryIOException(
+              "Move server request should only come from one source group. "+
+              "Expecting only "+sourceGroupName+" but contains "+tmpGroup);
+        }
+      }
+
+      if(sourceGroupName.equals(targetGroupName)) {
+        throw new ConstraintException(
+            "Target group is the same as source group: "+targetGroupName);
+      }
+
+      try {
+        //update the servers as in transition
+        for (HostAndPort server : servers) {
+          serversInTransition.put(server, targetGroupName);
+        }
+
+        getGroupInfoManager().moveServers(servers, sourceGroupName, targetGroupName);
+        boolean found;
+        List<HostAndPort> tmpServers = Lists.newArrayList(servers);
+        do {
+          found = false;
+          for (Iterator<HostAndPort> iter = tmpServers.iterator();
+               iter.hasNext(); ) {
+            HostAndPort rs = iter.next();
+            //get online regions
+            List<HRegionInfo> regions = new LinkedList<HRegionInfo>();
+            for (Map.Entry<HRegionInfo, ServerName> el :
+                master.getAssignmentManager().getRegionStates().getRegionAssignments().entrySet()) {
+              if (el.getValue().getHostPort().equals(rs)) {
+                regions.add(el.getKey());
+              }
+            }
+            for (RegionState state :
+                master.getAssignmentManager().getRegionStates().getRegionsInTransition().values()) {
+              if (state.getServerName().getHostPort().equals(rs)) {
+                regions.add(state.getRegion());
+              }
+            }
+
+            //unassign regions for a server
+            LOG.info("Unassigning " + regions.size() +
+                " regions from server " + rs + " for move to " + targetGroupName);
+            if (regions.size() > 0) {
+              //TODO bulk unassign or throttled unassign?
+              for (HRegionInfo region : regions) {
+                //regions might get assigned from tables of the target group,
+                //so we need to filter them out
+                if (!targetGrp.containsTable(region.getTable())) {
+                  master.getAssignmentManager().unassign(region);
+                  found = true;
+                }
+              }
+            }
+            if (!found) {
+              iter.remove();
+            }
+          }
+          try {
+            Thread.sleep(1000);
+          } catch (InterruptedException e) {
+            LOG.warn("Sleep interrupted", e);
+            Thread.currentThread().interrupt();
+          }
+        } while (found);
+      } finally {
+        //remove from transition
+        for (HostAndPort server : servers) {
+          serversInTransition.remove(server);
+        }
+      }
+      if (master.getMasterCoprocessorHost() != null) {
+        master.getMasterCoprocessorHost().postMoveServers(servers, targetGroupName);
+      }
+      LOG.info("Move server done: "+sourceGroupName+"->"+targetGroupName);
+    }
+  }
+
+  @Override
+  public void moveTables(Set<TableName> tables, String targetGroup) throws IOException {
+    if (tables == null) {
+      throw new ConstraintException(
+          "The list of tables cannot be null.");
+    }
+    if(tables.size() < 1) {
+      LOG.debug("moveTables() passed an empty set. Ignoring.");
+      return;
+    }
+    GroupInfoManager manager = getGroupInfoManager();
+    synchronized (manager) {
+      if (master.getMasterCoprocessorHost() != null) {
+        master.getMasterCoprocessorHost().preMoveTables(tables, targetGroup);
+      }
+
+      if(targetGroup != null) {
+        GroupInfo destGroup = manager.getGroup(targetGroup);
+        if(destGroup == null) {
+          throw new ConstraintException("Target group does not exist: "+targetGroup);
+        }
+        if(destGroup.getServers().size() < 1) {
+          throw new ConstraintException("Target group must have at least one server.");
+        }
+      }
+
+      for(TableName table : tables) {
+        String srcGroup = manager.getGroupOfTable(table);
+        if(srcGroup != null && srcGroup.equals(targetGroup)) {
+          throw new ConstraintException(
+              "Source group is the same as target group for table "+table+": "+srcGroup);
+        }
+      }
+      manager.moveTables(tables, targetGroup);
+      if (master.getMasterCoprocessorHost() != null) {
+        master.getMasterCoprocessorHost().postMoveTables(tables, targetGroup);
+      }
+    }
+    for(TableName table: tables) {
+      TableLock lock = master.getTableLockManager().writeLock(table, "Group: table move");
+      try {
+        lock.acquire();
+        for (HRegionInfo region :
+            master.getAssignmentManager().getRegionStates().getRegionsOfTable(table)) {
+          master.getAssignmentManager().unassign(region);
+        }
+      } finally {
+        lock.release();
+      }
+    }
+  }
+
+  @Override
+  public void addGroup(String name) throws IOException {
+    if (master.getMasterCoprocessorHost() != null) {
+      master.getMasterCoprocessorHost().preAddGroup(name);
+    }
+    getGroupInfoManager().addGroup(new GroupInfo(name));
+    if (master.getMasterCoprocessorHost() != null) {
+      master.getMasterCoprocessorHost().postAddGroup(name);
+    }
+  }
+
+  @Override
+  public void removeGroup(String name) throws IOException {
+    GroupInfoManager manager = getGroupInfoManager();
+    synchronized (manager) {
+      if (master.getMasterCoprocessorHost() != null) {
+        master.getMasterCoprocessorHost().preRemoveGroup(name);
+      }
+      GroupInfo groupInfo = getGroupInfoManager().getGroup(name);
+      if(groupInfo == null) {
+        throw new DoNotRetryIOException("Group "+name+" does not exist");
+      }
+      int tableCount = groupInfo.getTables().size();
+      if (tableCount > 0) {
+        throw new DoNotRetryIOException("Group "+name+" must have no associated tables: "+tableCount);
+      }
+      int serverCount = groupInfo.getServers().size();
+      if(serverCount > 0) {
+        throw new DoNotRetryIOException("Group "+name+" must have no associated servers: "+serverCount);
+      }
+      for(NamespaceDescriptor ns: master.listNamespaceDescriptors()) {
+        String nsGroup = ns.getConfigurationValue(GroupInfo.NAMESPACEDESC_PROP_GROUP);
+        if(nsGroup != null && nsGroup.equals(name)) {
+          throw new DoNotRetryIOException("Group "+name+" is referenced by namespace: "+ns.getName());
+        }
+      }
+      manager.removeGroup(name);
+      if (master.getMasterCoprocessorHost() != null) {
+        master.getMasterCoprocessorHost().postRemoveGroup(name);
+      }
+    }
+  }
+
+  @Override
+  public boolean balanceGroup(String groupName) throws IOException {
+    ServerManager serverManager = master.getServerManager();
+    AssignmentManager assignmentManager = master.getAssignmentManager();
+    LoadBalancer balancer = master.getLoadBalancer();
+
+    boolean balancerRan;
+    synchronized (balancer) {
+      if (master.getMasterCoprocessorHost() != null) {
+        master.getMasterCoprocessorHost().preBalanceGroup(groupName);
+      }
+      // Only allow one balance run at a time.
+      // A group is not balanced while any of its regions are in transition or while
+      // dead servers are still being processed, mirroring the checks the master
+      // applies to a full-cluster balance.
+      Map<String, RegionState> groupRIT = groupGetRegionsInTransition(groupName);
+      if (groupRIT.size() > 0) {
+        LOG.debug("Not running balancer because " +
+            groupRIT.size() +
+            " region(s) in transition: " +
+            StringUtils.abbreviate(
+                master.getAssignmentManager().getRegionStates().getRegionsInTransition().toString(),
+                256));
+        return false;
+      }
+      if (serverManager.areDeadServersInProgress()) {
+        LOG.debug("Not running balancer because processing dead regionserver(s): " +
+            serverManager.getDeadServers());
+        return false;
+      }
+
+      //We balance per group instead of per table
+      List<RegionPlan> plans = new ArrayList<RegionPlan>();
+      for(Map.Entry<TableName, Map<ServerName, List<HRegionInfo>>> tableMap:
+          getGroupAssignmentsByTable(groupName).entrySet()) {
+        LOG.info("Creating partial plan for table "+tableMap.getKey()+": "+tableMap.getValue());
+        List<RegionPlan> partialPlans = balancer.balanceCluster(tableMap.getValue());
+        LOG.info("Partial plan for table "+tableMap.getKey()+": "+partialPlans);
+        if (partialPlans != null) {
+          plans.addAll(partialPlans);
+        }
+      }
+      long startTime = System.currentTimeMillis();
+      balancerRan = plans != null;
+      if (plans != null && !plans.isEmpty()) {
+        LOG.info("Group balance "+groupName+" starting with plan count: "+plans.size());
+        for (RegionPlan plan: plans) {
+          LOG.info("balance " + plan);
+          assignmentManager.balance(plan);
+        }
+        LOG.info("Group balance "+groupName+" completed after "+
+            (System.currentTimeMillis()-startTime)+" ms");
+      }
+      if (master.getMasterCoprocessorHost() != null) {
+        master.getMasterCoprocessorHost().postBalanceGroup(groupName, balancerRan);
+      }
+    }
+    return balancerRan;
+  }
+
+  @Override
+  public List<GroupInfo> listGroups() throws IOException {
+    return getGroupInfoManager().listGroups();
+  }
+
+  @Override
+  public GroupInfo getGroupOfServer(HostAndPort hostPort) throws IOException {
+    return getGroupInfoManager().getGroupOfServer(hostPort);
+  }
+
+  @InterfaceAudience.Private
+  public GroupInfoManager getGroupInfoManager() throws IOException {
+    return groupInfoManager;
+  }
+
+  private Map<String, RegionState> groupGetRegionsInTransition(String groupName)
+      throws IOException {
+    Map<String, RegionState> rit = Maps.newTreeMap();
+    AssignmentManager am = master.getAssignmentManager();
+    GroupInfo groupInfo = getGroupInfo(groupName);
+    for(TableName tableName : groupInfo.getTables()) {
+      for(HRegionInfo regionInfo: am.getRegionStates().getRegionsOfTable(tableName)) {
+        RegionState state =
+            master.getAssignmentManager().getRegionStates().getRegionTransitionState(regionInfo);
+        if(state != null) {
+          rit.put(regionInfo.getEncodedName(), state);
+        }
+      }
+    }
+    return rit;
+  }
+
+  private Map<TableName, Map<ServerName, List<HRegionInfo>>>
+      getGroupAssignmentsByTable(String groupName) throws IOException {
+    Map<TableName, Map<ServerName, List<HRegionInfo>>> result = Maps.newHashMap();
+    GroupInfo groupInfo = getGroupInfo(groupName);
+    Map<TableName, Map<ServerName, List<HRegionInfo>>> assignments = Maps.newHashMap();
+    for(Map.Entry<HRegionInfo, ServerName> entry:
+        master.getAssignmentManager().getRegionStates().getRegionAssignments().entrySet()) {
+      TableName currTable = entry.getKey().getTable();
+      ServerName currServer = entry.getValue();
+      HRegionInfo currRegion = entry.getKey();
+      if(groupInfo.getTables().contains(currTable)) {
+        if(!assignments.containsKey(entry.getKey().getTable())) {
+          assignments.put(currTable, new HashMap<ServerName, List<HRegionInfo>>());
+        }
+        if(!assignments.get(currTable).containsKey(currServer)) {
+          assignments.get(currTable).put(currServer, new ArrayList<HRegionInfo>());
+        }
+        assignments.get(currTable).get(currServer).add(currRegion);
+      }
+    }
+
+    Map<ServerName, List<HRegionInfo>> serverMap = Maps.newHashMap();
+    for(ServerName serverName: master.getServerManager().getOnlineServers().keySet()) {
+      if(groupInfo.getServers().contains(serverName.getHostPort())) {
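+        // Seed every online server of the group with an empty region list so that
+        // servers currently hosting none of the table's regions still show up as
+        // candidates for the balancer; real assignments are merged in below.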
+        serverMap.put(serverName, Collections.EMPTY_LIST);
+      }
+    }
+
+    //add all tables that are members of the group
+    for(TableName tableName : groupInfo.getTables()) {
+      if(assignments.containsKey(tableName)) {
+        result.put(tableName, new HashMap<ServerName, List<HRegionInfo>>());
+        result.get(tableName).putAll(serverMap);
+        result.get(tableName).putAll(assignments.get(tableName));
+        LOG.debug("Adding assignments for "+tableName+": "+assignments.get(tableName));
+      }
+    }
+
+    return result;
+  }
+
+  void registerMBean() {
+    GroupMXBeanImpl mxBeanInfo =
+        GroupMXBeanImpl.init(this, master);
+    try {
+      ObjectName name
+          = new ObjectName("Hadoop:service=HBase,name=Master,sub=rsGroup");
+      MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
+      mbs.registerMBean(mxBeanInfo, name);
+    } catch (InstanceAlreadyExistsException e) {
+      // Already registered; nothing to do.
+    } catch (Exception e) {
+      throw new IllegalStateException("Failed to register rsGroup MBean", e);
+    }
+    LOG.info("Registered Group MXBean");
+  }
+
+  public void prepareGroupForTable(HTableDescriptor desc) throws IOException {
+    String groupName =
+        master.getNamespaceDescriptor(desc.getTableName().getNamespaceAsString())
+            .getConfigurationValue(GroupInfo.NAMESPACEDESC_PROP_GROUP);
+    if (groupName == null) {
+      groupName = GroupInfo.DEFAULT_GROUP;
+    }
+    GroupInfo groupInfo = getGroupInfo(groupName);
+    if (groupInfo == null) {
+      throw new ConstraintException("Group " + groupName + " does not exist.");
+    }
+    if (!groupInfo.containsTable(desc.getTableName())) {
+      LOG.debug("Pre-moving table " + desc.getTableName() + " to group " + groupName);
+      moveTables(Sets.newHashSet(desc.getTableName()), groupName);
+    }
+  }
+
+  public void cleanupGroupForTable(TableName tableName) throws IOException {
+    try {
+      GroupInfo group = getGroupInfoOfTable(tableName);
+      if (group != null) {
+        LOG.debug("Removing deleted table from table group " + group.getName());
+        moveTables(Sets.newHashSet(tableName), null);
+      }
+    } catch (IOException ex) {
+      // ConstraintException is an IOException, so one catch block covers both.
+      LOG.debug("Failed to perform group information cleanup for table: " + tableName, ex);
+    }
+  }
+
+  @Override
+  public void close() throws IOException {
+  }
+}
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupBasedLoadBalancer.java hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupBasedLoadBalancer.java
new file mode 100644
index 0000000..88ae6ba
--- /dev/null
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupBasedLoadBalancer.java
@@ -0,0 +1,428 @@
+/**
+ * Copyright The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package org.apache.hadoop.hbase.group; + +import com.google.common.collect.ArrayListMultimap; +import com.google.common.collect.LinkedListMultimap; +import com.google.common.collect.ListMultimap; +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; +import com.google.common.net.HostAndPort; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.ClusterStatus; +import org.apache.hadoop.hbase.HBaseIOException; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.constraint.ConstraintException; +import org.apache.hadoop.hbase.master.LoadBalancer; +import org.apache.hadoop.hbase.master.MasterServices; +import org.apache.hadoop.hbase.master.RegionPlan; +import org.apache.hadoop.hbase.master.balancer.StochasticLoadBalancer; +import org.apache.hadoop.util.ReflectionUtils; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.TreeMap; + +/** + * GroupBasedLoadBalancer, used when Region Server Grouping is configured (HBase-6721) + * It does region balance based on a table's group membership. + * + * Most assignment methods contain two exclusive code paths: Online - when the group + * table is online and Offline - when it is unavailable. + * + * During Offline, assignments are assigned based on cached information in zookeeper. + * If unavailable (ie bootstrap) then regions are assigned randomly. + * + * Once the GROUP table has been assigned, the balancer switches to Online and will then + * start providing appropriate assignments for user tables. 
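+ *
+ * <p>Configuration sketch (illustrative, not prescribed by this patch): grouping is
+ * typically enabled by pointing the master at this balancer and registering the
+ * group admin endpoint in hbase-site.xml:
+ * <pre>
+ *   hbase.master.loadbalancer.class = org.apache.hadoop.hbase.group.GroupBasedLoadBalancer
+ *   hbase.coprocessor.master.classes = org.apache.hadoop.hbase.group.GroupAdminEndpoint
+ * </pre>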
+ *
+ */
+@InterfaceAudience.Private
+public class GroupBasedLoadBalancer implements GroupableBalancer, LoadBalancer {
+  /** Config for pluggable load balancers */
+  public static final String HBASE_GROUP_LOADBALANCER_CLASS = "hbase.group.grouploadbalancer.class";
+
+  private static final Log LOG = LogFactory.getLog(GroupBasedLoadBalancer.class);
+
+  private Configuration config;
+  private ClusterStatus clusterStatus;
+  private MasterServices masterServices;
+  private GroupInfoManager groupInfoManager;
+  private LoadBalancer internalBalancer;
+
+  //used during reflection by LoadBalancerFactory
+  @InterfaceAudience.Private
+  public GroupBasedLoadBalancer() {
+  }
+
+  //This constructor should only be used for unit testing
+  @InterfaceAudience.Private
+  public GroupBasedLoadBalancer(GroupInfoManager groupInfoManager) {
+    this.groupInfoManager = groupInfoManager;
+  }
+
+  @Override
+  public Configuration getConf() {
+    return config;
+  }
+
+  @Override
+  public void setConf(Configuration conf) {
+    this.config = conf;
+  }
+
+  @Override
+  public void setClusterStatus(ClusterStatus st) {
+    this.clusterStatus = st;
+  }
+
+  @Override
+  public void setMasterServices(MasterServices masterServices) {
+    this.masterServices = masterServices;
+  }
+
+  @Override
+  public List<RegionPlan> balanceCluster(TableName tableName, Map<ServerName, List<HRegionInfo>>
+      clusterState) throws HBaseIOException {
+    return balanceCluster(clusterState);
+  }
+
+  @Override
+  public List<RegionPlan> balanceCluster(Map<ServerName, List<HRegionInfo>> clusterState)
+      throws HBaseIOException {
+    if (!isOnline()) {
+      throw new ConstraintException(GroupInfoManager.GROUP_TABLE_NAME+
+          " is not online, unable to perform balance");
+    }
+
+    Map<ServerName, List<HRegionInfo>> correctedState = correctAssignments(clusterState);
+    List<RegionPlan> regionPlans = new ArrayList<RegionPlan>();
+    try {
+      for (GroupInfo info : groupInfoManager.listGroups()) {
+        Map<ServerName, List<HRegionInfo>> groupClusterState =
+            new HashMap<ServerName, List<HRegionInfo>>();
+        for (HostAndPort sName : info.getServers()) {
+          for(ServerName curr: clusterState.keySet()) {
+            if(curr.getHostPort().equals(sName)) {
+              groupClusterState.put(curr, correctedState.get(curr));
+            }
+          }
+        }
+        List<RegionPlan> groupPlans = this.internalBalancer
+            .balanceCluster(groupClusterState);
+        if (groupPlans != null) {
+          regionPlans.addAll(groupPlans);
+        }
+      }
+    } catch (IOException exp) {
+      LOG.warn("Exception while balancing cluster.", exp);
+      regionPlans.clear();
+    }
+    return regionPlans;
+  }
+
+  @Override
+  public Map<ServerName, List<HRegionInfo>> roundRobinAssignment(
+      List<HRegionInfo> regions, List<ServerName> servers) throws HBaseIOException {
+    Map<ServerName, List<HRegionInfo>> assignments = Maps.newHashMap();
+    ListMultimap<String, HRegionInfo> regionMap = ArrayListMultimap.create();
+    ListMultimap<String, ServerName> serverMap = ArrayListMultimap.create();
+    generateGroupMaps(regions, servers, regionMap, serverMap);
+    for(String groupKey : regionMap.keySet()) {
+      if (regionMap.get(groupKey).size() > 0) {
+        Map<ServerName, List<HRegionInfo>> result =
+            this.internalBalancer.roundRobinAssignment(
+                regionMap.get(groupKey),
+                serverMap.get(groupKey));
+        if(result != null) {
+          assignments.putAll(result);
+        }
+      }
+    }
+    return assignments;
+  }
+
+  @Override
+  public Map<ServerName, List<HRegionInfo>> retainAssignment(
+      Map<HRegionInfo, ServerName> regions, List<ServerName> servers) throws HBaseIOException {
+    try {
+      Map<ServerName, List<HRegionInfo>> assignments =
+          new TreeMap<ServerName, List<HRegionInfo>>();
+      ListMultimap<String, HRegionInfo> groupToRegion = ArrayListMultimap.create();
+      Set<HRegionInfo> misplacedRegions = getMisplacedRegions(regions);
+      for (HRegionInfo region : regions.keySet()) {
+        if (!misplacedRegions.contains(region)) {
+          String groupName = groupInfoManager.getGroupOfTable(region.getTable());
+          groupToRegion.put(groupName, region);
+        }
+      }
+      // Now the "groupToRegion" map has only the regions which have correct
+      // assignments.
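+      // First pass: for each group, let the internal balancer retain existing
+      // assignments, restricted to that group's online servers; misplaced regions
+      // are handled separately in the loop further below.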
+      for (String key : groupToRegion.keySet()) {
+        Map<HRegionInfo, ServerName> currentAssignmentMap =
+            new TreeMap<HRegionInfo, ServerName>();
+        List<HRegionInfo> regionList = groupToRegion.get(key);
+        GroupInfo info = groupInfoManager.getGroup(key);
+        List<ServerName> candidateList = filterOfflineServers(info, servers);
+        for (HRegionInfo region : regionList) {
+          currentAssignmentMap.put(region, regions.get(region));
+        }
+        if(candidateList.size() > 0) {
+          assignments.putAll(this.internalBalancer.retainAssignment(
+              currentAssignmentMap, candidateList));
+        }
+      }
+
+      // Second pass: place misplaced regions as if they were new, using servers
+      // from the group that should own them.
+      for (HRegionInfo region : misplacedRegions) {
+        String groupName = groupInfoManager.getGroupOfTable(
+            region.getTable());
+        GroupInfo info = groupInfoManager.getGroup(groupName);
+        List<ServerName> candidateList = filterOfflineServers(info, servers);
+        ServerName server = this.internalBalancer.randomAssignment(region,
+            candidateList);
+        if (server != null) {
+          if (!assignments.containsKey(server)) {
+            assignments.put(server, new ArrayList<HRegionInfo>());
+          }
+          assignments.get(server).add(region);
+        } else {
+          //if no server is available, assign to the bogus server so it ends up in RIT
+          if(!assignments.containsKey(BOGUS_SERVER_NAME)) {
+            assignments.put(BOGUS_SERVER_NAME, new ArrayList<HRegionInfo>());
+          }
+          assignments.get(BOGUS_SERVER_NAME).add(region);
+        }
+      }
+      return assignments;
+    } catch (IOException e) {
+      throw new HBaseIOException("Failed to do online retain assignment", e);
+    }
+  }
+
+  @Override
+  public Map<HRegionInfo, ServerName> immediateAssignment(
+      List<HRegionInfo> regions, List<ServerName> servers) throws HBaseIOException {
+    Map<HRegionInfo, ServerName> assignments = Maps.newHashMap();
+    ListMultimap<String, HRegionInfo> regionMap = LinkedListMultimap.create();
+    ListMultimap<String, ServerName> serverMap = LinkedListMultimap.create();
+    generateGroupMaps(regions, servers, regionMap, serverMap);
+    for(String groupKey : regionMap.keySet()) {
+      if (regionMap.get(groupKey).size() > 0) {
+        assignments.putAll(
+            this.internalBalancer.immediateAssignment(
+                regionMap.get(groupKey),
+                serverMap.get(groupKey)));
+      }
+    }
+    return assignments;
+  }
+
+  @Override
+  public ServerName randomAssignment(HRegionInfo region,
+      List<ServerName> servers) throws HBaseIOException {
+    ListMultimap<String, HRegionInfo> regionMap = LinkedListMultimap.create();
+    ListMultimap<String, ServerName> serverMap = LinkedListMultimap.create();
+    generateGroupMaps(Lists.newArrayList(region), servers, regionMap, serverMap);
+    List<ServerName> filteredServers = serverMap.get(regionMap.keySet().iterator().next());
+    return this.internalBalancer.randomAssignment(region, filteredServers);
+  }
+
+  private void generateGroupMaps(
+      List<HRegionInfo> regions,
+      List<ServerName> servers,
+      ListMultimap<String, HRegionInfo> regionMap,
+      ListMultimap<String, ServerName> serverMap) throws HBaseIOException {
+    try {
+      for (HRegionInfo region : regions) {
+        String groupName = groupInfoManager.getGroupOfTable(region.getTable());
+        if(groupName == null) {
+          LOG.warn("Group for table "+region.getTable()+" is null");
+        }
+        regionMap.put(groupName, region);
+      }
+      for (String groupKey : regionMap.keySet()) {
+        GroupInfo info = groupInfoManager.getGroup(groupKey);
+        serverMap.putAll(groupKey, filterOfflineServers(info, servers));
+        if(serverMap.get(groupKey).size() < 1) {
+          serverMap.put(groupKey, BOGUS_SERVER_NAME);
+        }
+      }
+    } catch(IOException e) {
+      throw new HBaseIOException("Failed to generate group maps", e);
+    }
+  }
+
+  private List<ServerName> filterOfflineServers(GroupInfo groupInfo,
+      List<ServerName> onlineServers) {
+    if (groupInfo != null) {
+      return filterServers(groupInfo.getServers(), onlineServers);
+    } else {
+      LOG.debug("Group Information found to be null. Some regions might be unassigned.");
+      return Collections.EMPTY_LIST;
+    }
+  }
+
+  /**
+   * Filter servers based on the online servers.
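+   * Only host and port are compared (via {@code getHostPort()}), so an online
+   * server matches a group entry regardless of its start code.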
+   *
+   * @param servers
+   *          the servers
+   * @param onlineServers
+   *          List of servers which are online.
+   * @return the list
+   */
+  private List<ServerName> filterServers(Collection<HostAndPort> servers,
+      Collection<ServerName> onlineServers) {
+    ArrayList<ServerName> finalList = new ArrayList<ServerName>();
+    for (HostAndPort server : servers) {
+      for(ServerName curr: onlineServers) {
+        if(curr.getHostPort().equals(server)) {
+          finalList.add(curr);
+        }
+      }
+    }
+    return finalList;
+  }
+
+  private ListMultimap<String, HRegionInfo> groupRegions(
+      List<HRegionInfo> regionList) throws IOException {
+    ListMultimap<String, HRegionInfo> regionGroup = ArrayListMultimap
+        .create();
+    for (HRegionInfo region : regionList) {
+      String groupName = groupInfoManager.getGroupOfTable(region.getTable());
+      regionGroup.put(groupName, region);
+    }
+    return regionGroup;
+  }
+
+  private Set<HRegionInfo> getMisplacedRegions(
+      Map<HRegionInfo, ServerName> regions) throws IOException {
+    Set<HRegionInfo> misplacedRegions = new HashSet<HRegionInfo>();
+    for (HRegionInfo region : regions.keySet()) {
+      ServerName assignedServer = regions.get(region);
+      GroupInfo info = groupInfoManager.getGroup(groupInfoManager.getGroupOfTable(region.getTable()));
+      if (assignedServer != null &&
+          (info == null || !info.containsServer(assignedServer.getHostPort()))) {
+        LOG.debug("Found misplaced region: " + region.getRegionNameAsString() +
+            " on server: " + assignedServer +
+            " found in group: " + groupInfoManager.getGroupOfServer(assignedServer.getHostPort()) +
+            " outside of group: " + (info == null ? "(none)" : info.getName()));
+        misplacedRegions.add(region);
+      }
+    }
+    return misplacedRegions;
+  }
+
+  private Map<ServerName, List<HRegionInfo>> correctAssignments(
+      Map<ServerName, List<HRegionInfo>> existingAssignments) {
+    Map<ServerName, List<HRegionInfo>> correctAssignments =
+        new TreeMap<ServerName, List<HRegionInfo>>();
+    List<HRegionInfo> misplacedRegions = new LinkedList<HRegionInfo>();
+    for (ServerName sName : existingAssignments.keySet()) {
+      correctAssignments.put(sName, new LinkedList<HRegionInfo>());
+      List<HRegionInfo> regions = existingAssignments.get(sName);
+      for (HRegionInfo region : regions) {
+        GroupInfo info = null;
+        try {
+          info = groupInfoManager.getGroup(groupInfoManager.getGroupOfTable(region.getTable()));
+        } catch(IOException exp) {
+          LOG.debug("Group information null for region of table " + region.getTable(),
+              exp);
+        }
+        if ((info == null) || (!info.containsServer(sName.getHostPort()))) {
+          // Misplaced region.
+          misplacedRegions.add(region);
+        } else {
+          correctAssignments.get(sName).add(region);
+        }
+      }
+    }
+
+    //TODO bulk unassign?
+    //unassign misplaced regions, so that they are assigned to correct groups.
+ for(HRegionInfo info: misplacedRegions) { + this.masterServices.getAssignmentManager().unassign(info); + } + return correctAssignments; + } + + @Override + public void initialize() throws HBaseIOException { + try { + if (groupInfoManager == null) { + List cps = + masterServices.getMasterCoprocessorHost().findCoprocessors(GroupAdminEndpoint.class); + if (cps.size() != 1) { + String msg = "Expected one implementation of GroupAdminEndpoint but found " + cps.size(); + LOG.error(msg); + throw new HBaseIOException(msg); + } + groupInfoManager = cps.get(0).getGroupInfoManager(); + } + } catch (IOException e) { + throw new HBaseIOException("Failed to initialize GroupInfoManagerImpl", e); + } + + // Create the balancer + Class balancerKlass = config.getClass( + HBASE_GROUP_LOADBALANCER_CLASS, + StochasticLoadBalancer.class, LoadBalancer.class); + internalBalancer = ReflectionUtils.newInstance(balancerKlass, config); + internalBalancer.setClusterStatus(clusterStatus); + internalBalancer.setMasterServices(masterServices); + internalBalancer.setConf(config); + internalBalancer.initialize(); + } + + public boolean isOnline() { + return groupInfoManager != null && groupInfoManager.isOnline(); + } + + @Override + public void regionOnline(HRegionInfo regionInfo, ServerName sn) { + } + + @Override + public void regionOffline(HRegionInfo regionInfo) { + } + + @Override + public void onConfigurationChange(Configuration conf) { + //DO nothing for now + } + + @Override + public void stop(String why) { + } + + @Override + public boolean isStopped() { + return false; + } +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupInfoManager.java hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupInfoManager.java new file mode 100644 index 0000000..ca296b9 --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupInfoManager.java @@ -0,0 +1,131 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.group; + +import com.google.common.net.HostAndPort; +import org.apache.hadoop.hbase.NamespaceDescriptor; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.util.Bytes; + +import java.io.IOException; +import java.util.List; +import java.util.Set; + +/** + * Interface used to manage GroupInfo storage. An implementation + * has the option to support offline mode. 
+ * See {@link GroupBasedLoadBalancer} + */ +public interface GroupInfoManager { + //Assigned before user tables + public static final TableName GROUP_TABLE_NAME = + TableName.valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR,"rsgroup"); + public static final byte[] GROUP_TABLE_NAME_BYTES = GROUP_TABLE_NAME.toBytes(); + public static final String groupZNode = "groupInfo"; + public static final byte[] META_FAMILY_BYTES = Bytes.toBytes("m"); + public static final byte[] META_QUALIFIER_BYTES = Bytes.toBytes("i"); + public static final byte[] ROW_KEY = {0}; + + + /** + * Adds the group. + * + * @param groupInfo the group name + * @throws java.io.IOException Signals that an I/O exception has occurred. + */ + void addGroup(GroupInfo groupInfo) throws IOException; + + /** + * Remove a region server group. + * + * @param groupName the group name + * @throws java.io.IOException Signals that an I/O exception has occurred. + */ + void removeGroup(String groupName) throws IOException; + + /** + * move servers to a new group. + * @param hostPorts list of servers, must be part of the same group + * @param srcGroup + * @param dstGroup + * @return true if move was successful + * @throws java.io.IOException + */ + boolean moveServers(Set hostPorts, + String srcGroup, String dstGroup) throws IOException; + + /** + * Gets the group info of server. + * + * @param hostPort the server + * @return An instance of GroupInfo + */ + GroupInfo getGroupOfServer(HostAndPort hostPort) throws IOException; + + /** + * Gets the group information. + * + * @param groupName the group name + * @return An instance of GroupInfo + */ + GroupInfo getGroup(String groupName) throws IOException; + + /** + * Get the group membership of a table + * @param tableName + * @return Group name of table + * @throws java.io.IOException + */ + String getGroupOfTable(TableName tableName) throws IOException; + + /** + * Set the group membership of a set of tables + * + * @param tableNames + * @param groupName + * @throws java.io.IOException + */ + void moveTables(Set tableNames, String groupName) throws IOException; + + /** + * List the groups + * + * @return list of GroupInfo + * @throws java.io.IOException + */ + List listGroups() throws IOException; + + /** + * Refresh/reload the group information from + * the persistent store + * + * @throws java.io.IOException + */ + void refresh() throws IOException; + + /** + * Whether the manager is able to fully + * return group metadata + * + * @return + */ + boolean isOnline(); +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupInfoManagerImpl.java hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupInfoManagerImpl.java new file mode 100644 index 0000000..843442e --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupInfoManagerImpl.java @@ -0,0 +1,736 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.group; + +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; + +import com.google.common.collect.Sets; +import com.google.common.net.HostAndPort; +import com.google.protobuf.ServiceException; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.Coprocessor; +import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.MetaTableAccessor; +import org.apache.hadoop.hbase.MetaTableAccessor.DefaultVisitorBase; +import org.apache.hadoop.hbase.NamespaceDescriptor; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.ClusterConnection; +import org.apache.hadoop.hbase.client.Delete; +import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.client.Mutation; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.client.TableState; +import org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint; +import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; +import org.apache.hadoop.hbase.master.MasterServices; +import org.apache.hadoop.hbase.master.ServerListener; +import org.apache.hadoop.hbase.master.TableStateManager; +import org.apache.hadoop.hbase.master.procedure.CreateTableProcedure; +import org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch; +import org.apache.hadoop.hbase.protobuf.ProtobufMagic; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.RequestConverter; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; +import org.apache.hadoop.hbase.protobuf.generated.GroupProtos; +import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos; +import org.apache.hadoop.hbase.regionserver.DisabledRegionSplitPolicy; +import org.apache.hadoop.hbase.security.access.AccessControlLists; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.ModifyRegionUtils; +import org.apache.hadoop.hbase.zookeeper.ZKUtil; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.zookeeper.KeeperException; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashMap; +import java.util.HashSet; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.NavigableSet; +import java.util.Set; +import java.util.TreeSet; +import java.util.concurrent.atomic.AtomicBoolean; + +/** + * This is an implementation of {@link GroupInfoManager}. 
It makes use of an HBase table as the persistent store for the group
+ * information, and of ZooKeeper to store the group information needed
+ * for bootstrapping during offline mode.
+ */
+public class GroupInfoManagerImpl implements GroupInfoManager, ServerListener {
+  private static final Log LOG = LogFactory.getLog(GroupInfoManagerImpl.class);
+
+  /** Table descriptor for hbase:rsgroup catalog table */
+  private final static HTableDescriptor GROUP_TABLE_DESC;
+  static {
+    GROUP_TABLE_DESC = new HTableDescriptor(GROUP_TABLE_NAME_BYTES);
+    GROUP_TABLE_DESC.addFamily(new HColumnDescriptor(META_FAMILY_BYTES));
+    GROUP_TABLE_DESC.setRegionSplitPolicyClassName(DisabledRegionSplitPolicy.class.getName());
+    try {
+      GROUP_TABLE_DESC.addCoprocessor(
+          MultiRowMutationEndpoint.class.getName(),
+          null, Coprocessor.PRIORITY_SYSTEM, null);
+    } catch (IOException ex) {
+      throw new RuntimeException(ex);
+    }
+  }
+
+  private volatile Map<String, GroupInfo> groupMap;
+  private volatile Map<TableName, String> tableMap;
+  private MasterServices master;
+  private Table groupTable;
+  private ClusterConnection conn;
+  private ZooKeeperWatcher watcher;
+  private GroupStartupWorker groupStartupWorker;
+  // contains list of groups that were last flushed to persistent store
+  private volatile Set<String> prevGroups;
+  private GroupSerDe groupSerDe;
+  private DefaultServerUpdater defaultServerUpdater;
+
+  public GroupInfoManagerImpl(MasterServices master) throws IOException {
+    this.groupMap = Collections.emptyMap();
+    this.tableMap = Collections.emptyMap();
+    groupSerDe = new GroupSerDe();
+    this.master = master;
+    this.watcher = master.getZooKeeper();
+    this.conn = master.getConnection();
+    groupStartupWorker = new GroupStartupWorker(this, master, conn);
+    prevGroups = new HashSet<String>();
+    refresh();
+    groupStartupWorker.start();
+    defaultServerUpdater = new DefaultServerUpdater(this);
+    master.getServerManager().registerListener(this);
+    defaultServerUpdater.start();
+  }
+
+  /**
+   * Adds the group.
+   *
+   * @param groupInfo the group information to add
+   */
+  @Override
+  public synchronized void addGroup(GroupInfo groupInfo) throws IOException {
+    if (groupMap.get(groupInfo.getName()) != null ||
+        groupInfo.getName().equals(GroupInfo.DEFAULT_GROUP)) {
+      throw new DoNotRetryIOException("Group already exists: " + groupInfo.getName());
+    }
+    Map<String, GroupInfo> newGroupMap = Maps.newHashMap(groupMap);
+    newGroupMap.put(groupInfo.getName(), groupInfo);
+    flushConfig(newGroupMap);
+  }
+
+  @Override
+  public synchronized boolean moveServers(Set<HostAndPort> hostPorts, String srcGroup,
+      String dstGroup) throws IOException {
+    GroupInfo src = new GroupInfo(getGroup(srcGroup));
+    GroupInfo dst = new GroupInfo(getGroup(dstGroup));
+    boolean foundOne = false;
+    for (HostAndPort el: hostPorts) {
+      foundOne = src.removeServer(el) || foundOne;
+      dst.addServer(el);
+    }
+
+    Map<String, GroupInfo> newGroupMap = Maps.newHashMap(groupMap);
+    newGroupMap.put(src.getName(), src);
+    newGroupMap.put(dst.getName(), dst);
+
+    flushConfig(newGroupMap);
+    return foundOne;
+  }
+
+  /**
+   * Gets the group info of server.
+   *
+   * @param hostPort the server
+   * @return An instance of GroupInfo.
+   */
+  @Override
+  public GroupInfo getGroupOfServer(HostAndPort hostPort) throws IOException {
+    for (GroupInfo info : groupMap.values()) {
+      if (info.containsServer(hostPort)) {
+        return info;
+      }
+    }
+    return getGroup(GroupInfo.DEFAULT_GROUP);
+  }
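Every mutator above follows the same copy-on-write discipline: the live groupMap is never edited in place; a copy is taken, modified, persisted via flushConfig, and only then published to readers. A minimal self-contained sketch of that pattern, with illustrative names that are not part of the patch:

    import java.util.Collections;
    import java.util.HashMap;
    import java.util.Map;

    // Illustrative copy-on-write registry: readers see an immutable snapshot,
    // writers swap in a new map only after the change has been persisted.
    class CowRegistry<K, V> {
      private volatile Map<K, V> snapshot = Collections.emptyMap();

      synchronized void put(K key, V value) {
        Map<K, V> copy = new HashMap<K, V>(snapshot);
        copy.put(key, value);
        // persist(copy) would go here; publish only on success
        snapshot = Collections.unmodifiableMap(copy);
      }

      V get(K key) {
        return snapshot.get(key); // no locking needed for readers
      }
    }

This is why getGroup and getGroupOfServer below can read groupMap without synchronization.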
+  /**
+   * Gets the group information.
+   *
+   * @param groupName the group name
+   * @return An instance of GroupInfo
+   */
+  @Override
+  public GroupInfo getGroup(String groupName) throws IOException {
+    return groupMap.get(groupName);
+  }
+
+  @Override
+  public String getGroupOfTable(TableName tableName) throws IOException {
+    return tableMap.get(tableName);
+  }
+
+  @Override
+  public synchronized void moveTables(Set<TableName> tableNames, String groupName)
+      throws IOException {
+    if (groupName != null && !groupMap.containsKey(groupName)) {
+      throw new DoNotRetryIOException("Group " + groupName + " does not exist or is a special group");
+    }
+    Map<String, GroupInfo> newGroupMap = Maps.newHashMap(groupMap);
+    for (TableName tableName: tableNames) {
+      if (tableMap.containsKey(tableName)) {
+        GroupInfo src = new GroupInfo(groupMap.get(tableMap.get(tableName)));
+        src.removeTable(tableName);
+        newGroupMap.put(src.getName(), src);
+      }
+      if (groupName != null) {
+        GroupInfo dst = new GroupInfo(newGroupMap.get(groupName));
+        dst.addTable(tableName);
+        newGroupMap.put(dst.getName(), dst);
+      }
+    }
+
+    flushConfig(newGroupMap);
+  }
+
+  /**
+   * Delete a region server group.
+   *
+   * @param groupName the group name
+   * @throws java.io.IOException Signals that an I/O exception has occurred.
+   */
+  @Override
+  public synchronized void removeGroup(String groupName) throws IOException {
+    if (!groupMap.containsKey(groupName) || groupName.equals(GroupInfo.DEFAULT_GROUP)) {
+      throw new DoNotRetryIOException("Group " + groupName + " does not exist or is a reserved group");
+    }
+    Map<String, GroupInfo> newGroupMap = Maps.newHashMap(groupMap);
+    newGroupMap.remove(groupName);
+    flushConfig(newGroupMap);
+  }
+
+  @Override
+  public List<GroupInfo> listGroups() throws IOException {
+    return Lists.newLinkedList(groupMap.values());
+  }
+
+  @Override
+  public boolean isOnline() {
+    return groupStartupWorker.isOnline();
+  }
+
+  @Override
+  public synchronized void refresh() throws IOException {
+    refresh(false);
+  }
+
+  private synchronized void refresh(boolean forceOnline) throws IOException {
+    List<GroupInfo> groupList = new LinkedList<GroupInfo>();
+
+    // overwrite anything read from zk, group table is source of truth
+    // if online read from GROUP table
+    if (forceOnline || isOnline()) {
+      LOG.debug("Refreshing in Online mode.");
+      if (groupTable == null) {
+        groupTable = conn.getTable(GROUP_TABLE_NAME);
+      }
+      groupList.addAll(groupSerDe.retrieveGroupList(groupTable));
+    } else {
+      LOG.debug("Refreshing in Offline mode.");
+      String groupBasePath = ZKUtil.joinZNode(watcher.baseZNode, groupZNode);
+      groupList.addAll(groupSerDe.retrieveGroupList(watcher, groupBasePath));
+    }
+
+    // refresh default group, prune
+    NavigableSet<TableName> orphanTables = new TreeSet<TableName>();
+    for (String entry: master.getTableDescriptors().getAll().keySet()) {
+      orphanTables.add(TableName.valueOf(entry));
+    }
+
+    List<TableName> specialTables;
+    if (!master.isInitialized()) {
+      specialTables = new ArrayList<TableName>();
+      specialTables.add(AccessControlLists.ACL_TABLE_NAME);
+      specialTables.add(TableName.META_TABLE_NAME);
+      specialTables.add(TableName.NAMESPACE_TABLE_NAME);
+      specialTables.add(GroupInfoManager.GROUP_TABLE_NAME);
+    } else {
+      specialTables =
+          master.listTableNamesByNamespace(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR);
+    }
+
+    for (TableName table : specialTables) {
+      orphanTables.add(table);
+    }
+    for (GroupInfo group: groupList) {
+      if (!group.getName().equals(GroupInfo.DEFAULT_GROUP)) {
+        orphanTables.removeAll(group.getTables());
+      }
+    }
+
+    // This is added at the end of the list
+    // so it overwrites the default group
loaded + // from region group table or zk + groupList.add(new GroupInfo(GroupInfo.DEFAULT_GROUP, + Sets.newHashSet(getDefaultServers()), + orphanTables)); + + + // populate the data + HashMap newGroupMap = Maps.newHashMap(); + HashMap newTableMap = Maps.newHashMap(); + for (GroupInfo group : groupList) { + newGroupMap.put(group.getName(), group); + for(TableName table: group.getTables()) { + newTableMap.put(table, group.getName()); + } + } + groupMap = Collections.unmodifiableMap(newGroupMap); + tableMap = Collections.unmodifiableMap(newTableMap); + + prevGroups.clear(); + prevGroups.addAll(groupMap.keySet()); + } + + private synchronized Map flushConfigTable(Map newGroupMap) + throws IOException { + Map newTableMap = Maps.newHashMap(); + List mutations = Lists.newArrayList(); + + // populate deletes + for(String groupName : prevGroups) { + if(!newGroupMap.containsKey(groupName)) { + Delete d = new Delete(Bytes.toBytes(groupName)); + mutations.add(d); + } + } + + // populate puts + for(GroupInfo groupInfo : newGroupMap.values()) { + GroupProtos.GroupInfo proto = ProtobufUtil.toProtoGroupInfo(groupInfo); + Put p = new Put(Bytes.toBytes(groupInfo.getName())); + p.addColumn(META_FAMILY_BYTES, + META_QUALIFIER_BYTES, + proto.toByteArray()); + mutations.add(p); + for(TableName entry: groupInfo.getTables()) { + newTableMap.put(entry, groupInfo.getName()); + } + } + + if(mutations.size() > 0) { + multiMutate(mutations); + } + return newTableMap; + } + + private synchronized void flushConfig(Map newGroupMap) throws IOException { + Map newTableMap; + + // For offline mode persistence is still unavailable + // We're refreshing in-memory state but only for default servers + if (!isOnline()) { + Map m = Maps.newHashMap(groupMap); + GroupInfo oldDefaultGroup = m.remove(GroupInfo.DEFAULT_GROUP); + GroupInfo newDefaultGroup = newGroupMap.remove(GroupInfo.DEFAULT_GROUP); + if (!m.equals(newGroupMap) || + !oldDefaultGroup.getTables().equals(newDefaultGroup.getTables())) { + throw new IOException("Only default servers can be updated during offline mode"); + } + newGroupMap.put(GroupInfo.DEFAULT_GROUP, newDefaultGroup); + groupMap = newGroupMap; + return; + } + + newTableMap = flushConfigTable(newGroupMap); + + // make changes visible since it has been + // persisted in the source of truth + groupMap = Collections.unmodifiableMap(newGroupMap); + tableMap = Collections.unmodifiableMap(newTableMap); + + + try { + String groupBasePath = ZKUtil.joinZNode(watcher.baseZNode, groupZNode); + ZKUtil.createAndFailSilent(watcher, groupBasePath, ProtobufMagic.PB_MAGIC); + + List zkOps = new ArrayList(newGroupMap.size()); + for(String groupName : prevGroups) { + if(!newGroupMap.containsKey(groupName)) { + String znode = ZKUtil.joinZNode(groupBasePath, groupName); + zkOps.add(ZKUtil.ZKUtilOp.deleteNodeFailSilent(znode)); + } + } + + + for(GroupInfo groupInfo : newGroupMap.values()) { + String znode = ZKUtil.joinZNode(groupBasePath, groupInfo.getName()); + GroupProtos.GroupInfo proto = ProtobufUtil.toProtoGroupInfo(groupInfo); + LOG.debug("Updating znode: "+znode); + ZKUtil.createAndFailSilent(watcher, znode); + zkOps.add(ZKUtil.ZKUtilOp.deleteNodeFailSilent(znode)); + zkOps.add(ZKUtil.ZKUtilOp.createAndFailSilent(znode, + ProtobufUtil.prependPBMagic(proto.toByteArray()))); + } + LOG.debug("Writing ZK GroupInfo count: " + zkOps.size()); + + ZKUtil.multiOrSequential(watcher, zkOps, false); + } catch (KeeperException e) { + LOG.error("Failed to write to groupZNode", e); + master.abort("Failed to write to groupZNode", e); + 
throw new IOException("Failed to write to groupZNode", e);
+    }
+
+    prevGroups.clear();
+    prevGroups.addAll(newGroupMap.keySet());
+  }
+
+  private List<ServerName> getOnlineRS() throws IOException {
+    if (master != null) {
+      return master.getServerManager().getOnlineServersList();
+    }
+    try {
+      LOG.debug("Reading online RS from zookeeper");
+      List<ServerName> servers = new LinkedList<ServerName>();
+      for (String el: ZKUtil.listChildrenNoWatch(watcher, watcher.rsZNode)) {
+        servers.add(ServerName.parseServerName(el));
+      }
+      return servers;
+    } catch (KeeperException e) {
+      throw new IOException("Failed to retrieve server list from zookeeper", e);
+    }
+  }
+
+  private List<HostAndPort> getDefaultServers() throws IOException {
+    List<HostAndPort> defaultServers = new LinkedList<HostAndPort>();
+    for (ServerName server : getOnlineRS()) {
+      HostAndPort hostPort = HostAndPort.fromParts(server.getHostname(), server.getPort());
+      boolean found = false;
+      for (GroupInfo groupInfo : groupMap.values()) {
+        if (!GroupInfo.DEFAULT_GROUP.equals(groupInfo.getName()) &&
+            groupInfo.containsServer(hostPort)) {
+          found = true;
+          break;
+        }
+      }
+      if (!found) {
+        defaultServers.add(hostPort);
+      }
+    }
+    return defaultServers;
+  }
+
+  private synchronized void updateDefaultServers(
+      Set<HostAndPort> hostPort) throws IOException {
+    GroupInfo info = groupMap.get(GroupInfo.DEFAULT_GROUP);
+    GroupInfo newInfo = new GroupInfo(info.getName(), hostPort, info.getTables());
+    HashMap<String, GroupInfo> newGroupMap = Maps.newHashMap(groupMap);
+    newGroupMap.put(newInfo.getName(), newInfo);
+    flushConfig(newGroupMap);
+  }
+
+  @Override
+  public void serverAdded(ServerName serverName) {
+    defaultServerUpdater.serverChanged();
+  }
+
+  @Override
+  public void serverRemoved(ServerName serverName) {
+    defaultServerUpdater.serverChanged();
+  }
+
+  private static class DefaultServerUpdater extends Thread {
+    private static final Log LOG = LogFactory.getLog(DefaultServerUpdater.class);
+    private GroupInfoManagerImpl mgr;
+    private boolean hasChanged = false;
+
+    public DefaultServerUpdater(GroupInfoManagerImpl mgr) {
+      this.mgr = mgr;
+    }
+
+    @Override
+    public void run() {
+      List<HostAndPort> prevDefaultServers = new LinkedList<HostAndPort>();
+      // run until the master shuts down
+      while (!mgr.master.isAborted() && !mgr.master.isStopped()) {
+        try {
+          LOG.info("Updating default servers.");
+          List<HostAndPort> servers = mgr.getDefaultServers();
+          Collections.sort(servers, new Comparator<HostAndPort>() {
+            @Override
+            public int compare(HostAndPort o1, HostAndPort o2) {
+              int diff = o1.getHostText().compareTo(o2.getHostText());
+              if (diff != 0) {
+                return diff;
+              }
+              return o1.getPort() - o2.getPort();
+            }
+          });
+          if (!servers.equals(prevDefaultServers)) {
+            mgr.updateDefaultServers(Sets.newHashSet(servers));
+            prevDefaultServers = servers;
+            LOG.info("Updated with servers: " + servers.size());
+          }
+          try {
+            synchronized (this) {
+              if (!hasChanged) {
+                wait();
+              }
+              hasChanged = false;
+            }
+          } catch (InterruptedException e) {
+            // ignore and re-check for changes on the next pass
+          }
+        } catch (IOException e) {
+          LOG.warn("Failed to update default servers", e);
+        }
+      }
+    }
+
+    public void serverChanged() {
+      synchronized (this) {
+        hasChanged = true;
+        this.notify();
+      }
+    }
+  }
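The updater thread coalesces bursts of server-change callbacks into a single recomputation: callers only flip a flag and notify, and the thread recomputes once it wakes. A minimal self-contained sketch of that signaling pattern, with names that are illustrative rather than from the patch:

    import java.util.concurrent.atomic.AtomicInteger;

    // Illustrative coalescing worker: many signalChange() calls while the
    // worker is busy collapse into a single extra pass of doWork().
    class CoalescingWorker extends Thread {
      private boolean changed = false;
      private final AtomicInteger passes = new AtomicInteger();

      @Override
      public void run() {
        while (!isInterrupted()) {
          doWork();
          synchronized (this) {
            try {
              if (!changed) {
                wait();          // sleep until someone signals a change
              }
              changed = false;   // consume the signal, then loop
            } catch (InterruptedException e) {
              return;
            }
          }
        }
      }

      void doWork() { passes.incrementAndGet(); }

      synchronized void signalChange() {
        changed = true;
        notify();                // wakes the worker, like serverChanged()
      }
    }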
+  private static class GroupStartupWorker extends Thread {
+    private static final Log LOG = LogFactory.getLog(GroupStartupWorker.class);
+
+    private Configuration conf;
+    private volatile boolean isOnline = false;
+    private MasterServices masterServices;
+    private GroupInfoManagerImpl groupInfoManager;
+    private ClusterConnection conn;
+
+    public GroupStartupWorker(GroupInfoManagerImpl groupInfoManager,
+        MasterServices masterServices,
+        ClusterConnection conn) {
+      this.conf = masterServices.getConfiguration();
+      this.masterServices = masterServices;
+      this.groupInfoManager = groupInfoManager;
+      this.conn = conn;
+      setName(GroupStartupWorker.class.getName() + "-" + masterServices.getServerName());
+      setDaemon(true);
+    }
+
+    @Override
+    public void run() {
+      if (waitForGroupTableOnline()) {
+        LOG.info("GroupBasedLoadBalancer is now online");
+      }
+    }
+
+    public boolean waitForGroupTableOnline() {
+      final List<HRegionInfo> foundRegions = new LinkedList<HRegionInfo>();
+      final List<HRegionInfo> assignedRegions = new LinkedList<HRegionInfo>();
+      final AtomicBoolean found = new AtomicBoolean(false);
+      final TableStateManager tsm = masterServices.getTableStateManager();
+      boolean createSent = false;
+      while (!found.get() && isMasterRunning()) {
+        foundRegions.clear();
+        assignedRegions.clear();
+        found.set(true);
+        try {
+          final Table nsTable = conn.getTable(TableName.NAMESPACE_TABLE_NAME);
+          final Table groupTable = conn.getTable(GroupInfoManager.GROUP_TABLE_NAME);
+          boolean rootMetaFound =
+              masterServices.getMetaTableLocator().verifyMetaRegionLocation(
+                  conn,
+                  masterServices.getZooKeeper(),
+                  1);
+          final AtomicBoolean nsFound = new AtomicBoolean(false);
+          if (rootMetaFound) {
+            MetaTableAccessor.Visitor visitor = new DefaultVisitorBase() {
+              @Override
+              public boolean visitInternal(Result row) throws IOException {
+                HRegionInfo info = HRegionInfo.getHRegionInfo(row);
+                if (info != null) {
+                  Cell serverCell =
+                      row.getColumnLatestCell(HConstants.CATALOG_FAMILY,
+                          HConstants.SERVER_QUALIFIER);
+                  if (GROUP_TABLE_NAME.equals(info.getTable()) && serverCell != null) {
+                    ServerName sn =
+                        ServerName.parseVersionedServerName(CellUtil.cloneValue(serverCell));
+                    if (sn == null) {
+                      found.set(false);
+                    } else if (tsm.isTableState(GROUP_TABLE_NAME, TableState.State.ENABLED)) {
+                      try {
+                        ClientProtos.ClientService.BlockingInterface rs = conn.getClient(sn);
+                        ClientProtos.GetRequest request =
+                            RequestConverter.buildGetRequest(info.getRegionName(),
+                                new Get(ROW_KEY));
+                        rs.get(null, request);
+                        assignedRegions.add(info);
+                      } catch (Exception ex) {
+                        LOG.debug("Caught exception while verifying group region", ex);
+                      }
+                    }
+                    foundRegions.add(info);
+                  }
+                  if (TableName.NAMESPACE_TABLE_NAME.equals(info.getTable())) {
+                    Cell cell = row.getColumnLatestCell(HConstants.CATALOG_FAMILY,
+                        HConstants.SERVER_QUALIFIER);
+                    ServerName sn = null;
+                    if (cell != null) {
+                      sn = ServerName.parseVersionedServerName(CellUtil.cloneValue(cell));
+                    }
+                    if (tsm.isTableState(TableName.NAMESPACE_TABLE_NAME,
+                        TableState.State.ENABLED)) {
+                      try {
+                        ClientProtos.ClientService.BlockingInterface rs = conn.getClient(sn);
+                        ClientProtos.GetRequest request =
+                            RequestConverter.buildGetRequest(info.getRegionName(),
+                                new Get(ROW_KEY));
+                        rs.get(null, request);
+                        nsFound.set(true);
+                      } catch (Exception ex) {
+                        LOG.debug("Caught exception while verifying group region", ex);
+                      }
+                    }
+                  }
+                }
+                return true;
+              }
+            };
+            MetaTableAccessor.fullScanRegions(conn, visitor);
+            // if no regions in meta then we have to create the table
+            if (foundRegions.size() < 1 && rootMetaFound && !createSent && nsFound.get()) {
+              groupInfoManager.createGroupTable(masterServices);
+              createSent = true;
+            }
+            LOG.info("Group table: " + GROUP_TABLE_NAME + " isOnline: " + found.get()
+                + ", regionCount: " + foundRegions.size() + ", assignCount: "
+                + assignedRegions.size() + ", rootMetaFound: " + rootMetaFound);
+            found.set(found.get() && assignedRegions.size() == foundRegions.size()
+                && foundRegions.size() > 0);
+          } else {
+            LOG.info("Waiting for catalog tables to come online");
+            found.set(false);
+          }
+          if (found.get()) {
+            LOG.debug("With group table online, refreshing cached information.");
+            groupInfoManager.refresh(true);
+            isOnline = true;
+            //flush any inconsistencies between ZK and HTable
+            groupInfoManager.flushConfig(groupInfoManager.groupMap);
+          }
+        } catch (Exception e) {
+          found.set(false);
+          LOG.warn("Failed to perform check", e);
+        }
+        try {
+          Thread.sleep(100);
+        } catch (InterruptedException e) {
+          LOG.info("Sleep interrupted", e);
+        }
+      }
+      return found.get();
+    }
+
+    public boolean isOnline() {
+      return isOnline;
+    }
+
+    private boolean isMasterRunning() {
+      return !masterServices.isAborted() && !masterServices.isStopped();
+    }
+  }
+
+  private void createGroupTable(MasterServices masterServices) throws IOException {
+    HRegionInfo[] newRegions =
+        ModifyRegionUtils.createHRegionInfos(GROUP_TABLE_DESC, null);
+    ProcedurePrepareLatch latch = ProcedurePrepareLatch.createLatch();
+    masterServices.getMasterProcedureExecutor().submitProcedure(
+        new CreateTableProcedure(
+            masterServices.getMasterProcedureExecutor().getEnvironment(),
+            GROUP_TABLE_DESC,
+            newRegions,
+            latch),
+        HConstants.NO_NONCE,
+        HConstants.NO_NONCE);
+    latch.await();
+    // wait for region to be online
+    int tries = 600;
+    while (masterServices.getAssignmentManager().getRegionStates()
+        .getRegionServerOfRegion(newRegions[0]) == null && tries > 0) {
+      try {
+        Thread.sleep(100);
+      } catch (InterruptedException e) {
+        throw new IOException("Wait interrupted", e);
+      }
+      tries--;
+    }
+    if (tries <= 0) {
+      throw new IOException("Failed to create group table.");
+    }
+  }
+
+  private void multiMutate(List<Mutation> mutations)
+      throws IOException {
+    CoprocessorRpcChannel channel = groupTable.coprocessorService(ROW_KEY);
+    MultiRowMutationProtos.MutateRowsRequest.Builder mmrBuilder
+        = MultiRowMutationProtos.MutateRowsRequest.newBuilder();
+    for (Mutation mutation : mutations) {
+      if (mutation instanceof Put) {
+        mmrBuilder.addMutationRequest(ProtobufUtil.toMutation(
+            ClientProtos.MutationProto.MutationType.PUT, mutation));
+      } else if (mutation instanceof Delete) {
+        mmrBuilder.addMutationRequest(ProtobufUtil.toMutation(
+            ClientProtos.MutationProto.MutationType.DELETE, mutation));
+      } else {
+        throw new DoNotRetryIOException("multiMutate doesn't support "
+            + mutation.getClass().getName());
+      }
+    }
+
+    MultiRowMutationProtos.MultiRowMutationService.BlockingInterface service =
+        MultiRowMutationProtos.MultiRowMutationService.newBlockingStub(channel);
+    try {
+      service.mutateRows(null, mmrBuilder.build());
+    } catch (ServiceException ex) {
+      // propagate instead of silently swallowing the failure
+      throw ProtobufUtil.getRemoteException(ex);
+    }
+  }
+}
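All writes to hbase:rsgroup funnel through multiMutate() so that the deletes and puts for one configuration change commit atomically through the MultiRowMutationEndpoint installed on the table. A hedged caller-side sketch of one such batch, reusing the row layout declared in GroupInfoManager (row key = group name, family "m", qualifier "i", protobuf-encoded GroupInfo); the group names and the serialized-bytes placeholder are invented for illustration:

    import java.util.ArrayList;
    import java.util.List;

    import org.apache.hadoop.hbase.client.Delete;
    import org.apache.hadoop.hbase.client.Mutation;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.util.Bytes;

    class GroupBatchSketch {
      // One atomic batch that replaces group "bar" with group "foo".
      // serializedNewGroup stands in for
      // ProtobufUtil.toProtoGroupInfo(info).toByteArray().
      static List<Mutation> renameGroupBatch(byte[] serializedNewGroup) {
        List<Mutation> batch = new ArrayList<Mutation>();
        Put put = new Put(Bytes.toBytes("foo"));    // new row keyed by group name
        put.addColumn(Bytes.toBytes("m"), Bytes.toBytes("i"), serializedNewGroup);
        batch.add(put);
        batch.add(new Delete(Bytes.toBytes("bar"))); // old row removed in the same RPC
        return batch;
      }
    }

Because both mutations travel in one MutateRowsRequest, a crash can never leave "foo" written while "bar" still exists.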
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupMXBean.java hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupMXBean.java
new file mode 100644
index 0000000..d87dd63
--- /dev/null
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupMXBean.java
@@ -0,0 +1,69 @@
+/**
+ * Copyright The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.group;
+
+import com.google.common.net.HostAndPort;
+import org.apache.hadoop.hbase.TableName;
+
+import java.io.IOException;
+import java.util.LinkedList;
+import java.util.List;
+
+public interface GroupMXBean {
+
+  public List<GroupInfoBean> getGroups() throws IOException;
+
+  public static class GroupInfoBean {
+
+    private String name;
+    private List<HostAndPort> servers;
+    private List<TableName> tables;
+    private List<HostAndPort> offlineServers;
+
+    //Need this to convert NavigableSet to List
+    public GroupInfoBean(GroupInfo groupInfo, List<HostAndPort> offlineServers) {
+      this.name = groupInfo.getName();
+      this.offlineServers = offlineServers;
+      this.servers = new LinkedList<HostAndPort>();
+      this.servers.addAll(groupInfo.getServers());
+      this.tables = new LinkedList<TableName>();
+      this.tables.addAll(groupInfo.getTables());
+    }
+
+    public String getName() {
+      return name;
+    }
+
+    public List<HostAndPort> getServers() {
+      return servers;
+    }
+
+    public List<HostAndPort> getOfflineServers() {
+      return offlineServers;
+    }
+
+    public List<TableName> getTables() {
+      return tables;
+    }
+  }
+
+}
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupMXBeanImpl.java hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupMXBeanImpl.java
new file mode 100644
index 0000000..895e967
--- /dev/null
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupMXBeanImpl.java
@@ -0,0 +1,80 @@
+/**
+ * Copyright The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package org.apache.hadoop.hbase.group; + +import com.google.common.collect.Lists; +import com.google.common.collect.Sets; +import com.google.common.net.HostAndPort; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.master.MasterServices; + +import java.io.IOException; +import java.util.HashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Set; + +public class GroupMXBeanImpl implements GroupMXBean { + private static final Log LOG = LogFactory.getLog(GroupMXBeanImpl.class); + + private static GroupMXBeanImpl instance = null; + + private GroupAdmin groupAdmin; + private MasterServices master; + + public synchronized static GroupMXBeanImpl init( + final GroupAdmin groupAdmin, + MasterServices master) { + if (instance == null) { + instance = new GroupMXBeanImpl(groupAdmin, master); + } + return instance; + } + + protected GroupMXBeanImpl(final GroupAdmin groupAdmin, + MasterServices master) { + this.groupAdmin = groupAdmin; + this.master = master; + } + + @Override + public List getGroups() throws IOException { + Set onlineServers = Sets.newHashSet(); + for (ServerName entry: master.getServerManager().getOnlineServersList()) { + onlineServers.add(HostAndPort.fromParts(entry.getHostname(), entry.getPort())); + } + List list = Lists.newArrayList(); + for (GroupInfo group: groupAdmin.listGroups()) { + List deadServers = Lists.newArrayList(); + for (HostAndPort server: group.getServers()) { + if (!onlineServers.contains(server)) { + deadServers.add(server); + } + } + list.add(new GroupInfoBean(group, deadServers)); + } + return list; + } + +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupSerDe.java hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupSerDe.java new file mode 100644 index 0000000..f60a842 --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupSerDe.java @@ -0,0 +1,87 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.group; + +import com.google.common.collect.Lists; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.exceptions.DeserializationException; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.GroupProtos; +import org.apache.hadoop.hbase.zookeeper.ZKUtil; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.zookeeper.KeeperException; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.util.List; + +//TODO do better encapsulation of SerDe logic from GroupInfoManager and GroupTracker +public class GroupSerDe { + private static final Log LOG = LogFactory.getLog(GroupSerDe.class); + + public GroupSerDe() { + + } + + public List retrieveGroupList(Table groupTable) throws IOException { + List groupInfoList = Lists.newArrayList(); + for (Result result : groupTable.getScanner(new Scan())) { + GroupProtos.GroupInfo proto = + GroupProtos.GroupInfo.parseFrom( + result.getValue( + GroupInfoManager.META_FAMILY_BYTES, + GroupInfoManager.META_QUALIFIER_BYTES)); + groupInfoList.add(ProtobufUtil.toGroupInfo(proto)); + } + return groupInfoList; + } + + public List retrieveGroupList(ZooKeeperWatcher watcher, + String groupBasePath) throws IOException { + List groupInfoList = Lists.newArrayList(); + //Overwrite any info stored by table, this takes precedence + try { + if(ZKUtil.checkExists(watcher, groupBasePath) != -1) { + for(String znode: ZKUtil.listChildrenAndWatchForNewChildren(watcher, groupBasePath)) { + byte[] data = ZKUtil.getData(watcher, ZKUtil.joinZNode(groupBasePath, znode)); + if(data.length > 0) { + ProtobufUtil.expectPBMagicPrefix(data); + ByteArrayInputStream bis = new ByteArrayInputStream( + data, ProtobufUtil.lengthOfPBMagic(), data.length); + groupInfoList.add(ProtobufUtil.toGroupInfo(GroupProtos.GroupInfo.parseFrom(bis))); + } + } + LOG.debug("Read ZK GroupInfo count:" + groupInfoList.size()); + } + } catch (KeeperException e) { + throw new IOException("Failed to read groupZNode",e); + } catch (DeserializationException e) { + throw new IOException("Failed to read groupZNode",e); + } catch (InterruptedException e) { + throw new IOException("Failed to read groupZNode",e); + } + return groupInfoList; + } +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupableBalancer.java hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupableBalancer.java new file mode 100644 index 0000000..c5bb69b --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupableBalancer.java @@ -0,0 +1,29 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.group;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.master.LoadBalancer;
+
+@InterfaceAudience.Private
+public interface GroupableBalancer extends LoadBalancer {
+}
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
index f7f98fe..c19cf2b 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
@@ -1033,7 +1033,7 @@ public class AssignmentManager {
       return;
     }
     LOG.info("Assigning " + region.getRegionNameAsString() +
-      " to " + plan.getDestination().toString());
+      " to " + plan.getDestination());
     // Transition RegionState to PENDING_OPEN
     regionStates.updateRegionState(region, State.PENDING_OPEN, plan.getDestination());
@@ -1222,8 +1222,13 @@ public class AssignmentManager {
         || existingPlan.getDestination() == null
         || !destServers.contains(existingPlan.getDestination())) {
       newPlan = true;
-      randomPlan = new RegionPlan(region, null,
-          balancer.randomAssignment(region, destServers));
+      try {
+        randomPlan = new RegionPlan(region, null,
+            balancer.randomAssignment(region, destServers));
+      } catch (IOException ex) {
+        LOG.warn("Failed to create new plan.", ex);
+        return null;
+      }
       if (!region.isMetaTable() && shouldAssignRegionsWithFavoredNodes) {
         List<HRegionInfo> regions = new ArrayList<HRegionInfo>(1);
         regions.add(region);
@@ -1468,6 +1473,8 @@ public class AssignmentManager {
       throw new IOException("Unable to determine a plan to assign region(s)");
     }
 
+    processBogusAssignments(bulkPlan);
+
     assign(regions.size(), servers.size(), "retainAssignment=true", bulkPlan);
   }
 
@@ -1497,6 +1504,8 @@ public class AssignmentManager {
       throw new IOException("Unable to determine a plan to assign region(s)");
     }
 
+    processBogusAssignments(bulkPlan);
+
     processFavoredNodes(regions);
     assign(regions.size(), servers.size(), "round-robin=true", bulkPlan);
   }
@@ -2886,6 +2895,16 @@ public class AssignmentManager {
     }
   }
 
+  private void processBogusAssignments(Map<ServerName, List<HRegionInfo>> bulkPlan) {
+    if (bulkPlan.containsKey(LoadBalancer.BOGUS_SERVER_NAME)) {
+      // Found no plan for some regions, put those regions in RIT
+      for (HRegionInfo hri : bulkPlan.get(LoadBalancer.BOGUS_SERVER_NAME)) {
+        regionStates.updateRegionState(hri, State.FAILED_OPEN);
+      }
+      bulkPlan.remove(LoadBalancer.BOGUS_SERVER_NAME);
+    }
+  }
+
   /**
    * @return Instance of load balancer
    */
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index f160835..a6effa8 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -46,6 +46,11 @@ import javax.servlet.http.HttpServlet;
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.collect.Maps;
+import
com.google.protobuf.Descriptors; +import com.google.protobuf.Service; +import com.google.common.collect.Lists; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -1396,11 +1401,14 @@ public class HMaster extends HRegionServer implements MasterServices, Server { final byte[] destServerName) throws HBaseIOException { RegionState regionState = assignmentManager.getRegionStates(). getRegionState(Bytes.toString(encodedRegionName)); - if (regionState == null) { + + HRegionInfo hri; + if (regionState != null) { + hri = regionState.getRegion(); + } else { throw new UnknownRegionException(Bytes.toStringBinary(encodedRegionName)); } - HRegionInfo hri = regionState.getRegion(); ServerName dest; if (destServerName == null || destServerName.length == 0) { LOG.info("Passed destination servername is null/empty so " + @@ -1413,7 +1421,12 @@ public class HMaster extends HRegionServer implements MasterServices, Server { return; } } else { - dest = ServerName.valueOf(Bytes.toString(destServerName)); + ServerName candidate = ServerName.valueOf(Bytes.toString(destServerName)); + dest = balancer.randomAssignment(hri, Lists.newArrayList(candidate)); + if (dest == null) { + LOG.debug("Unable to determine a plan to assign " + hri); + return; + } if (dest.equals(serverName) && balancer instanceof BaseLoadBalancer && !((BaseLoadBalancer)balancer).shouldBeOnMaster(hri)) { // To avoid unnecessary region moving later by balancer. Don't put user @@ -1476,7 +1489,6 @@ public class HMaster extends HRegionServer implements MasterServices, Server { HRegionInfo[] newRegions = ModifyRegionUtils.createHRegionInfos(hTableDescriptor, splitKeys); checkInitialized(); sanityCheckTableDescriptor(hTableDescriptor); - if (cpHost != null) { cpHost.preCreateTable(hTableDescriptor, newRegions); } @@ -2780,4 +2792,9 @@ public class HMaster extends HRegionServer implements MasterServices, Server { public RegionNormalizerTracker getRegionNormalizerTracker() { return regionNormalizerTracker; } + + @Override + public LoadBalancer getLoadBalancer() { + return balancer; + } } diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java index 15dedc6..3ff0def 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java @@ -52,6 +52,9 @@ import org.apache.hadoop.hbase.TableName; @InterfaceAudience.Private public interface LoadBalancer extends Configurable, Stoppable, ConfigurationObserver { + //used to signal to the caller that the region(s) cannot be assigned + ServerName BOGUS_SERVER_NAME = ServerName.parseServerName("localhost,1,1"); + /** * Set the current cluster status. 
This allows a LoadBalancer to map host name to a server * @param st diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java index 5fa92c6..c79e862 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java @@ -21,7 +21,9 @@ package org.apache.hadoop.hbase.master; import java.io.IOException; import java.util.List; +import java.util.Set; +import com.google.common.net.HostAndPort; import org.apache.commons.lang.ClassUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -63,12 +65,15 @@ public class MasterCoprocessorHost static class MasterEnvironment extends CoprocessorHost.Environment implements MasterCoprocessorEnvironment { private MasterServices masterServices; + final boolean supportGroupCPs; public MasterEnvironment(final Class implClass, final Coprocessor impl, final int priority, final int seq, final Configuration conf, final MasterServices services) { super(impl, priority, seq, conf); this.masterServices = services; + supportGroupCPs = !useLegacyMethod(impl.getClass(), + "preBalanceGroup", ObserverContext.class, String.class); } public MasterServices getMasterServices() { @@ -1148,4 +1153,135 @@ public class MasterCoprocessorHost } return bypass; } + + public void preMoveServers(final Set servers, final String targetGroup) + throws IOException { + execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + @Override + public void call(MasterObserver oserver, + ObserverContext ctx) throws IOException { + if(((MasterEnvironment)ctx.getEnvironment()).supportGroupCPs) { + oserver.preMoveServers(ctx, servers, targetGroup); + } + } + }); + } + + public void postMoveServers(final Set servers, final String targetGroup) + throws IOException { + execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + @Override + public void call(MasterObserver oserver, + ObserverContext ctx) throws IOException { + if(((MasterEnvironment)ctx.getEnvironment()).supportGroupCPs) { + oserver.postMoveServers(ctx, servers, targetGroup); + } + } + }); + } + + public void preMoveTables(final Set tables, final String targetGroup) + throws IOException { + execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + @Override + public void call(MasterObserver oserver, + ObserverContext ctx) throws IOException { + if(((MasterEnvironment)ctx.getEnvironment()).supportGroupCPs) { + oserver.preMoveTables(ctx, tables, targetGroup); + } + } + }); + } + + public void postMoveTables(final Set tables, final String targetGroup) + throws IOException { + execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + @Override + public void call(MasterObserver oserver, + ObserverContext ctx) throws IOException { + if(((MasterEnvironment)ctx.getEnvironment()).supportGroupCPs) { + oserver.postMoveTables(ctx, tables, targetGroup); + } + } + }); + } + + public void preAddGroup(final String name) + throws IOException { + execOperation(coprocessors.isEmpty() ? 
null : new CoprocessorOperation() { + @Override + public void call(MasterObserver oserver, + ObserverContext ctx) throws IOException { + if(((MasterEnvironment)ctx.getEnvironment()).supportGroupCPs) { + oserver.preAddGroup(ctx, name); + } + } + }); + } + + public void postAddGroup(final String name) + throws IOException { + execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + @Override + public void call(MasterObserver oserver, + ObserverContext ctx) throws IOException { + if (((MasterEnvironment) ctx.getEnvironment()).supportGroupCPs) { + oserver.postAddGroup(ctx, name); + } + } + }); + } + + public void preRemoveGroup(final String name) + throws IOException { + execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + @Override + public void call(MasterObserver oserver, + ObserverContext ctx) throws IOException { + if(((MasterEnvironment)ctx.getEnvironment()).supportGroupCPs) { + oserver.preRemoveGroup(ctx, name); + } + } + }); + } + + public void postRemoveGroup(final String name) + throws IOException { + execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + @Override + public void call(MasterObserver oserver, + ObserverContext ctx) throws IOException { + if(((MasterEnvironment)ctx.getEnvironment()).supportGroupCPs) { + oserver.postRemoveGroup(ctx, name); + } + } + }); + } + + public void preBalanceGroup(final String name) + throws IOException { + execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + @Override + public void call(MasterObserver oserver, + ObserverContext ctx) throws IOException { + if(((MasterEnvironment)ctx.getEnvironment()).supportGroupCPs) { + oserver.preBalanceGroup(ctx, name); + } + } + }); + } + + public void postBalanceGroup(final String name, final boolean balanceRan) + throws IOException { + execOperation(coprocessors.isEmpty() ? 
null : new CoprocessorOperation() {
+      @Override
+      public void call(MasterObserver oserver,
+          ObserverContext ctx) throws IOException {
+        if (((MasterEnvironment)ctx.getEnvironment()).supportGroupCPs) {
+          oserver.postBalanceGroup(ctx, name, balanceRan);
+        }
+      }
+    });
+  }
+
 }
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index b269c3d..fd27cd6 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -1404,6 +1404,14 @@ public class MasterRpcServices extends RSRpcServices
     }
     Pair<HRegionInfo, ServerName> pair =
         MetaTableAccessor.getRegion(master.getConnection(), regionName);
+    if (Bytes.equals(HRegionInfo.FIRST_META_REGIONINFO.getRegionName(), regionName)) {
+      pair = new Pair<HRegionInfo, ServerName>(HRegionInfo.FIRST_META_REGIONINFO,
+          master.getMetaTableLocator().getMetaRegionLocation(master.getZooKeeper()));
+    }
-    if (pair == null) throw new UnknownRegionException(Bytes.toString(regionName));
+    if (pair == null) {
+      throw new UnknownRegionException(Bytes.toString(regionName));
+    }
 
     HRegionInfo hri = pair.getFirst();
     if (master.cpHost != null) {
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
index 935644f..c17473b 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
@@ -384,4 +384,9 @@ public interface MasterServices extends Server {
    * @throws IOException
    */
   public long getLastMajorCompactionTimestampForRegion(byte[] regionName) throws IOException;
+
+  /**
+   * @return load balancer
+   */
+  public LoadBalancer getLoadBalancer();
 }
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
index 24cc879..4285620 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
@@ -31,6 +31,7 @@ import java.util.Set;
 import java.util.TreeMap;
 import java.util.TreeSet;
 
+import com.google.common.net.HostAndPort;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -2627,4 +2628,34 @@ public class AccessController extends BaseMasterAndRegionObserver
   public void postReplicateLogEntries(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
       List<WALEntry> entries, CellScanner cells) throws IOException {
   }
+
+  @Override
+  public void preMoveServers(ObserverContext<MasterCoprocessorEnvironment> ctx,
+      Set<HostAndPort> servers, String targetGroup) throws IOException {
+    requirePermission("moveServers", Action.ADMIN);
+  }
+
+  @Override
+  public void preMoveTables(ObserverContext<MasterCoprocessorEnvironment> ctx,
+      Set<TableName> tables, String targetGroup) throws IOException {
+    requirePermission("moveTables", Action.ADMIN);
+  }
+
+  @Override
+  public void preAddGroup(ObserverContext<MasterCoprocessorEnvironment> ctx,
+      String name) throws IOException {
+    requirePermission("addGroup", Action.ADMIN);
+  }
+
+  @Override
+  public void preRemoveGroup(ObserverContext<MasterCoprocessorEnvironment> ctx,
+      String name) throws IOException {
+    requirePermission("removeGroup", Action.ADMIN);
+  }
+
+  @Override
+  public void preBalanceGroup(ObserverContext<MasterCoprocessorEnvironment> ctx,
+      String groupName) throws IOException {
+    requirePermission("balanceGroup", Action.ADMIN);
+  }
+}
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java
index ef4a579..61600fd 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java
@@ -27,6 +27,7 @@ import static org.junit.Assert.assertTrue;
 import java.io.IOException;
 import java.util.Collection;
 import java.util.List;
+import java.util.Set;
 import java.util.concurrent.CountDownLatch;
 
 import org.apache.commons.logging.Log;
@@ -71,6 +72,8 @@ import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.rules.TestName;
 
+import com.google.common.net.HostAndPort;
+
 /**
  * Tests invocation of the {@link org.apache.hadoop.hbase.coprocessor.MasterObserver}
  * interface hooks at all appropriate times during normal HMaster operations.
@@ -1289,6 +1292,56 @@ public class TestMasterObserver {
   public void postSetNamespaceQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
       final String namespace, final Quotas quotas) throws IOException {
   }
+
+  @Override
+  public void preMoveServers(ObserverContext<MasterCoprocessorEnvironment> ctx,
+      Set<HostAndPort> servers, String targetGroup) throws IOException {
+  }
+
+  @Override
+  public void postMoveServers(ObserverContext<MasterCoprocessorEnvironment> ctx,
+      Set<HostAndPort> servers, String targetGroup) throws IOException {
+  }
+
+  @Override
+  public void preMoveTables(ObserverContext<MasterCoprocessorEnvironment> ctx,
+      Set<TableName> tables, String targetGroup) throws IOException {
+  }
+
+  @Override
+  public void postMoveTables(ObserverContext<MasterCoprocessorEnvironment> ctx,
+      Set<TableName> tables, String targetGroup) throws IOException {
+  }
+
+  @Override
+  public void preAddGroup(ObserverContext<MasterCoprocessorEnvironment> ctx,
+      String name) throws IOException {
+  }
+
+  @Override
+  public void postAddGroup(ObserverContext<MasterCoprocessorEnvironment> ctx,
+      String name) throws IOException {
+  }
+
+  @Override
+  public void preRemoveGroup(ObserverContext<MasterCoprocessorEnvironment> ctx,
+      String name) throws IOException {
+  }
+
+  @Override
+  public void postRemoveGroup(ObserverContext<MasterCoprocessorEnvironment> ctx,
+      String name) throws IOException {
+  }
+
+  @Override
+  public void preBalanceGroup(ObserverContext<MasterCoprocessorEnvironment> ctx,
+      String groupName) throws IOException {
+  }
+
+  @Override
+  public void postBalanceGroup(ObserverContext<MasterCoprocessorEnvironment> ctx,
+      String groupName, boolean balancerRan) throws IOException {
+  }
 }
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/group/TestGroups.java hbase-server/src/test/java/org/apache/hadoop/hbase/group/TestGroups.java
new file mode 100644
index 0000000..16ef22f
--- /dev/null
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/group/TestGroups.java
@@ -0,0 +1,344 @@
+/**
+ * Copyright The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.group; + +import com.google.common.collect.Lists; +import com.google.common.collect.Sets; + +import com.google.common.net.HostAndPort; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.MiniHBaseCluster; +import org.apache.hadoop.hbase.NamespaceDescriptor; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.Waiter; +import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; +import org.apache.hadoop.hbase.master.HMaster; +import org.apache.hadoop.hbase.master.MasterServices; +import org.apache.hadoop.hbase.master.ServerManager; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.mockito.Mockito; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; + +import javax.management.MBeanServer; +import javax.management.ObjectName; +import java.io.IOException; +import java.lang.management.ManagementFactory; +import java.util.Iterator; +import java.util.List; +import java.util.concurrent.atomic.AtomicReference; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +@Category({MediumTests.class}) +public class TestGroups extends TestGroupsBase { + protected static final Log LOG = LogFactory.getLog(TestGroups.class); + private static HMaster master; + private static boolean init = false; + private static GroupAdminEndpoint groupAdminEndpoint; + + + @BeforeClass + public static void setUp() throws Exception { + TEST_UTIL = new HBaseTestingUtility(); + TEST_UTIL.getConfiguration().set( + HConstants.HBASE_MASTER_LOADBALANCER_CLASS, + GroupBasedLoadBalancer.class.getName()); + TEST_UTIL.getConfiguration().set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, + GroupAdminEndpoint.class.getName()); + TEST_UTIL.getConfiguration().setBoolean( + HConstants.ZOOKEEPER_USEMULTI, + true); + TEST_UTIL.startMiniCluster(NUM_SLAVES_BASE); + TEST_UTIL.getConfiguration().set( + ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, + ""+NUM_SLAVES_BASE); + + admin = TEST_UTIL.getHBaseAdmin(); + cluster = TEST_UTIL.getHBaseCluster(); + master = ((MiniHBaseCluster)cluster).getMaster(); + + //wait for balancer to come online + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + return master.isInitialized() && + ((GroupBasedLoadBalancer) master.getLoadBalancer()).isOnline(); + } + }); + admin.setBalancerRunning(false,true); + groupAdmin = new VerifyingGroupAdminClient(GroupAdmin.newClient(TEST_UTIL.getConnection()), + TEST_UTIL.getConfiguration()); + groupAdminEndpoint = + master.getMasterCoprocessorHost().findCoprocessors(GroupAdminEndpoint.class).get(0); + } + + @AfterClass + 
public static void tearDown() throws Exception {
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  @Before
+  public void beforeMethod() throws Exception {
+    if (!init) {
+      init = true;
+      afterMethod();
+    }
+  }
+
+  @After
+  public void afterMethod() throws Exception {
+    deleteTableIfNecessary();
+    deleteNamespaceIfNecessary();
+    deleteGroups();
+
+    int missing = NUM_SLAVES_BASE - getNumServers();
+    LOG.info("Restoring servers: " + missing);
+    for (int i = 0; i < missing; i++) {
+      ((MiniHBaseCluster)cluster).startRegionServer();
+    }
+    TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
+      @Override
+      public boolean evaluate() throws Exception {
+        LOG.info("Waiting for cleanup to finish " + groupAdmin.listGroups());
+        //Might be greater since moving servers back to default
+        //is after starting a server
+        return groupAdmin.getGroupInfo(GroupInfo.DEFAULT_GROUP).getServers().size()
+            == NUM_SLAVES_BASE;
+      }
+    });
+  }
+
+  @Test
+  public void testJmx() throws Exception {
+    MBeanServer mBeanServer = ManagementFactory.getPlatformMBeanServer();
+    Iterator<ObjectName> it = mBeanServer.queryNames(
+        new ObjectName("Hadoop:service=HBase,name=Master,sub=rsGroup"), null).iterator();
+    //verify it was loaded properly
+    assertNotNull(it.next());
+
+    final AtomicReference<HostAndPort> deadServer = new AtomicReference<HostAndPort>(null);
+
+    //We use mocks to simulate offline servers to avoid
+    //the complexity and overhead of killing servers
+    MasterServices mockMaster = Mockito.mock(MasterServices.class);
+    final ServerManager mockServerManager = Mockito.mock(ServerManager.class);
+    Mockito.when(mockMaster.getServerManager()).thenReturn(mockServerManager);
+    Mockito.when(mockServerManager.getOnlineServersList())
+        .then(new Answer<List<ServerName>>() {
+      @Override
+      public List<ServerName> answer(InvocationOnMock invocation) throws Throwable {
+        GroupInfo groupInfo = groupAdmin.getGroupInfo(GroupInfo.DEFAULT_GROUP);
+        List<ServerName> finalList = Lists.newArrayList();
+        HostAndPort firstServer = groupInfo.getServers().iterator().next();
+        for (ServerName server: master.getServerManager().getOnlineServersList()) {
+          if (!server.getHostPort().equals(firstServer)) {
+            finalList.add(server);
+          }
+        }
+        deadServer.set(firstServer);
+        return finalList;
+      }
+    });
+    GroupMXBean info = new GroupMXBeanImpl(groupAdmin, mockMaster);
+
+    GroupInfo defaultGroup = groupAdmin.getGroupInfo(GroupInfo.DEFAULT_GROUP);
+    assertEquals(2, info.getGroups().size());
+    assertEquals(defaultGroup.getName(), info.getGroups().get(0).getName());
+    assertEquals(defaultGroup.getServers(), Sets.newHashSet(info.getGroups().get(0).getServers()));
+    assertEquals(
+        Lists.newArrayList(deadServer.get()),
+        info.getGroups().iterator().next().getOfflineServers());
+
+    GroupInfo barGroup = addGroup(groupAdmin, "bar", 3);
+    TableName tableName1 = TableName.valueOf(tablePrefix + "_testJmx1");
+    TableName tableName2 = TableName.valueOf(tablePrefix + "_testJmx2");
+    TEST_UTIL.createTable(tableName1, Bytes.toBytes("f"));
+    TEST_UTIL.createTable(tableName2, Bytes.toBytes("f"));
+    groupAdmin.moveTables(Sets.newHashSet(tableName2), barGroup.getName());
+    assertEquals(3, info.getGroups().size());
+
+    int defaultIndex = -1;
+    int barIndex = -1;
+
+    for(int i=0; i() {
+      @Override
+      public boolean evaluate() throws Exception {
+        return getTableRegionMap().get(desc.getTableName()) != null;
+      }
+    });
+    ServerName targetServer =
+        ServerName.parseServerName(appInfo.getServers().iterator().next().toString());
+    AdminProtos.AdminService.BlockingInterface rs = admin.getConnection().getAdmin(targetServer);
+    //verify it was assigned to the right group
+    assertEquals(1, ProtobufUtil.getOnlineRegions(rs).size());
+  }
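The next two tests exercise the namespace-to-group binding: a namespace can carry the GroupInfo.NAMESPACEDESC_PROP_GROUP property, and tables created in it are assigned to that group's servers. A hedged sketch of the client-side flow (the group and namespace names here are invented for illustration):

    import org.apache.hadoop.hbase.HColumnDescriptor;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;

    class NamespaceGroupSketch {
      // Create a group, bind a namespace to it, then create a table whose
      // regions should only be assigned to that group's servers.
      static void createGroupedNamespace(Connection conn, GroupAdmin groupAdmin)
          throws Exception {
        Admin admin = conn.getAdmin();
        groupAdmin.addGroup("batch");
        admin.createNamespace(NamespaceDescriptor.create("batch_ns")
            .addConfiguration(GroupInfo.NAMESPACEDESC_PROP_GROUP, "batch")
            .build());
        HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("batch_ns", "events"));
        desc.addFamily(new HColumnDescriptor("f"));
        admin.createTable(desc);
      }
    }

As testNamespaceConstraint below shows, a group referenced by a namespace cannot be removed, and a namespace cannot reference a group that does not exist.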
+  @Test
+  public void testDefaultNamespaceCreateAndAssign() throws Exception {
+    LOG.info("testDefaultNamespaceCreateAndAssign");
+    final byte[] tableName = Bytes.toBytes(tablePrefix + "_testCreateAndAssign");
+    admin.modifyNamespace(NamespaceDescriptor.create("default")
+        .addConfiguration(GroupInfo.NAMESPACEDESC_PROP_GROUP, "default").build());
+    final HTableDescriptor desc = new HTableDescriptor(tableName);
+    desc.addFamily(new HColumnDescriptor("f"));
+    admin.createTable(desc);
+    //wait for created table to be assigned
+    TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
+      @Override
+      public boolean evaluate() throws Exception {
+        return getTableRegionMap().get(desc.getTableName()) != null;
+      }
+    });
+  }
+
+  @Test
+  public void testNamespaceConstraint() throws Exception {
+    String nsName = tablePrefix + "_foo";
+    String groupName = tablePrefix + "_foo";
+    LOG.info("testNamespaceConstraint");
+    groupAdmin.addGroup(groupName);
+    admin.createNamespace(NamespaceDescriptor.create(nsName)
+        .addConfiguration(GroupInfo.NAMESPACEDESC_PROP_GROUP, groupName)
+        .build());
+    //test removing a referenced group
+    try {
+      groupAdmin.removeGroup(groupName);
+      fail("Expected a constraint exception");
+    } catch (IOException ex) {
+      // expected
+    }
+    //test modify group
+    //changing with the same name is fine
+    admin.modifyNamespace(
+        NamespaceDescriptor.create(nsName)
+            .addConfiguration(GroupInfo.NAMESPACEDESC_PROP_GROUP, groupName)
+            .build());
+    String anotherGroup = tablePrefix + "_anotherGroup";
+    groupAdmin.addGroup(anotherGroup);
+    admin.deleteNamespace(nsName);
+    groupAdmin.removeGroup(groupName);
+    //test referencing a non-existent group
+    try {
+      admin.createNamespace(NamespaceDescriptor.create(nsName)
+          .addConfiguration(GroupInfo.NAMESPACEDESC_PROP_GROUP, "foo")
+          .build());
+      fail("Expected a constraint exception");
+    } catch (IOException ex) {
+      // expected
+    }
+  }
+
+  @Test
+  public void testGroupInfoMultiAccessing() throws Exception {
+    GroupInfoManager manager = groupAdminEndpoint.getGroupInfoManager();
+    final GroupInfo defaultGroup = manager.getGroup("default");
+    // getGroup updates default group's server list
+    // this process must not affect other threads iterating the list
+    Iterator<HostAndPort> it = defaultGroup.getServers().iterator();
+    manager.getGroup("default");
+    it.next();
+  }
+}
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/group/TestGroupsBase.java hbase-server/src/test/java/org/apache/hadoop/hbase/group/TestGroupsBase.java
new file mode 100644
index 0000000..20bcb02
--- /dev/null
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/group/TestGroupsBase.java
@@ -0,0 +1,583 @@
+/**
+ * Copyright The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +package org.apache.hadoop.hbase.group; + +import com.google.common.collect.Maps; +import com.google.common.collect.Sets; +import com.google.common.net.HostAndPort; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.ClusterStatus; +import org.apache.hadoop.hbase.HBaseCluster; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.NamespaceDescriptor; +import org.apache.hadoop.hbase.RegionLoad; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.Waiter; +import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.constraint.ConstraintException; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.Test; + +import java.io.IOException; +import java.security.SecureRandom; +import java.util.HashSet; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.TreeMap; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +public abstract class TestGroupsBase { + protected static final Log LOG = LogFactory.getLog(TestGroupsBase.class); + + //shared + protected final static String groupPrefix = "Group"; + protected final static String tablePrefix = "Group"; + protected final static SecureRandom rand = new SecureRandom(); + + //shared, cluster type specific + protected static HBaseTestingUtility TEST_UTIL; + protected static HBaseAdmin admin; + protected static HBaseCluster cluster; + protected static GroupAdmin groupAdmin; + + public final static long WAIT_TIMEOUT = 60000*5; + public final static int NUM_SLAVES_BASE = 4; //number of slaves for the smallest cluster + + + + protected GroupInfo addGroup(GroupAdmin gAdmin, String groupName, + int serverCount) throws IOException, InterruptedException { + GroupInfo defaultInfo = gAdmin + .getGroupInfo(GroupInfo.DEFAULT_GROUP); + assertTrue(defaultInfo != null); + assertTrue(defaultInfo.getServers().size() >= serverCount); + gAdmin.addGroup(groupName); + + Set set = new HashSet(); + for(HostAndPort server: defaultInfo.getServers()) { + if(set.size() == serverCount) { + break; + } + set.add(server); + } + gAdmin.moveServers(set, groupName); + GroupInfo result = gAdmin.getGroupInfo(groupName); + assertTrue(result.getServers().size() >= serverCount); + return result; + } + + static void removeGroup(GroupAdminClient groupAdmin, String groupName) throws IOException { + GroupInfo groupInfo = groupAdmin.getGroupInfo(groupName); + groupAdmin.moveTables(groupInfo.getTables(), GroupInfo.DEFAULT_GROUP); + groupAdmin.moveServers(groupInfo.getServers(), GroupInfo.DEFAULT_GROUP); + groupAdmin.removeGroup(groupName); + } + + protected void deleteTableIfNecessary() throws IOException { + for (HTableDescriptor desc : TEST_UTIL.getHBaseAdmin().listTables(tablePrefix+".*")) { + TEST_UTIL.deleteTable(desc.getTableName()); + } + } + + protected void deleteNamespaceIfNecessary() throws IOException { + for (NamespaceDescriptor desc : TEST_UTIL.getHBaseAdmin().listNamespaceDescriptors()) { + if(desc.getName().startsWith(tablePrefix)) { + 
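+        //namespaces created by these tests share the table prefix,
+        //so a prefix match is enough to identify them for cleanup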
admin.deleteNamespace(desc.getName()); + } + } + } + + protected void deleteGroups() throws IOException { + GroupAdmin groupAdmin = GroupAdmin.newClient(TEST_UTIL.getConnection()); + for(GroupInfo group: groupAdmin.listGroups()) { + if(!group.getName().equals(GroupInfo.DEFAULT_GROUP)) { + groupAdmin.moveTables(group.getTables(), GroupInfo.DEFAULT_GROUP); + groupAdmin.moveServers(group.getServers(), GroupInfo.DEFAULT_GROUP); + groupAdmin.removeGroup(group.getName()); + } + } + } + + public Map> getTableRegionMap() throws IOException { + Map> map = Maps.newTreeMap(); + Map>> tableServerRegionMap + = getTableServerRegionMap(); + for(TableName tableName : tableServerRegionMap.keySet()) { + if(!map.containsKey(tableName)) { + map.put(tableName, new LinkedList()); + } + for(List subset: tableServerRegionMap.get(tableName).values()) { + map.get(tableName).addAll(subset); + } + } + return map; + } + + public Map>> getTableServerRegionMap() + throws IOException { + Map>> map = Maps.newTreeMap(); + ClusterStatus status = TEST_UTIL.getHBaseClusterInterface().getClusterStatus(); + for(ServerName serverName : status.getServers()) { + for(RegionLoad rl : status.getLoad(serverName).getRegionsLoad().values()) { + TableName tableName = HRegionInfo.getTable(rl.getName()); + if(!map.containsKey(tableName)) { + map.put(tableName, new TreeMap>()); + } + if(!map.get(tableName).containsKey(serverName)) { + map.get(tableName).put(serverName, new LinkedList()); + } + map.get(tableName).get(serverName).add(rl.getNameAsString()); + } + } + return map; + } + + @Test(expected = ConstraintException.class) + public void testGroupInfoOfTableNonExistent() throws Exception { + groupAdmin.getGroupInfoOfTable(TableName.valueOf("nonexistent")); + } + + @Test + public void testCreateMultiRegion() throws IOException { + LOG.info("testCreateMultiRegion"); + TableName tableName = TableName.valueOf(tablePrefix + "_testCreateMultiRegion"); + byte[] end = {1,3,5,7,9}; + byte[] start = {0,2,4,6,8}; + byte[][] f = {Bytes.toBytes("f")}; + TEST_UTIL.createTable(tableName, f,1,start,end,10); + } + + @Test + public void testCreateAndDrop() throws Exception { + LOG.info("testCreateAndDrop"); + + final TableName tableName = TableName.valueOf(tablePrefix + "_testCreateAndDrop"); + TEST_UTIL.createTable(tableName, Bytes.toBytes("cf")); + //wait for created table to be assigned + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + return getTableRegionMap().get(tableName) != null; + } + }); + TEST_UTIL.deleteTable(tableName); + } + + + @Test + public void testSimpleRegionServerMove() throws IOException, + InterruptedException { + LOG.info("testSimpleRegionServerMove"); + + int initNumGroups = groupAdmin.listGroups().size(); + GroupInfo appInfo = addGroup(groupAdmin, groupPrefix + rand.nextInt(), 1); + GroupInfo adminInfo = addGroup(groupAdmin, groupPrefix + rand.nextInt(), 1); + GroupInfo dInfo = groupAdmin.getGroupInfo(GroupInfo.DEFAULT_GROUP); + assertEquals(initNumGroups + 2, groupAdmin.listGroups().size()); + assertEquals(1, adminInfo.getServers().size()); + assertEquals(1, appInfo.getServers().size()); + assertEquals(getNumServers() - 2, dInfo.getServers().size()); + groupAdmin.moveServers(appInfo.getServers(), + GroupInfo.DEFAULT_GROUP); + groupAdmin.removeGroup(appInfo.getName()); + groupAdmin.moveServers(adminInfo.getServers(), + GroupInfo.DEFAULT_GROUP); + groupAdmin.removeGroup(adminInfo.getName()); + assertEquals(groupAdmin.listGroups().size(), initNumGroups); + } + + 
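+  // For illustration: the add/move/remove flow exercised above, as a minimal
+  // client-side sketch (assumes an open Connection conn; names are illustrative):
+  //
+  //   GroupAdmin ga = GroupAdmin.newClient(conn);
+  //   ga.addGroup("app");                            // new, empty group
+  //   Set<HostAndPort> one = Sets.newHashSet(
+  //       ga.getGroupInfo(GroupInfo.DEFAULT_GROUP).getServers().iterator().next());
+  //   ga.moveServers(one, "app");                    // carve a server out of default
+  //   ga.moveServers(one, GroupInfo.DEFAULT_GROUP);  // return it
+  //   ga.removeGroup("app");                         // only an empty group can be removed
+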
// return the real number of region servers, excluding the master embedded region server in 2.0+ + public int getNumServers() throws IOException { + ClusterStatus status = admin.getClusterStatus(); + ServerName master = status.getMaster(); + int count = 0; + for (ServerName sn : status.getServers()) { + if (!sn.equals(master)) { + count++; + } + } + return count; + } + + @Test + public void testMoveServers() throws Exception { + LOG.info("testMoveServers"); + + //create groups and assign servers + addGroup(groupAdmin, "bar", 3); + groupAdmin.addGroup("foo"); + + GroupInfo barGroup = groupAdmin.getGroupInfo("bar"); + GroupInfo fooGroup = groupAdmin.getGroupInfo("foo"); + assertEquals(3, barGroup.getServers().size()); + assertEquals(0, fooGroup.getServers().size()); + + //test fail bogus server move + try { + groupAdmin.moveServers(Sets.newHashSet(HostAndPort.fromString("foo:9999")),"foo"); + fail("Bogus servers shouldn't have been successfully moved."); + } catch(IOException ex) { + String exp = "Server foo:9999 is not an online server in default group."; + String msg = "Expected '"+exp+"' in exception message: "; + assertTrue(msg+" "+ex.getMessage(), ex.getMessage().contains(exp)); + } + + //test success case + LOG.info("moving servers "+barGroup.getServers()+" to group foo"); + groupAdmin.moveServers(barGroup.getServers(), fooGroup.getName()); + + barGroup = groupAdmin.getGroupInfo("bar"); + fooGroup = groupAdmin.getGroupInfo("foo"); + assertEquals(0,barGroup.getServers().size()); + assertEquals(3,fooGroup.getServers().size()); + + LOG.info("moving servers "+fooGroup.getServers()+" to group default"); + groupAdmin.moveServers(fooGroup.getServers(), GroupInfo.DEFAULT_GROUP); + + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + return getNumServers() == + groupAdmin.getGroupInfo(GroupInfo.DEFAULT_GROUP).getServers().size(); + } + }); + + fooGroup = groupAdmin.getGroupInfo("foo"); + assertEquals(0,fooGroup.getServers().size()); + + //test group removal + LOG.info("Remove group "+barGroup.getName()); + groupAdmin.removeGroup(barGroup.getName()); + assertEquals(null, groupAdmin.getGroupInfo(barGroup.getName())); + LOG.info("Remove group "+fooGroup.getName()); + groupAdmin.removeGroup(fooGroup.getName()); + assertEquals(null, groupAdmin.getGroupInfo(fooGroup.getName())); + } + + @Test + public void testTableMoveTruncateAndDrop() throws Exception { + LOG.info("testTableMove"); + + final TableName tableName = TableName.valueOf(tablePrefix + "_testTableMoveAndDrop"); + final byte[] familyNameBytes = Bytes.toBytes("f"); + String newGroupName = "g_" + rand.nextInt(); + final GroupInfo newGroup = addGroup(groupAdmin, newGroupName, 2); + + TEST_UTIL.createMultiRegionTable(tableName, familyNameBytes, 5); + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + List regions = getTableRegionMap().get(tableName); + if (regions == null) + return false; + return getTableRegionMap().get(tableName).size() >= 5; + } + }); + + GroupInfo tableGrp = groupAdmin.getGroupInfoOfTable(tableName); + assertTrue(tableGrp.getName().equals(GroupInfo.DEFAULT_GROUP)); + + //change table's group + LOG.info("Moving table "+tableName+" to "+newGroup.getName()); + groupAdmin.moveTables(Sets.newHashSet(tableName), newGroup.getName()); + + //verify group change + assertEquals(newGroup.getName(), + groupAdmin.getGroupInfoOfTable(tableName).getName()); + + TEST_UTIL.waitFor(WAIT_TIMEOUT, new 
Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + Map> serverMap = getTableServerRegionMap().get(tableName); + int count = 0; + if (serverMap != null) { + for (ServerName rs : serverMap.keySet()) { + if (newGroup.containsServer(rs.getHostPort())) { + count += serverMap.get(rs).size(); + } + } + } + return count == 5; + } + }); + + //test truncate + admin.disableTable(tableName); + admin.truncateTable(tableName, true); + assertEquals(1, groupAdmin.getGroupInfo(newGroup.getName()).getTables().size()); + assertEquals(tableName, groupAdmin.getGroupInfo(newGroup.getName()).getTables().first()); + + //verify removed table is removed from group + TEST_UTIL.deleteTable(tableName); + assertEquals(0, groupAdmin.getGroupInfo(newGroup.getName()).getTables().size()); + } + + @Test + public void testGroupBalance() throws Exception { + LOG.info("testGroupBalance"); + String newGroupName = "g_" + rand.nextInt(); + final GroupInfo newGroup = addGroup(groupAdmin, newGroupName, 3); + + final TableName tableName = TableName.valueOf(tablePrefix+"_ns", "testGroupBalance"); + admin.createNamespace( + NamespaceDescriptor.create(tableName.getNamespaceAsString()) + .addConfiguration(GroupInfo.NAMESPACEDESC_PROP_GROUP, newGroupName).build()); + final byte[] familyNameBytes = Bytes.toBytes("f"); + final HTableDescriptor desc = new HTableDescriptor(tableName); + desc.addFamily(new HColumnDescriptor("f")); + byte [] startKey = Bytes.toBytes("aaaaa"); + byte [] endKey = Bytes.toBytes("zzzzz"); + admin.createTable(desc, startKey, endKey, 6); + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + List regions = getTableRegionMap().get(tableName); + if (regions == null) { + return false; + } + return regions.size() >= 6; + } + }); + + //make assignment uneven, move all regions to one server + Map> assignMap = + getTableServerRegionMap().get(tableName); + final ServerName first = assignMap.entrySet().iterator().next().getKey(); + for(HRegionInfo region: admin.getTableRegions(tableName)) { + if(!assignMap.get(first).contains(region)) { + admin.move(region.getEncodedNameAsBytes(), Bytes.toBytes(first.getServerName())); + } + } + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + Map> map = getTableServerRegionMap().get(tableName); + if (map == null) { + return true; + } + List regions = map.get(first); + if (regions == null) { + return true; + } + return regions.size() >= 6; + } + }); + + //balance the other group and make sure it doesn't affect the new group + groupAdmin.balanceGroup(GroupInfo.DEFAULT_GROUP); + assertEquals(6, getTableServerRegionMap().get(tableName).get(first).size()); + + groupAdmin.balanceGroup(newGroupName); + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + for (List regions : getTableServerRegionMap().get(tableName).values()) { + if (2 != regions.size()) { + return false; + } + } + return true; + } + }); + } + + @Test + public void testRegionMove() throws Exception { + LOG.info("testRegionMove"); + + final GroupInfo newGroup = addGroup(groupAdmin, "g_" + rand.nextInt(), 1); + final TableName tableName = TableName.valueOf(tablePrefix + rand.nextInt()); + final byte[] familyNameBytes = Bytes.toBytes("f"); + // All the regions created below will be assigned to the default group. 
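+    //A server is then moved into the new group and a region move onto it is
+    //requested; the group-based balancer should keep the region off that
+    //server, since the region's table stays in the default group.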
+ TEST_UTIL.createMultiRegionTable(tableName, familyNameBytes, 6); + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + List regions = getTableRegionMap().get(tableName); + if (regions == null) + return false; + return getTableRegionMap().get(tableName).size() >= 6; + } + }); + + //get target region to move + Map> assignMap = + getTableServerRegionMap().get(tableName); + String targetRegion = null; + for(ServerName server : assignMap.keySet()) { + targetRegion = assignMap.get(server).size() > 0 ? assignMap.get(server).get(0) : null; + if(targetRegion != null) { + break; + } + } + //get server which is not a member of new group + ServerName targetServer = null; + for(ServerName server : admin.getClusterStatus().getServers()) { + if(!newGroup.containsServer(server.getHostPort())) { + targetServer = server; + break; + } + } + + final AdminProtos.AdminService.BlockingInterface targetRS = + admin.getConnection().getAdmin(targetServer); + + //move target server to group + groupAdmin.moveServers(Sets.newHashSet(targetServer.getHostPort()), + newGroup.getName()); + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + return ProtobufUtil.getOnlineRegions(targetRS).size() <= 0; + } + }); + + // Lets move this region to the new group. + TEST_UTIL.getHBaseAdmin().move(Bytes.toBytes(HRegionInfo.encodeRegionName(Bytes.toBytes(targetRegion))), + Bytes.toBytes(targetServer.getServerName())); + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + return + getTableRegionMap().get(tableName) != null && + getTableRegionMap().get(tableName).size() == 6 && + admin.getClusterStatus().getRegionsInTransition().size() < 1; + } + }); + + //verify that targetServer didn't open it + assertFalse(ProtobufUtil.getOnlineRegions(targetRS).contains(targetRegion)); + } + + @Test + public void testFailRemoveGroup() throws IOException, InterruptedException { + LOG.info("testFailRemoveGroup"); + + int initNumGroups = groupAdmin.listGroups().size(); + addGroup(groupAdmin, "bar", 3); + TableName tableName = TableName.valueOf(tablePrefix+"_my_table"); + TEST_UTIL.createTable(tableName, Bytes.toBytes("f")); + groupAdmin.moveTables(Sets.newHashSet(tableName), "bar"); + GroupInfo barGroup = groupAdmin.getGroupInfo("bar"); + //group is not empty therefore it should fail + try { + groupAdmin.removeGroup(barGroup.getName()); + fail("Expected remove group to fail"); + } catch(IOException e) { + } + //group cannot lose all it's servers therefore it should fail + try { + groupAdmin.moveServers(barGroup.getServers(), GroupInfo.DEFAULT_GROUP); + fail("Expected move servers to fail"); + } catch(IOException e) { + } + + groupAdmin.moveTables(barGroup.getTables(), GroupInfo.DEFAULT_GROUP); + try { + groupAdmin.removeGroup(barGroup.getName()); + fail("Expected move servers to fail"); + } catch(IOException e) { + } + + groupAdmin.moveServers(barGroup.getServers(), GroupInfo.DEFAULT_GROUP); + groupAdmin.removeGroup(barGroup.getName()); + + assertEquals(initNumGroups, groupAdmin.listGroups().size()); + } + + @Test + public void testKillRS() throws Exception { + LOG.info("testKillRS"); + GroupInfo appInfo = addGroup(groupAdmin, "appInfo", 1); + + + final TableName tableName = TableName.valueOf(tablePrefix+"_ns", "_testKillRS"); + admin.createNamespace( + NamespaceDescriptor.create(tableName.getNamespaceAsString()) + 
.addConfiguration(GroupInfo.NAMESPACEDESC_PROP_GROUP, appInfo.getName()).build()); + final HTableDescriptor desc = new HTableDescriptor(tableName); + desc.addFamily(new HColumnDescriptor("f")); + admin.createTable(desc); + //wait for created table to be assigned + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + return getTableRegionMap().get(desc.getTableName()) != null; + } + }); + + ServerName targetServer = ServerName.parseServerName( + appInfo.getServers().iterator().next().toString()); + AdminProtos.AdminService.BlockingInterface targetRS = + admin.getConnection().getAdmin(targetServer); + HRegionInfo targetRegion = ProtobufUtil.getOnlineRegions(targetRS).get(0); + assertEquals(1, ProtobufUtil.getOnlineRegions(targetRS).size()); + + try { + //stopping may cause an exception + //due to the connection loss + targetRS.stopServer(null, + AdminProtos.StopServerRequest.newBuilder().setReason("Die").build()); + } catch(Exception e) { + } + assertFalse(cluster.getClusterStatus().getServers().contains(targetServer)); + + //wait for created table to be assigned + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + return cluster.getClusterStatus().getRegionsInTransition().size() == 0; + } + }); + Set newServers = Sets.newHashSet(); + newServers.add( + groupAdmin.getGroupInfo(GroupInfo.DEFAULT_GROUP).getServers().iterator().next()); + groupAdmin.moveServers(newServers, appInfo.getName()); + admin.assign(targetRegion.getRegionName()); + + //wait for region to be assigned + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + return cluster.getClusterStatus().getRegionsInTransition().size() == 0; + } + }); + + targetServer = ServerName.parseServerName( + newServers.iterator().next().toString()); + targetRS = + admin.getConnection().getAdmin(targetServer); + assertEquals(1, ProtobufUtil.getOnlineRegions(targetRS).size()); + assertEquals(tableName, + ProtobufUtil.getOnlineRegions(targetRS).get(0).getTable()); + } +} diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/group/TestGroupsOfflineMode.java hbase-server/src/test/java/org/apache/hadoop/hbase/group/TestGroupsOfflineMode.java new file mode 100644 index 0000000..35fc896 --- /dev/null +++ hbase-server/src/test/java/org/apache/hadoop/hbase/group/TestGroupsOfflineMode.java @@ -0,0 +1,186 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.group; + +import com.google.common.collect.Sets; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.HBaseCluster; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.MiniHBaseCluster; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.Waiter; +import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; +import org.apache.hadoop.hbase.master.HMaster; +import org.apache.hadoop.hbase.master.ServerManager; +import org.apache.hadoop.hbase.regionserver.HRegionServer; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; + + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; + + +//This tests that GroupBasedBalancer will use data in zk +//to do balancing during master startup +//This does not test retain assignment +@Category(MediumTests.class) +public class TestGroupsOfflineMode { + private static final org.apache.commons.logging.Log LOG = + LogFactory.getLog(TestGroupsOfflineMode.class); + private static HMaster master; + private static HBaseAdmin hbaseAdmin; + private static HBaseTestingUtility TEST_UTIL; + private static HBaseCluster cluster; + private static GroupAdminEndpoint groupAdminEndpoint; + public final static long WAIT_TIMEOUT = 60000*5; + + @BeforeClass + public static void setUp() throws Exception { + TEST_UTIL = new HBaseTestingUtility(); + TEST_UTIL.getConfiguration().set( + HConstants.HBASE_MASTER_LOADBALANCER_CLASS, + GroupBasedLoadBalancer.class.getName()); + TEST_UTIL.getConfiguration().set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, + GroupAdminEndpoint.class.getName()); + TEST_UTIL.getConfiguration().set( + ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, + "1"); + TEST_UTIL.startMiniCluster(2, 3); + cluster = TEST_UTIL.getHBaseCluster(); + master = ((MiniHBaseCluster)cluster).getMaster(); + master.balanceSwitch(false); + hbaseAdmin = new HBaseAdmin(TEST_UTIL.getConfiguration()); + //wait till the balancer is in online mode + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + return master.isInitialized() && + ((GroupBasedLoadBalancer) master.getLoadBalancer()).isOnline() && + master.getServerManager().getOnlineServersList().size() >= 3; + } + }); + groupAdminEndpoint = + master.getMasterCoprocessorHost().findCoprocessors(GroupAdminEndpoint.class).get(0); + } + + @AfterClass + public static void tearDown() throws Exception { + TEST_UTIL.shutdownMiniCluster(); + } + + @Test + public void testOffline() throws Exception, InterruptedException { + //table should be after group table name + //so it gets assigned later + final TableName failoverTable = TableName.valueOf("testOffline"); + TEST_UTIL.createTable(failoverTable, Bytes.toBytes("f")); + + GroupAdmin groupAdmin = GroupAdmin.newClient(TEST_UTIL.getConnection()); + + final HRegionServer killRS = ((MiniHBaseCluster)cluster).getRegionServer(0); + final HRegionServer groupRS = ((MiniHBaseCluster)cluster).getRegionServer(1); + final HRegionServer failoverRS = ((MiniHBaseCluster)cluster).getRegionServer(2); + + String newGroup = "my_group"; + groupAdmin.addGroup(newGroup); + 
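+    //the group table will later be pinned to this group and its only server
+    //killed, so the group table stays offline while other tables fail over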
if(master.getAssignmentManager().getRegionStates().getRegionAssignments() + .containsValue(failoverRS.getServerName())) { + for(HRegionInfo regionInfo: hbaseAdmin.getOnlineRegions(failoverRS.getServerName())) { + hbaseAdmin.move(regionInfo.getEncodedNameAsBytes(), + Bytes.toBytes(failoverRS.getServerName().getServerName())); + } + LOG.info("Waiting for region unassignments on failover RS..."); + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + return master.getServerManager().getLoad(failoverRS.getServerName()) + .getRegionsLoad().size() > 0; + } + }); + } + + //move server to group and make sure all tables are assigned + groupAdmin.moveServers(Sets.newHashSet(groupRS.getServerName().getHostPort()), newGroup); + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + return groupRS.getNumberOfOnlineRegions() < 1 && + master.getAssignmentManager().getRegionStates().getRegionsInTransition().size() < 1; + } + }); + //move table to group and wait + groupAdmin.moveTables(Sets.newHashSet(GroupInfoManager.GROUP_TABLE_NAME), newGroup); + LOG.info("Waiting for move table..."); + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + return groupRS.getNumberOfOnlineRegions() == 1; + } + }); + + groupRS.stop("die"); + //race condition here + TEST_UTIL.getHBaseCluster().getMaster().stopMaster(); + LOG.info("Waiting for offline mode..."); + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + return TEST_UTIL.getHBaseCluster().getMaster() != null && + TEST_UTIL.getHBaseCluster().getMaster().isActiveMaster() && + TEST_UTIL.getHBaseCluster().getMaster().isInitialized() && + TEST_UTIL.getHBaseCluster().getMaster().getServerManager().getOnlineServers().size() + <= 3; + } + }); + + + GroupInfoManager groupMgr = groupAdminEndpoint.getGroupInfoManager(); + //make sure balancer is in offline mode, since this is what we're testing + assertFalse(groupMgr.isOnline()); + //verify the group affiliation that's loaded from ZK instead of tables + assertEquals(newGroup, + groupMgr.getGroupOfTable(GroupInfoManager.GROUP_TABLE_NAME)); + assertEquals(GroupInfo.DEFAULT_GROUP, groupMgr.getGroupOfTable(failoverTable)); + + //kill final regionserver to see the failover happens for all tables + //except GROUP table since it's group does not have any online RS + killRS.stop("die"); + master = TEST_UTIL.getHBaseCluster().getMaster(); + LOG.info("Waiting for new table assignment..."); + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + return failoverRS.getOnlineRegions(failoverTable).size() >= 1; + } + }); + assertEquals(0, failoverRS.getOnlineRegions(GroupInfoManager.GROUP_TABLE_NAME).size()); + + //need this for minicluster to shutdown cleanly + master.stopMaster(); + } +} diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/group/VerifyingGroupAdminClient.java hbase-server/src/test/java/org/apache/hadoop/hbase/group/VerifyingGroupAdminClient.java new file mode 100644 index 0000000..1a96fce --- /dev/null +++ hbase-server/src/test/java/org/apache/hadoop/hbase/group/VerifyingGroupAdminClient.java @@ -0,0 +1,149 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.group; + +import com.google.common.collect.Maps; +import com.google.common.collect.Sets; +import com.google.common.net.HostAndPort; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.exceptions.DeserializationException; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.GroupProtos; +import org.apache.hadoop.hbase.zookeeper.ZKUtil; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.zookeeper.KeeperException; +import org.junit.Assert; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.Set; + +public class VerifyingGroupAdminClient extends GroupAdmin { + private Table table; + private ZooKeeperWatcher zkw; + private GroupSerDe serDe; + private GroupAdmin wrapped; + + public VerifyingGroupAdminClient(GroupAdmin groupAdmin, Configuration conf) + throws IOException { + wrapped = groupAdmin; + table = ConnectionFactory.createConnection(conf).getTable(GroupInfoManager.GROUP_TABLE_NAME); + zkw = new ZooKeeperWatcher(conf, this.getClass().getSimpleName(), null); + serDe = new GroupSerDe(); + } + + @Override + public void addGroup(String groupName) throws IOException { + wrapped.addGroup(groupName); + verify(); + } + + @Override + public GroupInfo getGroupInfo(String groupName) throws IOException { + return wrapped.getGroupInfo(groupName); + } + + @Override + public GroupInfo getGroupInfoOfTable(TableName tableName) throws IOException { + return wrapped.getGroupInfoOfTable(tableName); + } + + @Override + public void moveServers(Set servers, String targetGroup) throws IOException { + wrapped.moveServers(servers, targetGroup); + verify(); + } + + @Override + public void moveTables(Set tables, String targetGroup) throws IOException { + wrapped.moveTables(tables, targetGroup); + verify(); + } + + @Override + public void removeGroup(String name) throws IOException { + wrapped.removeGroup(name); + verify(); + } + + @Override + public boolean balanceGroup(String name) throws IOException { + return wrapped.balanceGroup(name); + } + + @Override + public List listGroups() throws IOException { + return wrapped.listGroups(); + } + + @Override + public GroupInfo getGroupOfServer(HostAndPort hostPort) throws IOException { + return wrapped.getGroupOfServer(hostPort); + } + + public void verify() throws IOException { + Map groupMap = Maps.newHashMap(); + Set zList = Sets.newHashSet(); + + for (Result result : table.getScanner(new Scan())) { + GroupProtos.GroupInfo 
proto = + GroupProtos.GroupInfo.parseFrom( + result.getValue( + GroupInfoManager.META_FAMILY_BYTES, + GroupInfoManager.META_QUALIFIER_BYTES)); + groupMap.put(proto.getName(), ProtobufUtil.toGroupInfo(proto)); + } + Assert.assertEquals(Sets.newHashSet(groupMap.values()), + Sets.newHashSet(wrapped.listGroups())); + try { + String groupBasePath = ZKUtil.joinZNode(zkw.baseZNode, "groupInfo"); + for(String znode: ZKUtil.listChildrenNoWatch(zkw, groupBasePath)) { + byte[] data = ZKUtil.getData(zkw, ZKUtil.joinZNode(groupBasePath, znode)); + if(data.length > 0) { + ProtobufUtil.expectPBMagicPrefix(data); + ByteArrayInputStream bis = new ByteArrayInputStream( + data, ProtobufUtil.lengthOfPBMagic(), data.length); + zList.add(ProtobufUtil.toGroupInfo(GroupProtos.GroupInfo.parseFrom(bis))); + } + } + Assert.assertEquals(zList.size(), groupMap.size()); + for(GroupInfo groupInfo: zList) { + Assert.assertTrue(groupMap.get(groupInfo.getName()).equals(groupInfo)); + } + } catch (KeeperException e) { + throw new IOException("ZK verification failed", e); + } catch (DeserializationException e) { + throw new IOException("ZK verification failed", e); + } catch (InterruptedException e) { + throw new IOException("ZK verification failed", e); + } + } + + @Override + public void close() throws IOException { + } +} diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java index 51861d6..f18026d 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java @@ -27,12 +27,16 @@ import static org.junit.Assert.fail; import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -537,7 +541,7 @@ public class TestAssignmentManagerOnCluster { desc.getTableName(), Bytes.toBytes("A"), Bytes.toBytes("Z")); MetaTableAccessor.addRegionToMeta(meta, hri); - MyLoadBalancer.controledRegion = hri.getEncodedName(); + MyLoadBalancer.controledRegion = hri; HMaster master = TEST_UTIL.getHBaseCluster().getMaster(); master.assignRegion(hri); @@ -563,6 +567,105 @@ public class TestAssignmentManagerOnCluster { } /** + * This tests round-robin assignment failed due to no bulkplan + */ + @Test (timeout=60000) + public void testRoundRobinAssignmentFailed() throws Exception { + TableName tableName = TableName.valueOf("testRoundRobinAssignmentFailed"); + try { + HTableDescriptor desc = new HTableDescriptor(tableName); + desc.addFamily(new HColumnDescriptor(FAMILY)); + admin.createTable(desc); + + Table meta = admin.getConnection().getTable(TableName.META_TABLE_NAME); + HRegionInfo hri = new HRegionInfo( + desc.getTableName(), Bytes.toBytes("A"), Bytes.toBytes("Z")); + MetaTableAccessor.addRegionToMeta(meta, hri); + + MyLoadBalancer.controledRegion = hri; + + HMaster master = TEST_UTIL.getHBaseCluster().getMaster(); + AssignmentManager am = master.getAssignmentManager(); + // round-robin assignment but balancer cannot find a plan + // assignment should fail + 
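+      // (MyLoadBalancer below maps the controlled region to
+      // LoadBalancer.BOGUS_SERVER_NAME, so the region ends up FAILED_OPEN)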
am.assign(Arrays.asList(hri)); + + // if bulk assignment cannot update region state to online + // or failed_open this waits until timeout + assertFalse(am.waitForAssignment(hri)); + RegionState state = am.getRegionStates().getRegionState(hri); + assertEquals(RegionState.State.FAILED_OPEN, state.getState()); + // Failed to open since no plan, so it's on no server + assertNull(state.getServerName()); + + // try again with valid plan + MyLoadBalancer.controledRegion = null; + am.assign(Arrays.asList(hri)); + assertTrue(am.waitForAssignment(hri)); + + ServerName serverName = master.getAssignmentManager(). + getRegionStates().getRegionServerOfRegion(hri); + TEST_UTIL.assertRegionOnServer(hri, serverName, 200); + } finally { + MyLoadBalancer.controledRegion = null; + TEST_UTIL.deleteTable(tableName); + } + } + + /** + * This tests retain assignment failed due to no bulkplan + */ + @Test (timeout=60000) + public void testRetainAssignmentFailed() throws Exception { + TableName tableName = TableName.valueOf("testRetainAssignmentFailed"); + try { + HTableDescriptor desc = new HTableDescriptor(tableName); + desc.addFamily(new HColumnDescriptor(FAMILY)); + admin.createTable(desc); + + Table meta = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME); + HRegionInfo hri = new HRegionInfo( + desc.getTableName(), Bytes.toBytes("A"), Bytes.toBytes("Z")); + MetaTableAccessor.addRegionToMeta(meta, hri); + + MyLoadBalancer.controledRegion = hri; + + HMaster master = TEST_UTIL.getHBaseCluster().getMaster(); + AssignmentManager am = master.getAssignmentManager(); + + Map regions = new HashMap(); + ServerName dest = TEST_UTIL.getHBaseCluster().getRegionServer(0).getServerName(); + regions.put(hri, dest); + // retainAssignment but balancer cannot find a plan + // assignment should fail + am.assign(regions); + + // if retain assignment cannot update region state to online + // or failed_open this waits until timeout + assertFalse(am.waitForAssignment(hri)); + RegionState state = am.getRegionStates().getRegionState(hri); + assertEquals(RegionState.State.FAILED_OPEN, state.getState()); + // Failed to open since no plan, so it's on no server + assertNull(state.getServerName()); + + // try retainAssigment again with valid plan + MyLoadBalancer.controledRegion = null; + am.assign(regions); + assertTrue(am.waitForAssignment(hri)); + + ServerName serverName = master.getAssignmentManager(). 
+ getRegionStates().getRegionServerOfRegion(hri); + TEST_UTIL.assertRegionOnServer(hri, serverName, 200); + + // it retains on same server as specified + assertEquals(serverName, dest); + } finally { + MyLoadBalancer.controledRegion = null; + TEST_UTIL.deleteTable(tableName); + } + } + + /** * This tests region open failure which is not recoverable */ @Test (timeout=60000) @@ -1170,7 +1273,7 @@ public class TestAssignmentManagerOnCluster { static class MyLoadBalancer extends StochasticLoadBalancer { // For this region, if specified, always assign to nowhere - static volatile String controledRegion = null; + static volatile HRegionInfo controledRegion = null; static volatile Integer countRegionServers = null; static AtomicInteger counter = new AtomicInteger(0); @@ -1178,7 +1281,7 @@ public class TestAssignmentManagerOnCluster { @Override public ServerName randomAssignment(HRegionInfo regionInfo, List servers) { - if (regionInfo.getEncodedName().equals(controledRegion)) { + if (regionInfo.equals(controledRegion)) { return null; } return super.randomAssignment(regionInfo, servers); @@ -1196,8 +1299,26 @@ public class TestAssignmentManagerOnCluster { return null; } } + if (regions.get(0).equals(controledRegion)) { + Map> m = Maps.newHashMap(); + m.put(LoadBalancer.BOGUS_SERVER_NAME, regions); + return m; + } return super.roundRobinAssignment(regions, servers); } + + @Override + public Map> retainAssignment( + Map regions, List servers) { + for (HRegionInfo hri : regions.keySet()) { + if (hri.equals(controledRegion)) { + Map> m = Maps.newHashMap(); + m.put(LoadBalancer.BOGUS_SERVER_NAME, Lists.newArrayList(regions.keySet())); + return m; + } + } + return super.retainAssignment(regions, servers); + } } public static class MyMaster extends HMaster { diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java index b5f20f5..def0e7c 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java @@ -465,6 +465,9 @@ public class TestCatalogJanitor { final long nonce) throws IOException { return -1; } + public LoadBalancer getLoadBalancer() { + return null; + } @Override public long truncateTable( diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestGroupBasedLoadBalancer.java hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestGroupBasedLoadBalancer.java new file mode 100644 index 0000000..a55d7db --- /dev/null +++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestGroupBasedLoadBalancer.java @@ -0,0 +1,589 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.master.balancer; + +import com.google.common.collect.ArrayListMultimap; +import com.google.common.collect.Lists; +import com.google.common.net.HostAndPort; +import org.apache.commons.lang.StringUtils; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableDescriptors; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.group.GroupBasedLoadBalancer; +import org.apache.hadoop.hbase.group.GroupInfo; +import org.apache.hadoop.hbase.group.GroupInfoManager; +import org.apache.hadoop.hbase.master.AssignmentManager; +import org.apache.hadoop.hbase.master.HMaster; +import org.apache.hadoop.hbase.master.MasterServices; +import org.apache.hadoop.hbase.master.RegionPlan; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.mockito.Mockito; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; + +import java.io.FileNotFoundException; +import java.io.IOException; +import java.security.SecureRandom; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.TreeMap; +import java.util.TreeSet; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +//TODO use stochastic based load balancer instead +@Category(SmallTests.class) +public class TestGroupBasedLoadBalancer { + + private static final Log LOG = LogFactory.getLog(TestGroupBasedLoadBalancer.class); + private static GroupBasedLoadBalancer loadBalancer; + private static SecureRandom rand; + + static String[] groups = new String[] { GroupInfo.DEFAULT_GROUP, "dg2", "dg3", + "dg4" }; + static TableName[] tables = + new TableName[] { TableName.valueOf("dt1"), + TableName.valueOf("dt2"), + TableName.valueOf("dt3"), + TableName.valueOf("dt4")}; + static List servers; + static Map groupMap; + static Map tableMap; + static List tableDescs; + int[] regionAssignment = new int[] { 2, 5, 7, 10, 4, 3, 1 }; + static int regionId = 0; + + @BeforeClass + public static void beforeAllTests() throws Exception { + rand = new SecureRandom(); + servers = generateServers(7); + groupMap = constructGroupInfo(servers, groups); + tableMap = new HashMap(); + tableDescs = constructTableDesc(); + Configuration conf = HBaseConfiguration.create(); + conf.set("hbase.regions.slop", "0"); + conf.set("hbase.group.grouploadbalancer.class", SimpleLoadBalancer.class.getCanonicalName()); + loadBalancer = new GroupBasedLoadBalancer(getMockedGroupInfoManager()); + loadBalancer.setMasterServices(getMockedMaster()); + loadBalancer.setConf(conf); + loadBalancer.initialize(); + } + + /** + * Test the load balancing algorithm. 
+ * + * Invariant is that all servers of the group should be hosting either floor(average) or + * ceiling(average) + * + * @throws Exception + */ + @Test + public void testBalanceCluster() throws Exception { + Map> servers = mockClusterServers(); + ArrayListMultimap list = convertToGroupBasedMap(servers); + LOG.info("Mock Cluster : " + printStats(list)); + List plans = loadBalancer.balanceCluster(servers); + ArrayListMultimap balancedCluster = reconcile( + list, plans); + LOG.info("Mock Balance : " + printStats(balancedCluster)); + assertClusterAsBalanced(balancedCluster); + } + + /** + * Invariant is that all servers of a group have load between floor(avg) and + * ceiling(avg) number of regions. + */ + private void assertClusterAsBalanced( + ArrayListMultimap groupLoadMap) { + for (String gName : groupLoadMap.keySet()) { + List groupLoad = groupLoadMap.get(gName); + int numServers = groupLoad.size(); + int numRegions = 0; + int maxRegions = 0; + int minRegions = Integer.MAX_VALUE; + for (ServerAndLoad server : groupLoad) { + int nr = server.getLoad(); + if (nr > maxRegions) { + maxRegions = nr; + } + if (nr < minRegions) { + minRegions = nr; + } + numRegions += nr; + } + if (maxRegions - minRegions < 2) { + // less than 2 between max and min, can't balance + return; + } + int min = numRegions / numServers; + int max = numRegions % numServers == 0 ? min : min + 1; + + for (ServerAndLoad server : groupLoad) { + assertTrue(server.getLoad() <= max); + assertTrue(server.getLoad() >= min); + } + } + } + + /** + * Tests immediate assignment. + * + * Invariant is that all regions have an assignment. + * + * @throws Exception + */ + @Test + public void testImmediateAssignment() throws Exception { + List regions = randomRegions(20); + Map assignments = loadBalancer + .immediateAssignment(regions, servers); + assertImmediateAssignment(regions, servers, assignments); + } + + /** + * All regions have an assignment. + * + * @param regions + * @param servers + * @param assignments + * @throws java.io.IOException + * @throws java.io.FileNotFoundException + */ + private void assertImmediateAssignment(List regions, + List servers, + Map assignments) + throws IOException { + for (HRegionInfo region : regions) { + assertTrue(assignments.containsKey(region)); + ServerName server = assignments.get(region); + TableName tableName = region.getTable(); + + String groupName = + getMockedGroupInfoManager().getGroupOfTable(tableName); + assertTrue(StringUtils.isNotEmpty(groupName)); + GroupInfo gInfo = getMockedGroupInfoManager().getGroup(groupName); + assertTrue("Region is not correctly assigned to group servers.", + gInfo.containsServer(server.getHostPort())); + } + } + + /** + * Tests the bulk assignment used during cluster startup. + * + * Round-robin. Should yield a balanced cluster so same invariant as the + * load balancer holds, all servers holding either floor(avg) or + * ceiling(avg). 
+ * + * @throws Exception + */ + @Test + public void testBulkAssignment() throws Exception { + List regions = randomRegions(25); + Map> assignments = loadBalancer + .roundRobinAssignment(regions, servers); + //test empty region/servers scenario + //this should not throw an NPE + loadBalancer.roundRobinAssignment(regions, + Collections.EMPTY_LIST); + //test regular scenario + assertTrue(assignments.keySet().size() == servers.size()); + for (ServerName sn : assignments.keySet()) { + List regionAssigned = assignments.get(sn); + for (HRegionInfo region : regionAssigned) { + TableName tableName = region.getTable(); + String groupName = + getMockedGroupInfoManager().getGroupOfTable(tableName); + assertTrue(StringUtils.isNotEmpty(groupName)); + GroupInfo gInfo = getMockedGroupInfoManager().getGroup( + groupName); + assertTrue( + "Region is not correctly assigned to group servers.", + gInfo.containsServer(sn.getHostPort())); + } + } + ArrayListMultimap loadMap = convertToGroupBasedMap(assignments); + assertClusterAsBalanced(loadMap); + } + + /** + * Test the cluster startup bulk assignment which attempts to retain + * assignment info. + * + * @throws Exception + */ + @Test + public void testRetainAssignment() throws Exception { + // Test simple case where all same servers are there + Map> currentAssignments = mockClusterServers(); + Map inputForTest = new HashMap(); + for (ServerName sn : currentAssignments.keySet()) { + for (HRegionInfo region : currentAssignments.get(sn)) { + inputForTest.put(region, sn); + } + } + //verify region->null server assignment is handled + inputForTest.put(randomRegions(1).get(0), null); + Map> newAssignment = loadBalancer + .retainAssignment(inputForTest, servers); + assertRetainedAssignment(inputForTest, servers, newAssignment); + } + + /** + * Asserts a valid retained assignment plan. + *

+ * <p>
+ * Must meet the following conditions:
+ * <ul>
+ * <li>Every input region has an assignment, and to an online server</li>
+ * <li>If a region had an existing assignment to a server with the same
+ * address as a currently online server, it will be assigned to it</li>
+ * </ul>
+ *
+ * @param existing
+ * @param assignment
+ * @throws java.io.IOException
+ * @throws java.io.FileNotFoundException
+ */
+ private void assertRetainedAssignment(
+     Map<HRegionInfo, ServerName> existing, List<ServerName> servers,
+     Map<ServerName, List<HRegionInfo>> assignment)
+     throws FileNotFoundException, IOException {
+   // Verify condition 1, every region assigned, and to online server
+   Set<ServerName> onlineServerSet = new TreeSet<ServerName>(servers);
+   Set<HRegionInfo> assignedRegions = new TreeSet<HRegionInfo>();
+   for (Map.Entry<ServerName, List<HRegionInfo>> a : assignment.entrySet()) {
+     assertTrue(
+         "Region assigned to server that was not listed as online",
+         onlineServerSet.contains(a.getKey()));
+     for (HRegionInfo r : a.getValue()) {
+       assignedRegions.add(r);
+     }
+   }
+   assertEquals(existing.size(), assignedRegions.size());
+
+   // Verify condition 2, every region must be assigned to correct server.
+   Set<String> onlineHostNames = new TreeSet<String>();
+   for (ServerName s : servers) {
+     onlineHostNames.add(s.getHostname());
+   }
+
+   for (Map.Entry<ServerName, List<HRegionInfo>> a : assignment.entrySet()) {
+     ServerName currentServer = a.getKey();
+     for (HRegionInfo r : a.getValue()) {
+       ServerName oldAssignedServer = existing.get(r);
+       TableName tableName = r.getTable();
+       String groupName =
+           getMockedGroupInfoManager().getGroupOfTable(tableName);
+       assertTrue(StringUtils.isNotEmpty(groupName));
+       GroupInfo gInfo = getMockedGroupInfoManager().getGroup(
+           groupName);
+       assertTrue(
+           "Region is not correctly assigned to group servers.",
+           gInfo.containsServer(currentServer.getHostPort()));
+       if (oldAssignedServer != null
+           && onlineHostNames.contains(oldAssignedServer
+               .getHostname())) {
+         // this region was previously assigned somewhere and that host is
+         // still online; if the region is now hosted elsewhere, the old
+         // host must belong to a different group.
+         if (!oldAssignedServer.getHostPort().equals(currentServer.getHostPort())) {
+           assertFalse(gInfo.containsServer(oldAssignedServer.getHostPort()));
+         }
+       }
+     }
+   }
+ }
+
+ private String printStats(
+     ArrayListMultimap<String, ServerAndLoad> groupBasedLoad) {
+   StringBuffer sb = new StringBuffer();
+   sb.append("\n");
+   for (String groupName : groupBasedLoad.keySet()) {
+     sb.append("Stats for group: " + groupName);
+     sb.append("\n");
+     sb.append(groupMap.get(groupName).getServers());
+     sb.append("\n");
+     List<ServerAndLoad> groupLoad = groupBasedLoad.get(groupName);
+     int numServers = groupLoad.size();
+     int totalRegions = 0;
+     sb.append("Per Server Load: \n");
+     for (ServerAndLoad sLoad : groupLoad) {
+       sb.append("Server :" + sLoad.getServerName() + " Load : "
+           + sLoad.getLoad() + "\n");
+       totalRegions += sLoad.getLoad();
+     }
+     sb.append(" Group Statistics : \n");
+     float average = (float) totalRegions / numServers;
+     int max = (int) Math.ceil(average);
+     int min = (int) Math.floor(average);
+     sb.append("[srvr=" + numServers + " rgns=" + totalRegions + " avg="
+         + average + " max=" + max + " min=" + min + "]");
+     sb.append("\n");
+     sb.append("===============================");
+     sb.append("\n");
+   }
+   return sb.toString();
+ }
+
+ private ArrayListMultimap<String, ServerAndLoad> convertToGroupBasedMap(
+     final Map<ServerName, List<HRegionInfo>> serversMap) throws IOException {
+   ArrayListMultimap<String, ServerAndLoad> loadMap = ArrayListMultimap
+       .create();
+   for (GroupInfo gInfo : getMockedGroupInfoManager().listGroups()) {
+     Set<HostAndPort> groupServers = gInfo.getServers();
+     for (HostAndPort hostPort : groupServers) {
+       ServerName actual = null;
+       for (ServerName entry : servers) {
+         if (entry.getHostPort().equals(hostPort)) {
+           actual = entry;
+           break;
+         }
+       }
+       List<HRegionInfo> regions = serversMap.get(actual);
+       assertTrue("No load for " + actual, regions != null);
+       loadMap.put(gInfo.getName(),
+           new ServerAndLoad(actual, regions.size()));
+     }
+   }
+   return loadMap;
+ }
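+
+ // Worked example of the invariant checked in assertClusterAsBalanced above:
+ // 10 regions on a 3-server group must land as floor(10/3)=3 or ceil(10/3)=4
+ // regions per server, e.g. 4+3+3; a 5+4+1 layout would fail the assertion.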
+ + private ArrayListMultimap reconcile( + ArrayListMultimap previousLoad, + List plans) { + ArrayListMultimap result = ArrayListMultimap + .create(); + result.putAll(previousLoad); + if (plans != null) { + for (RegionPlan plan : plans) { + ServerName source = plan.getSource(); + updateLoad(result, source, -1); + ServerName destination = plan.getDestination(); + updateLoad(result, destination, +1); + } + } + return result; + } + + private void updateLoad( + ArrayListMultimap previousLoad, + final ServerName sn, final int diff) { + for (String groupName : previousLoad.keySet()) { + ServerAndLoad newSAL = null; + ServerAndLoad oldSAL = null; + for (ServerAndLoad sal : previousLoad.get(groupName)) { + if (ServerName.isSameHostnameAndPort(sn, sal.getServerName())) { + oldSAL = sal; + newSAL = new ServerAndLoad(sn, sal.getLoad() + diff); + break; + } + } + if (newSAL != null) { + previousLoad.remove(groupName, oldSAL); + previousLoad.put(groupName, newSAL); + break; + } + } + } + + private Map> mockClusterServers() throws IOException { + assertTrue(servers.size() == regionAssignment.length); + Map> assignment = new TreeMap>(); + for (int i = 0; i < servers.size(); i++) { + int numRegions = regionAssignment[i]; + List regions = assignedRegions(numRegions, servers.get(i)); + assignment.put(servers.get(i), regions); + } + return assignment; + } + + /** + * Generate a list of regions evenly distributed between the tables. + * + * @param numRegions The number of regions to be generated. + * @return List of HRegionInfo. + */ + private List randomRegions(int numRegions) { + List regions = new ArrayList(numRegions); + byte[] start = new byte[16]; + byte[] end = new byte[16]; + rand.nextBytes(start); + rand.nextBytes(end); + int regionIdx = rand.nextInt(tables.length); + for (int i = 0; i < numRegions; i++) { + Bytes.putInt(start, 0, numRegions << 1); + Bytes.putInt(end, 0, (numRegions << 1) + 1); + int tableIndex = (i + regionIdx) % tables.length; + HRegionInfo hri = new HRegionInfo( + tables[tableIndex], start, end, false, regionId++); + regions.add(hri); + } + return regions; + } + + /** + * Generate assigned regions to a given server using group information. + * + * @param numRegions the num regions to generate + * @param sn the servername + * @return the list of regions + * @throws java.io.IOException Signals that an I/O exception has occurred. + */ + private List assignedRegions(int numRegions, ServerName sn) throws IOException { + List regions = new ArrayList(numRegions); + byte[] start = new byte[16]; + byte[] end = new byte[16]; + Bytes.putInt(start, 0, numRegions << 1); + Bytes.putInt(end, 0, (numRegions << 1) + 1); + for (int i = 0; i < numRegions; i++) { + TableName tableName = getTableName(sn); + HRegionInfo hri = new HRegionInfo( + tableName, start, end, false, + regionId++); + regions.add(hri); + } + return regions; + } + + private static List generateServers(int numServers) { + List servers = new ArrayList(numServers); + for (int i = 0; i < numServers; i++) { + String host = "server" + rand.nextInt(100000); + int port = rand.nextInt(60000); + servers.add(ServerName.valueOf(host, port, -1)); + } + return servers; + } + + /** + * Construct group info, with each group having at least one server. 
+ * + * @param servers the servers + * @param groups the groups + * @return the map + */ + private static Map constructGroupInfo( + List servers, String[] groups) { + assertTrue(servers != null); + assertTrue(servers.size() >= groups.length); + int index = 0; + Map groupMap = new HashMap(); + for (String grpName : groups) { + GroupInfo groupInfo = new GroupInfo(grpName); + groupInfo.addServer(servers.get(index).getHostPort()); + groupMap.put(grpName, groupInfo); + index++; + } + while (index < servers.size()) { + int grpIndex = rand.nextInt(groups.length); + groupMap.get(groups[grpIndex]).addServer( + servers.get(index).getHostPort()); + index++; + } + return groupMap; + } + + /** + * Construct table descriptors evenly distributed between the groups. + * + * @return the list + */ + private static List constructTableDesc() { + List tds = Lists.newArrayList(); + int index = rand.nextInt(groups.length); + for (int i = 0; i < tables.length; i++) { + HTableDescriptor htd = new HTableDescriptor(tables[i]); + int grpIndex = (i + index) % groups.length ; + String groupName = groups[grpIndex]; + tableMap.put(tables[i], groupName); + tds.add(htd); + } + return tds; + } + + private static MasterServices getMockedMaster() throws IOException { + TableDescriptors tds = Mockito.mock(TableDescriptors.class); + Mockito.when(tds.get(tables[0])).thenReturn(tableDescs.get(0)); + Mockito.when(tds.get(tables[1])).thenReturn(tableDescs.get(1)); + Mockito.when(tds.get(tables[2])).thenReturn(tableDescs.get(2)); + Mockito.when(tds.get(tables[3])).thenReturn(tableDescs.get(3)); + MasterServices services = Mockito.mock(HMaster.class); + Mockito.when(services.getTableDescriptors()).thenReturn(tds); + AssignmentManager am = Mockito.mock(AssignmentManager.class); + Mockito.when(services.getAssignmentManager()).thenReturn(am); + return services; + } + + private static GroupInfoManager getMockedGroupInfoManager() throws IOException { + GroupInfoManager gm = Mockito.mock(GroupInfoManager.class); + Mockito.when(gm.getGroup(groups[0])).thenReturn( + groupMap.get(groups[0])); + Mockito.when(gm.getGroup(groups[1])).thenReturn( + groupMap.get(groups[1])); + Mockito.when(gm.getGroup(groups[2])).thenReturn( + groupMap.get(groups[2])); + Mockito.when(gm.getGroup(groups[3])).thenReturn( + groupMap.get(groups[3])); + Mockito.when(gm.listGroups()).thenReturn( + Lists.newLinkedList(groupMap.values())); + Mockito.when(gm.isOnline()).thenReturn(true); + Mockito.when(gm.getGroupOfTable(Mockito.any(TableName.class))) + .thenAnswer(new Answer() { + @Override + public String answer(InvocationOnMock invocation) throws Throwable { + return tableMap.get(invocation.getArguments()[0]); + } + }); + return gm; + } + + private TableName getTableName(ServerName sn) throws IOException { + TableName tableName = null; + GroupInfoManager gm = getMockedGroupInfoManager(); + GroupInfo groupOfServer = null; + for(GroupInfo gInfo : gm.listGroups()){ + if(gInfo.containsServer(sn.getHostPort())){ + groupOfServer = gInfo; + break; + } + } + + for(HTableDescriptor desc : tableDescs){ + if(gm.getGroupOfTable(desc.getTableName()).endsWith(groupOfServer.getName())){ + tableName = desc.getTableName(); + } + } + return tableName; + } +} diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java index 8ecc6e3..f2e3ec9 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java +++ 
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
index 8ecc6e3..f2e3ec9 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
@@ -2763,4 +2763,79 @@ public class TestAccessController extends SecureTestUtil {
     verifyDenied(replicateLogEntriesAction, USER_CREATE, USER_RW, USER_RO, USER_NONE,
       USER_OWNER, USER_GROUP_READ, USER_GROUP_ADMIN, USER_GROUP_CREATE);
   }
+
+  @Test
+  public void testMoveServers() throws Exception {
+    AccessTestAction action1 = new AccessTestAction() {
+      @Override
+      public Object run() throws Exception {
+        ACCESS_CONTROLLER.preMoveServers(ObserverContext.createAndPrepare(CP_ENV, null),
+            null, null);
+        return null;
+      }
+    };
+
+    verifyAllowed(action1, SUPERUSER, USER_ADMIN);
+    verifyDenied(action1, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER);
+  }
+
+  @Test
+  public void testMoveTables() throws Exception {
+    AccessTestAction action1 = new AccessTestAction() {
+      @Override
+      public Object run() throws Exception {
+        ACCESS_CONTROLLER.preMoveTables(ObserverContext.createAndPrepare(CP_ENV, null),
+            null, null);
+        return null;
+      }
+    };
+
+    verifyAllowed(action1, SUPERUSER, USER_ADMIN);
+    verifyDenied(action1, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER);
+  }
+
+  @Test
+  public void testAddGroup() throws Exception {
+    AccessTestAction action1 = new AccessTestAction() {
+      @Override
+      public Object run() throws Exception {
+        ACCESS_CONTROLLER.preAddGroup(ObserverContext.createAndPrepare(CP_ENV, null),
+            null);
+        return null;
+      }
+    };
+
+    verifyAllowed(action1, SUPERUSER, USER_ADMIN);
+    verifyDenied(action1, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER);
+  }
+
+  @Test
+  public void testRemoveGroup() throws Exception {
+    AccessTestAction action1 = new AccessTestAction() {
+      @Override
+      public Object run() throws Exception {
+        ACCESS_CONTROLLER.preRemoveGroup(ObserverContext.createAndPrepare(CP_ENV, null),
+            null);
+        return null;
+      }
+    };
+
+    verifyAllowed(action1, SUPERUSER, USER_ADMIN);
+    verifyDenied(action1, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER);
+  }
+
+  @Test
+  public void testBalanceGroup() throws Exception {
+    AccessTestAction action1 = new AccessTestAction() {
+      @Override
+      public Object run() throws Exception {
+        ACCESS_CONTROLLER.preBalanceGroup(ObserverContext.createAndPrepare(CP_ENV, null),
+            null);
+        return null;
+      }
+    };
+
+    verifyAllowed(action1, SUPERUSER, USER_ADMIN);
+    verifyDenied(action1, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER);
+  }
 }
diff --git hbase-shell/src/main/ruby/hbase.rb hbase-shell/src/main/ruby/hbase.rb
index aca1006..f995536 100644
--- hbase-shell/src/main/ruby/hbase.rb
+++ hbase-shell/src/main/ruby/hbase.rb
@@ -102,5 +102,6 @@ require 'hbase/quotas'
 require 'hbase/replication_admin'
 require 'hbase/security'
 require 'hbase/visibility_labels'
+require 'hbase/group_admin'
 
 include HBaseQuotasConstants
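The Ruby wrapper that follows delegates to the GroupAdmin client API declared at the top of this patch. For reference, the equivalent calls made directly from Java would look roughly like this (a sketch: conn is an already-open Connection, and the group, server, and table names are placeholders).

    // Direct use of the client API the shell commands wrap.
    GroupAdmin groupAdmin = GroupAdmin.newClient(conn);
    try {
      groupAdmin.addGroup("my_group");

      // Servers must be given as HOST:PORT (16020 is just a placeholder port).
      Set<HostAndPort> servers = Sets.newHashSet(HostAndPort.fromString("server1:16020"));
      groupAdmin.moveServers(servers, "my_group");

      // Moving a table unassigns its regions so they can land on the new group.
      Set<TableName> tables = Sets.newHashSet(TableName.valueOf("myTable"));
      groupAdmin.moveTables(tables, "my_group");

      groupAdmin.balanceGroup("my_group");
    } finally {
      groupAdmin.close();
    }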
diff --git hbase-shell/src/main/ruby/hbase/group_admin.rb hbase-shell/src/main/ruby/hbase/group_admin.rb
new file mode 100644
index 0000000..851cb01
--- /dev/null
+++ hbase-shell/src/main/ruby/hbase/group_admin.rb
@@ -0,0 +1,124 @@
+#
+# Copyright The Apache Software Foundation
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+include Java
+java_import org.apache.hadoop.hbase.util.Pair
+
+# Wrapper for org.apache.hadoop.hbase.group.GroupAdminClient,
+# which is an API to manage region server groups.
+
+module Hbase
+  class GroupAdmin
+    include HBaseConstants
+
+    def initialize(connection, formatter)
+      @admin = org.apache.hadoop.hbase.group.GroupAdmin.newClient(connection)
+      @formatter = formatter
+    end
+
+    def close
+      @admin.close
+    end
+
+    #----------------------------------------------------------------------------------------------
+    # Returns a list of groups in hbase
+    def listGroups
+      @admin.listGroups.map { |g| g.getName }
+    end
+    #----------------------------------------------------------------------------------------------
+    # get a group's information
+    def getGroup(group_name)
+      group = @admin.getGroupInfo(group_name)
+      res = []
+      if block_given?
+        yield("Servers:")
+      else
+        res << "Servers:"
+      end
+      group.getServers.each do |v|
+        if block_given?
+          yield(v.toString)
+        else
+          res << v.toString
+        end
+      end
+      if block_given?
+        yield("Tables:")
+      else
+        res << "Tables:"
+      end
+      group.getTables.each do |v|
+        if block_given?
+          yield(v.toString)
+        else
+          res << v.toString
+        end
+      end
+      res unless block_given?
+    end
+    #----------------------------------------------------------------------------------------------
+    # add a group
+    def addGroup(group_name)
+      @admin.addGroup(group_name)
+    end
+    #----------------------------------------------------------------------------------------------
+    # remove a group
+    def removeGroup(group_name)
+      @admin.removeGroup(group_name)
+    end
+    #----------------------------------------------------------------------------------------------
+    # balance a group
+    def balanceGroup(group_name)
+      @admin.balanceGroup(group_name)
+    end
+    #----------------------------------------------------------------------------------------------
+    # move servers to a group
+    def moveServers(dest, *args)
+      servers = java.util.HashSet.new
+      args[0].each do |s|
+        servers.add(com.google.common.net.HostAndPort.fromString(s))
+      end
+      @admin.moveServers(servers, dest)
+    end
+    #----------------------------------------------------------------------------------------------
+    # move tables to a group
+    def moveTables(dest, *args)
+      tables = java.util.HashSet.new
+      args[0].each do |s|
+        tables.add(org.apache.hadoop.hbase.TableName.valueOf(s))
+      end
+      @admin.moveTables(tables, dest)
+    end
+    #----------------------------------------------------------------------------------------------
+    # get group of server
+    def getGroupOfServer(server)
+      @admin.getGroupOfServer(com.google.common.net.HostAndPort.fromString(server))
+    end
+    #----------------------------------------------------------------------------------------------
+    # get group of table
+    def getGroupOfTable(table)
+      @admin.getGroupInfoOfTable(org.apache.hadoop.hbase.TableName.valueOf(table))
+    end
+    #----------------------------------------------------------------------------------------------
+    # list tables of a group
+    def listTablesOfGroup(group_name)
+      @admin.listTablesOfGroup(group_name)
+    end
+  end
+end
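The getGroup helper above streams a group's servers and tables through the shell formatter. The same information is reachable from Java via getGroupInfo; in this sketch the element types of GroupInfo.getServers and GroupInfo.getTables (HostAndPort and TableName) are assumed from how the Ruby wrapper uses them.

    // Print a group's membership, mirroring what the shell's get_group shows.
    GroupInfo info = groupAdmin.getGroupInfo("my_group");
    System.out.println("Servers:");
    for (HostAndPort server : info.getServers()) {   // element type assumed
      System.out.println("  " + server);
    }
    System.out.println("Tables:");
    for (TableName table : info.getTables()) {       // element type assumed
      System.out.println("  " + table);
    }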
diff --git hbase-shell/src/main/ruby/hbase/hbase.rb hbase-shell/src/main/ruby/hbase/hbase.rb
index 135e1d5..be1e55b 100644
--- hbase-shell/src/main/ruby/hbase/hbase.rb
+++ hbase-shell/src/main/ruby/hbase/hbase.rb
@@ -47,6 +47,10 @@ module Hbase
       ::Hbase::Admin.new(@connection.getAdmin, formatter)
     end
 
+    def group_admin(formatter)
+      ::Hbase::GroupAdmin.new(@connection, formatter)
+    end
+
     # Create new one each time
     def table(table, shell)
       ::Hbase::Table.new(@connection.getTable(table), shell)
diff --git hbase-shell/src/main/ruby/shell.rb hbase-shell/src/main/ruby/shell.rb
index 587669d..09f83a6 100644
--- hbase-shell/src/main/ruby/shell.rb
+++ hbase-shell/src/main/ruby/shell.rb
@@ -107,6 +107,10 @@ module Shell
       @hbase_quotas_admin ||= hbase.quotas_admin(formatter)
     end
 
+    def hbase_group_admin
+      @group_admin ||= hbase.group_admin(formatter)
+    end
+
     def export_commands(where)
       ::Shell.commands.keys.each do |cmd|
         # here where is the IRB namespace
@@ -426,3 +430,20 @@
     set_visibility
   ]
 )
+
+Shell.load_command_group(
+  'group',
+  :full_name => 'Groups',
+  :comment => "NOTE: The above commands are only applicable when running with the groups setup",
+  :commands => %w[
+    list_groups
+    get_group
+    add_group
+    remove_group
+    balance_group
+    move_group_servers
+    move_group_tables
+    get_server_group
+    get_table_group
+  ]
+)
diff --git hbase-shell/src/main/ruby/shell/commands.rb hbase-shell/src/main/ruby/shell/commands.rb
index 4ad04cd..d7cc39f 100644
--- hbase-shell/src/main/ruby/shell/commands.rb
+++ hbase-shell/src/main/ruby/shell/commands.rb
@@ -74,6 +74,10 @@ module Shell
       @shell.hbase_quotas_admin
     end
 
+    def group_admin
+      @shell.hbase_group_admin
+    end
+
     #----------------------------------------------------------------------
 
     def formatter
diff --git hbase-shell/src/main/ruby/shell/commands/add_group.rb hbase-shell/src/main/ruby/shell/commands/add_group.rb
new file mode 100644
index 0000000..7f91ee5
--- /dev/null
+++ hbase-shell/src/main/ruby/shell/commands/add_group.rb
@@ -0,0 +1,39 @@
+#
+# Copyright The Apache Software Foundation
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+module Shell
+  module Commands
+    class AddGroup < Command
+      def help
+        return <<-EOF
+Create a new region server group.
+
+Example:
+
+  hbase> add_group 'my_group'
+EOF
+      end
+
+      def command(group_name)
+        group_admin.addGroup(group_name)
+      end
+    end
+  end
+end
diff --git hbase-shell/src/main/ruby/shell/commands/balance_group.rb hbase-shell/src/main/ruby/shell/commands/balance_group.rb
new file mode 100644
index 0000000..4c59f63
--- /dev/null
+++ hbase-shell/src/main/ruby/shell/commands/balance_group.rb
@@ -0,0 +1,37 @@
+#
+# Copyright The Apache Software Foundation
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+module Shell
+  module Commands
+    class BalanceGroup < Command
+      def help
+        return <<-EOF
+Balance a region server group.
+
+  hbase> balance_group 'my_group'
+EOF
+      end
+
+      def command(group_name)
+        group_admin.balanceGroup(group_name)
+      end
+    end
+  end
+end
diff --git hbase-shell/src/main/ruby/shell/commands/get_group.rb hbase-shell/src/main/ruby/shell/commands/get_group.rb
new file mode 100644
index 0000000..5ed8226
--- /dev/null
+++ hbase-shell/src/main/ruby/shell/commands/get_group.rb
@@ -0,0 +1,44 @@
+#
+# Copyright The Apache Software Foundation
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+module Shell
+  module Commands
+    class GetGroup < Command
+      def help
+        return <<-EOF
+Get a region server group's information.
+
+Example:
+
+  hbase> get_group 'default'
+EOF
+      end
+
+      def command(group_name)
+        now = Time.now
+        formatter.header([ "GROUP INFORMATION" ])
+        group_admin.getGroup(group_name) do |s|
+          formatter.row([ s ])
+        end
+        formatter.footer(now)
+      end
+    end
+  end
+end
diff --git hbase-shell/src/main/ruby/shell/commands/get_server_group.rb hbase-shell/src/main/ruby/shell/commands/get_server_group.rb
new file mode 100644
index 0000000..c78d4d2
--- /dev/null
+++ hbase-shell/src/main/ruby/shell/commands/get_server_group.rb
@@ -0,0 +1,40 @@
+#
+# Copyright The Apache Software Foundation
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+module Shell
+  module Commands
+    class GetServerGroup < Command
+      def help
+        return <<-EOF
+Get the group name the given region server is a member of.
+
+  hbase> get_server_group 'server1:port1'
+EOF
+      end
+
+      def command(server)
+        now = Time.now
+        group_name = group_admin.getGroupOfServer(server).getName
+        formatter.row([ group_name ])
+        formatter.footer(now, 1)
+      end
+    end
+  end
+end
diff --git hbase-shell/src/main/ruby/shell/commands/get_table_group.rb hbase-shell/src/main/ruby/shell/commands/get_table_group.rb
new file mode 100644
index 0000000..dd8766d
--- /dev/null
+++ hbase-shell/src/main/ruby/shell/commands/get_table_group.rb
@@ -0,0 +1,41 @@
+#
+# Copyright The Apache Software Foundation
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+module Shell
+  module Commands
+    class GetTableGroup < Command
+      def help
+        return <<-EOF
+Get the group name the given table is a member of.
+
+  hbase> get_table_group 'myTable'
+EOF
+      end
+
+      def command(table)
+        now = Time.now
+        group_name = group_admin.getGroupOfTable(table).getName
+        formatter.row([ group_name ])
+        formatter.footer(now, 1)
+      end
+    end
+  end
+end
diff --git hbase-shell/src/main/ruby/shell/commands/list_groups.rb hbase-shell/src/main/ruby/shell/commands/list_groups.rb
new file mode 100644
index 0000000..2e7dd08
--- /dev/null
+++ hbase-shell/src/main/ruby/shell/commands/list_groups.rb
@@ -0,0 +1,50 @@
+#
+# Copyright The Apache Software Foundation
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+module Shell
+  module Commands
+    class ListGroups < Command
+      def help
+        return <<-EOF
+List all region server groups. An optional regular expression parameter can
+be used to filter the output.
+
+Example:
+
+  hbase> list_groups
+  hbase> list_groups 'abc.*'
+EOF
+      end
+
+      def command(regex = ".*")
+        now = Time.now
+        formatter.header([ "GROUPS" ])
+
+        regex = /#{regex}/ unless regex.is_a?(Regexp)
+        list = group_admin.listGroups.grep(regex)
+        list.each do |group|
+          formatter.row([ group ])
+        end
+
+        formatter.footer(now, list.size)
+      end
+    end
+  end
+end
diff --git hbase-shell/src/main/ruby/shell/commands/move_group_servers.rb hbase-shell/src/main/ruby/shell/commands/move_group_servers.rb
new file mode 100644
index 0000000..5e5c850
--- /dev/null
+++ hbase-shell/src/main/ruby/shell/commands/move_group_servers.rb
@@ -0,0 +1,37 @@
+#
+# Copyright The Apache Software Foundation
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+module Shell
+  module Commands
+    class MoveGroupServers < Command
+      def help
+        return <<-EOF
+Reassign region servers from one group to another.
+
+  hbase> move_group_servers 'dest',['server1:port','server2:port']
+EOF
+      end
+
+      def command(dest, *servers)
+        group_admin.moveServers(dest, *servers)
+      end
+    end
+  end
+end
diff --git hbase-shell/src/main/ruby/shell/commands/move_group_tables.rb hbase-shell/src/main/ruby/shell/commands/move_group_tables.rb
new file mode 100644
index 0000000..f495f2c
--- /dev/null
+++ hbase-shell/src/main/ruby/shell/commands/move_group_tables.rb
@@ -0,0 +1,37 @@
+#
+# Copyright The Apache Software Foundation
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+module Shell
+  module Commands
+    class MoveGroupTables < Command
+      def help
+        return <<-EOF
+Reassign tables from one group to another.
+
+  hbase> move_group_tables 'dest',['table1','table2']
+EOF
+      end
+
+      def command(dest, *tables)
+        group_admin.moveTables(dest, *tables)
+      end
+    end
+  end
+end
diff --git hbase-shell/src/main/ruby/shell/commands/remove_group.rb hbase-shell/src/main/ruby/shell/commands/remove_group.rb
new file mode 100644
index 0000000..66863a4
--- /dev/null
+++ hbase-shell/src/main/ruby/shell/commands/remove_group.rb
@@ -0,0 +1,37 @@
+#
+# Copyright The Apache Software Foundation
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+module Shell
+  module Commands
+    class RemoveGroup < Command
+      def help
+        return <<-EOF
+Remove a region server group.
+
+  hbase> remove_group 'my_group'
+EOF
+      end
+
+      def command(group_name)
+        group_admin.removeGroup(group_name)
+      end
+    end
+  end
+end
diff --git hbase-shell/src/test/ruby/test_helper.rb hbase-shell/src/test/ruby/test_helper.rb
index 67bbb14..07fd4a4 100644
--- hbase-shell/src/test/ruby/test_helper.rb
+++ hbase-shell/src/test/ruby/test_helper.rb
@@ -72,6 +72,10 @@ module Hbase
       @shell.hbase_replication_admin
     end
 
+    def group_admin(formatter)
+      @shell.hbase_group_admin
+    end
+
     def create_test_table(name)
       # Create the table if needed
       unless admin.exists?(name)