diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerName.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerName.java index 36a67fe..743e425 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerName.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerName.java @@ -88,9 +88,8 @@ public class ServerName implements Comparable, Serializable { public static final String UNKNOWN_SERVERNAME = "#unknown#"; private final String servername; - private final String hostnameOnly; - private final int port; private final long startcode; + private final HostPort hostPort; /** * Cached versioned bytes of this ServerName instance. @@ -102,10 +101,9 @@ public class ServerName implements Comparable, Serializable { private ServerName(final String hostname, final int port, final long startcode) { // Drop the domain is there is one; no need of it in a local cluster. With it, we get long // unwieldy names. - this.hostnameOnly = hostname; - this.port = port; + this.hostPort = new HostPort(hostname, port); this.startcode = startcode; - this.servername = getServerName(this.hostnameOnly, port, startcode); + this.servername = getServerName(hostname, port, startcode); } /** @@ -189,7 +187,8 @@ public class ServerName implements Comparable, Serializable { * in compares, etc. */ public String toShortString() { - return Addressing.createHostAndPortStr(getHostNameMinusDomain(this.hostnameOnly), this.port); + return Addressing.createHostAndPortStr( + getHostNameMinusDomain(hostPort.getHostname()), hostPort.getPort()); } /** @@ -208,11 +207,11 @@ public class ServerName implements Comparable, Serializable { } public String getHostname() { - return hostnameOnly; + return hostPort.getHostname(); } public int getPort() { - return port; + return hostPort.getPort(); } public long getStartcode() { @@ -256,7 +255,11 @@ public class ServerName implements Comparable, Serializable { * {@link Addressing#createHostAndPortStr(String, int)} */ public String getHostAndPort() { - return Addressing.createHostAndPortStr(this.hostnameOnly, this.port); + return Addressing.createHostAndPortStr(hostPort.getHostname(), hostPort.getPort()); + } + + public HostPort getHostPort() { + return hostPort; } /** diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java index e267c50..9087198 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java @@ -36,6 +36,8 @@ import org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.hadoop.hbase.catalog.CatalogTracker; import org.apache.hadoop.hbase.client.backoff.ClientBackoffPolicy; import org.apache.hadoop.hbase.client.coprocessor.Batch; +import org.apache.hadoop.hbase.group.GroupAdmin; +import org.apache.hadoop.hbase.group.GroupAdminClient; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService; @@ -555,4 +557,9 @@ public interface HConnection extends Abortable, Closeable { * @return the configured client backoff policy */ ClientBackoffPolicy getBackoffPolicy(); + + /** + * @return client for region server group apis + */ + GroupAdmin getGroupAdmin() throws IOException; } diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java index a85bda6..6fc2e4a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java @@ -71,6 +71,8 @@ import org.apache.hadoop.hbase.client.backoff.ClientBackoffPolicyFactory; import org.apache.hadoop.hbase.client.coprocessor.Batch; import org.apache.hadoop.hbase.exceptions.RegionMovedException; import org.apache.hadoop.hbase.exceptions.RegionOpeningException; +import org.apache.hadoop.hbase.group.GroupAdmin; +import org.apache.hadoop.hbase.group.GroupAdminClient; import org.apache.hadoop.hbase.ipc.RpcClient; import org.apache.hadoop.hbase.ipc.RpcControllerFactory; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; @@ -81,8 +83,12 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServic import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest; @@ -111,6 +117,12 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusR import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest; @@ -131,6 +143,8 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshot import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest; import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest; @@ -146,8 +160,14 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyTableReques import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyTableResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveRegionRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveRegionResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.OfflineRegionRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.OfflineRegionResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest; @@ -2220,6 +2240,50 @@ public class HConnectionManager { SecurityCapabilitiesRequest request) throws ServiceException { return stub.getSecurityCapabilities(controller, request); } + + public GetGroupInfoResponse getGroupInfo(RpcController controller, GetGroupInfoRequest request) throws ServiceException { + return stub.getGroupInfo(controller, request); + } + + @Override + public GetGroupInfoOfTableResponse getGroupInfoOfTable(RpcController controller, GetGroupInfoOfTableRequest request) throws ServiceException { + return stub.getGroupInfoOfTable(controller, request); + } + + @Override + public GetGroupInfoOfServerResponse getGroupInfoOfServer(RpcController controller, GetGroupInfoOfServerRequest request) throws ServiceException { + return stub.getGroupInfoOfServer(controller, request); + } + + @Override + public MoveServersResponse moveServers(RpcController controller, MoveServersRequest request) throws ServiceException { + return stub.moveServers(controller, request); + } + + @Override + public MoveTablesResponse moveTables(RpcController controller, MoveTablesRequest request) throws ServiceException { + return stub.moveTables(controller, request); + } + + @Override + public AddGroupResponse addGroup(RpcController controller, AddGroupRequest request) throws ServiceException { + return stub.addGroup(controller, request); + } + + @Override + public RemoveGroupResponse removeGroup(RpcController controller, RemoveGroupRequest request) throws ServiceException { + return stub.removeGroup(controller, request); + } + + @Override + public BalanceGroupResponse balanceGroup(RpcController controller, BalanceGroupRequest request) throws ServiceException { + return stub.balanceGroup(controller, 
request); + } + + @Override + public ListGroupInfosResponse listGroupInfos(RpcController controller, ListGroupInfosRequest request) throws ServiceException { + return stub.listGroupInfos(controller, request); + } }; } @@ -2498,6 +2562,11 @@ public class HConnectionManager { return this.backoffPolicy; } + @Override + public GroupAdmin getGroupAdmin() throws IOException { + return new GroupAdminClient(conf); + } + /* * Return the number of cached region for a table. It will only be called * from a unit test. diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/group/GroupAdminClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/group/GroupAdminClient.java new file mode 100644 index 0000000..691e9dc --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/group/GroupAdminClient.java @@ -0,0 +1,192 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.group; + +import com.google.common.collect.Sets; +import com.google.protobuf.ServiceException; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HostPort; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Set; + +/** + * Client used for managing region server group information. 
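For reference, a minimal usage sketch of the group administration API this patch exposes through HConnection.getGroupAdmin(). The connection setup, the group name "app1", the table "t1", and the region server address are illustrative assumptions, not part of the patch:

import java.io.IOException;

import com.google.common.collect.Sets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HostPort;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HConnectionManager;
import org.apache.hadoop.hbase.group.GroupAdmin;
import org.apache.hadoop.hbase.group.GroupInfo;

public class GroupAdminUsageSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    HConnection conn = HConnectionManager.createConnection(conf);
    try {
      GroupAdmin groupAdmin = conn.getGroupAdmin();

      // Create a group and move one region server into it (hypothetical host:port).
      groupAdmin.addGroup("app1");
      groupAdmin.moveServers(
          Sets.newHashSet(HostPort.valueOf("rs1.example.com:60020")), "app1");

      // Move a table into the group and rebalance only that group's regions.
      groupAdmin.moveTables(Sets.newHashSet(TableName.valueOf("t1")), "app1");
      boolean ran = groupAdmin.balanceGroup("app1");

      // Inspect the resulting membership.
      GroupInfo info = groupAdmin.getGroupInfo("app1");
      System.out.println("balance ran=" + ran + ", group=" + info);
    } finally {
      conn.close();
    }
  }
}

GroupAdminClient can also be constructed directly from a Configuration, as the patch shows, but obtaining it from the connection keeps the master RPC stub lifecycle in one place.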
+ */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public class GroupAdminClient implements GroupAdmin { + private MasterProtos.MasterService.BlockingInterface proxy; + private static final Log LOG = LogFactory.getLog(GroupAdminClient.class); + + public GroupAdminClient(Configuration conf) throws IOException { + proxy = new HBaseAdmin(conf).getConnection().getKeepAliveMasterService(); + } + + @Override + public GroupInfo getGroupInfo(String groupName) throws IOException { + try { + MasterProtos.GetGroupInfoResponse resp = + proxy.getGroupInfo(null, + MasterProtos.GetGroupInfoRequest.newBuilder().setGroupName(groupName).build()); + if(resp.hasGroupInfo()) { + return ProtobufUtil.toGroupInfo(resp.getGroupInfo()); + } + return null; + } catch (ServiceException e) { + throw ProtobufUtil.getRemoteException(e); + } + } + + @Override + public GroupInfo getGroupInfoOfTable(TableName tableName) throws IOException { + MasterProtos.GetGroupInfoOfTableRequest request = + MasterProtos.GetGroupInfoOfTableRequest.newBuilder() + .setTableName(ProtobufUtil.toProtoTableName(tableName)).build(); + + try { + return ProtobufUtil.toGroupInfo(proxy.getGroupInfoOfTable(null, request).getGroupInfo()); + } catch (ServiceException e) { + throw ProtobufUtil.getRemoteException(e); + } + } + + @Override + public void moveServers(Set servers, String targetGroup) throws IOException { + Set hostPorts = Sets.newHashSet(); + for(HostPort el: servers) { + hostPorts.add(HBaseProtos.HostPort.newBuilder() + .setHostName(el.getHostname()) + .setPort(el.getPort()) + .build()); + } + MasterProtos.MoveServersRequest request = + MasterProtos.MoveServersRequest.newBuilder() + .setTargetGroup(targetGroup) + .addAllServers(hostPorts).build(); + + try { + proxy.moveServers(null, request); + } catch (ServiceException e) { + throw ProtobufUtil.getRemoteException(e); + } + } + + @Override + public void moveTables(Set tables, String targetGroup) throws IOException { + MasterProtos.MoveTablesRequest.Builder builder = + MasterProtos.MoveTablesRequest.newBuilder() + .setTargetGroup(targetGroup); + for(TableName tableName: tables) { + builder.addTableName(ProtobufUtil.toProtoTableName(tableName)); + } + try { + proxy.moveTables(null, builder.build()); + } catch (ServiceException e) { + throw ProtobufUtil.getRemoteException(e); + } + } + + @Override + public void addGroup(String groupName) throws IOException { + MasterProtos.AddGroupRequest request = + MasterProtos.AddGroupRequest.newBuilder() + .setGroupName(groupName).build(); + try { + proxy.addGroup(null, request); + } catch (ServiceException e) { + throw ProtobufUtil.getRemoteException(e); + } + } + + @Override + public void removeGroup(String name) throws IOException { + MasterProtos.RemoveGroupRequest request = + MasterProtos.RemoveGroupRequest.newBuilder() + .setGroupName(name).build(); + try { + proxy.removeGroup(null, request); + } catch (ServiceException e) { + throw ProtobufUtil.getRemoteException(e); + } + } + + @Override + public boolean balanceGroup(String name) throws IOException { + MasterProtos.BalanceGroupRequest request = + MasterProtos.BalanceGroupRequest.newBuilder() + .setGroupName(name).build(); + + try { + return proxy.balanceGroup(null, request).getBalanceRan(); + } catch (ServiceException e) { + throw ProtobufUtil.getRemoteException(e); + } + } + + @Override + public List listGroups() throws IOException { + try { + List resp = + proxy.listGroupInfos(null, MasterProtos.ListGroupInfosRequest.newBuilder().build()) + .getGroupInfoList(); + List result = new 
ArrayList(resp.size()); + for(RSGroupProtos.GroupInfo entry: resp) { + result.add(ProtobufUtil.toGroupInfo(entry)); + } + return result; + } catch (ServiceException e) { + throw ProtobufUtil.getRemoteException(e); + } + } + + @Override + public GroupInfo getGroupOfServer(HostPort hostPort) throws IOException { + MasterProtos.GetGroupInfoOfServerRequest request = + MasterProtos.GetGroupInfoOfServerRequest.newBuilder() + .setServer(HBaseProtos.HostPort.newBuilder() + .setHostName(hostPort.getHostname()) + .setPort(hostPort.getPort()) + .build()) + .build(); + try { + return ProtobufUtil.toGroupInfo( + proxy.getGroupInfoOfServer(null, request).getGroupInfo()); + } catch (ServiceException e) { + throw ProtobufUtil.getRemoteException(e); + } + } + + @Override + public void close() throws IOException { + } +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java index 2b80e87..a851416 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java @@ -31,6 +31,7 @@ import com.google.protobuf.Service; import com.google.protobuf.ServiceException; import com.google.protobuf.TextFormat; +import org.apache.hadoop.hbase.HostPort; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; @@ -62,6 +63,7 @@ import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.filter.ByteArrayComparable; import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.io.LimitInputStream; +import org.apache.hadoop.hbase.group.GroupInfo; import org.apache.hadoop.hbase.io.TimeRange; import org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController; import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos; @@ -114,6 +116,7 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableReques import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService; import org.apache.hadoop.hbase.protobuf.generated.RPCProtos; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos; import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest; import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest; import org.apache.hadoop.hbase.protobuf.generated.WALProtos.CompactionDescriptor; @@ -2954,4 +2957,34 @@ public final class ProtobufUtil { } return scList; } + + public static GroupInfo toGroupInfo(RSGroupProtos.GroupInfo proto) { + GroupInfo groupInfo = new GroupInfo(proto.getName()); + for(HBaseProtos.HostPort el: proto.getServersList()) { + groupInfo.addServer(new HostPort(el.getHostName(), el.getPort())); + } + for(HBaseProtos.TableName pTableName: proto.getTablesList()) { + groupInfo.addTable(ProtobufUtil.toTableName(pTableName)); + } + return groupInfo; + } + + public static RSGroupProtos.GroupInfo toProtoGroupInfo(GroupInfo pojo) { + List tables = + new ArrayList(pojo.getTables().size()); + for(TableName arg: pojo.getTables()) { + tables.add(ProtobufUtil.toProtoTableName(arg)); + } + List hostports = + new ArrayList(pojo.getServers().size()); + for(HostPort el: pojo.getServers()) { + hostports.add(HBaseProtos.HostPort.newBuilder() + .setHostName(el.getHostname()) + 
.setPort(el.getPort()) + .build()); + } + return RSGroupProtos.GroupInfo.newBuilder().setName(pojo.getName()) + .addAllServers(hostports) + .addAllTables(tables).build(); + } } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HostPort.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HostPort.java new file mode 100644 index 0000000..c047ee0 --- /dev/null +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HostPort.java @@ -0,0 +1,76 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.util.Addressing; + +@InterfaceAudience.Public +@InterfaceStability.Evolving +public class HostPort implements Comparable { + private final String hostnameOnly; + private final int port; + + public HostPort(final String hostname, final int port) { + this.hostnameOnly = hostname; + this.port = port; + } + + public String getHostname() { + return hostnameOnly; + } + + public int getPort() { + return port; + } + + public static HostPort valueOf(final String hostport) { + String splits[] = hostport.split(":",2); + if(splits.length < 2) + throw new IllegalArgumentException("Server list contains not a valid : entry"); + return new HostPort(splits[0], Integer.parseInt(splits[1])); + } + + @Override + public String toString() { + return Addressing.createHostAndPortStr(this.hostnameOnly, this.port); + } + + @Override + public int compareTo(HostPort other) { + int compare = this.getHostname().compareToIgnoreCase(other.getHostname()); + if (compare != 0) return compare; + compare = this.getPort() - other.getPort(); + return compare; + } + + @Override + public int hashCode() { + return toString().hashCode(); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null) return false; + if (!(o instanceof HostPort)) return false; + return this.compareTo((HostPort)o) == 0; + } +} diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/group/GroupAdmin.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/group/GroupAdmin.java new file mode 100644 index 0000000..822c1ef --- /dev/null +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/group/GroupAdmin.java @@ -0,0 +1,108 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
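An illustrative sketch of the semantics of the new HostPort value class added above: valueOf() parsing, case-insensitive ordering, and equality defined via compareTo(). The host name and port are made up:

import org.apache.hadoop.hbase.HostPort;

public class HostPortSketch {
  public static void main(String[] args) {
    // valueOf() splits a "host:port" string into its two components.
    HostPort hp = HostPort.valueOf("rs1.example.com:60020");
    System.out.println(hp.getHostname()); // rs1.example.com
    System.out.println(hp.getPort());     // 60020

    // Ordering compares host names case-insensitively, then ports numerically,
    // and equals() is defined in terms of compareTo().
    HostPort same = new HostPort("RS1.EXAMPLE.COM", 60020);
    System.out.println(hp.compareTo(same) == 0); // true
    System.out.println(hp.equals(same));         // true

    // Caveat: hashCode() is derived from toString(), which preserves the original
    // case, so these two "equal" instances can land in different hash buckets.
    System.out.println(hp.hashCode() == same.hashCode()); // false
  }
}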
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.group; + +import org.apache.hadoop.hbase.HostPort; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.classification.InterfaceAudience; + +import java.io.Closeable; +import java.io.IOException; +import java.util.List; +import java.util.Set; + +/** + * Group user API interface used between client and server. + */ +@InterfaceAudience.Private +public interface GroupAdmin extends Closeable { + /** + * Gets the group information. + * + * @param groupName the group name + * @return An instance of GroupInfo + */ + GroupInfo getGroupInfo(String groupName) throws IOException; + + /** + * Gets the group info of a table. + * + * @param tableName the table name + * @return An instance of GroupInfo. + */ + GroupInfo getGroupInfoOfTable(TableName tableName) throws IOException; + + /** + * Move a set of servers to another group. + * + * + * @param servers the set of servers to move + * @param targetGroup the target group + * @throws java.io.IOException Signals that an I/O exception has occurred. + */ + void moveServers(Set servers, String targetGroup) throws IOException; + + /** + * Move tables to a new group. + * This will unassign all of a table's regions so they can be reassigned to the correct group. + * @param tables list of tables to move + * @param targetGroup target group + * @throws java.io.IOException + */ + void moveTables(Set tables, String targetGroup) throws IOException; + + /** + * Add a new group. + * @param name name of the group + * @throws java.io.IOException + */ + void addGroup(String name) throws IOException; + + /** + * Remove a group. + * @param name name of the group + * @throws java.io.IOException + */ + void removeGroup(String name) throws IOException; + + /** + * Balance the regions in a group. + * + * @param name the name of the group to balance + * @return true if the balance operation ran + * @throws java.io.IOException + */ + boolean balanceGroup(String name) throws IOException; + + /** + * Lists the existing groups. + * + * @return Collection of GroupInfo. + */ + List listGroups() throws IOException; + + /** + * Retrieve the GroupInfo a server is affiliated to. + * @param hostPort the server + * @return An instance of GroupInfo. + * @throws java.io.IOException + */ + GroupInfo getGroupOfServer(HostPort hostPort) throws IOException; +} diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/group/GroupInfo.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/group/GroupInfo.java new file mode 100644 index 0000000..41a6e2e --- /dev/null +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/group/GroupInfo.java @@ -0,0 +1,182 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership.
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.group; + +import com.google.common.collect.Sets; +import org.apache.hadoop.hbase.HostPort; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.codehaus.jackson.annotate.JsonCreator; +import org.codehaus.jackson.annotate.JsonProperty; + +import java.io.Serializable; +import java.util.Collection; +import java.util.NavigableSet; + +/** + * Stores the group information of region server groups. + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public class GroupInfo implements Serializable { + + public static final String DEFAULT_GROUP = "default"; + public static final String NAMESPACEDESC_PROP_GROUP = "hbase.rsgroup.name"; + public static final String TABLEDESC_PROP_GROUP = "hbase.rsgroup.name"; + public static final String TRANSITION_GROUP_PREFIX = "_transition_"; + + private String name; + private NavigableSet servers; + private NavigableSet tables; + + public GroupInfo(String name) { + this(name, Sets.newTreeSet(), Sets.newTreeSet()); + } + + //constructor for jackson + @JsonCreator + GroupInfo(@JsonProperty("name") String name, + @JsonProperty("servers") NavigableSet servers, + @JsonProperty("tables") NavigableSet tables) { + this.name = name; + this.servers = servers; + this.tables = tables; + } + + public GroupInfo(GroupInfo src) { + name = src.getName(); + servers = Sets.newTreeSet(src.getServers()); + tables = Sets.newTreeSet(src.getTables()); + } + + /** + * Get group name. + * + * @return + */ + public String getName() { + return name; + } + + /** + * Adds the server to the group. + * + * @param hostPort the server + */ + public void addServer(HostPort hostPort){ + servers.add(hostPort); + } + + /** + * Adds a group of servers. + * + * @param hostPort the servers + */ + public void addAllServers(Collection hostPort){ + servers.addAll(hostPort); + } + + /** + * @param hostPort + * @return true, if a server with hostPort is found + */ + public boolean containsServer(HostPort hostPort) { + return servers.contains(hostPort); + } + + /** + * Get list of servers. + * + * @return + */ + public NavigableSet getServers() { + return servers; + } + + /** + * Remove a server from this group. 
+ * + * @param hostPort the server to remove + */ + public boolean removeServer(HostPort hostPort) { + return servers.remove(hostPort); + } + + /** + * Set of tables that are members of this group. + * @return the set of tables + */ + public NavigableSet getTables() { + return tables; + } + + public void addTable(TableName table) { + tables.add(table); + } + + public void addAllTables(Collection arg) { + tables.addAll(arg); + } + + public boolean containsTable(TableName table) { + return tables.contains(table); + } + + public boolean removeTable(TableName table) { + return tables.remove(table); + } + + @Override + public String toString() { + StringBuffer sb = new StringBuffer(); + sb.append("GroupName:"); + sb.append(this.name); + sb.append(", "); + sb.append(" Servers:"); + sb.append(this.servers); + return sb.toString(); + + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + GroupInfo groupInfo = (GroupInfo) o; + + if (!name.equals(groupInfo.name)) return false; + if (!servers.equals(groupInfo.servers)) return false; + if (!tables.equals(groupInfo.tables)) return false; + + return true; + } + + @Override + public int hashCode() { + int result = servers.hashCode(); + result = 31 * result + tables.hashCode(); + result = 31 * result + name.hashCode(); + return result; + } + +} diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/group/IntegrationTestGroup.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/group/IntegrationTestGroup.java new file mode 100644 index 0000000..62f4f8a --- /dev/null +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/group/IntegrationTestGroup.java @@ -0,0 +1,89 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.group; + +import com.google.common.collect.Sets; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.IntegrationTestingUtility; +import org.apache.hadoop.hbase.MiniHBaseCluster; +import org.apache.hadoop.hbase.Waiter; +import org.apache.hadoop.hbase.testclassification.IntegrationTests; +import org.junit.After; +import org.junit.Before; +import org.junit.experimental.categories.Category; + +/** + * Runs all of the unit tests defined in TestGroupsBase + * as an integration test. + * Requires TestGroupsBase.NUM_SLAVES_BASE servers to run.
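A small sketch, using only classes introduced by this patch, of how a GroupInfo instance behaves and how it round-trips through ProtobufUtil.toProtoGroupInfo/toGroupInfo. The group, server, and table names are illustrative:

import org.apache.hadoop.hbase.HostPort;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.group.GroupInfo;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos;

public class GroupInfoSketch {
  public static void main(String[] args) {
    GroupInfo group = new GroupInfo("app1");
    group.addServer(HostPort.valueOf("rs1.example.com:60020"));
    group.addTable(TableName.valueOf("t1"));

    // Membership checks rely on HostPort/TableName equality.
    System.out.println(group.containsServer(new HostPort("rs1.example.com", 60020))); // true
    System.out.println(group.containsTable(TableName.valueOf("t2")));                 // false

    // Round trip through the wire format used by the new master RPCs.
    RSGroupProtos.GroupInfo proto = ProtobufUtil.toProtoGroupInfo(group);
    GroupInfo copy = ProtobufUtil.toGroupInfo(proto);
    System.out.println(group.equals(copy)); // true: same name, servers, and tables
  }
}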
+ */ +@Category(IntegrationTests.class) +public class IntegrationTestGroup extends TestGroupsBase { + //Integration specific + private final static Log LOG = LogFactory.getLog(IntegrationTestGroup.class); + private static boolean initialized = false; + + @Before + public void beforeMethod() throws Exception { + if(!initialized) { + LOG.info("Setting up IntegrationTestGroup"); + LOG.info("Initializing cluster with " + NUM_SLAVES_BASE + " servers"); + TEST_UTIL = new IntegrationTestingUtility(); + ((IntegrationTestingUtility)TEST_UTIL).initializeCluster(NUM_SLAVES_BASE); + //set shared configs + admin = TEST_UTIL.getHBaseAdmin(); + cluster = TEST_UTIL.getHBaseClusterInterface(); + groupAdmin = new VerifyingGroupAdminClient(admin.getConnection().getGroupAdmin(), + TEST_UTIL.getConfiguration()); + LOG.info("Done initializing cluster"); + initialized = true; + //cluster may not be clean + //cleanup when initializing + afterMethod(); + } + } + + @After + public void afterMethod() throws Exception { + LOG.info("Cleaning up previous test run"); + //cleanup previous artifacts + deleteTableIfNecessary(); + deleteNamespaceIfNecessary(); + deleteGroups(); + admin.setBalancerRunning(false,true); + + LOG.info("Restoring the cluster"); + ((IntegrationTestingUtility)TEST_UTIL).restoreCluster(); + LOG.info("Done restoring the cluster"); + + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + LOG.info("Waiting for cleanup to finish "+groupAdmin.listGroups()); + //Might be greater since moving servers back to default + //is after starting a server + return groupAdmin.getGroupInfo(GroupInfo.DEFAULT_GROUP).getServers().size() + == NUM_SLAVES_BASE; + } + }); + LOG.info("Done cleaning up previous test run"); + } +} \ No newline at end of file diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java index 9c0447e..81dbd77 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java @@ -139,6 +139,621 @@ public final class HBaseProtos { // @@protoc_insertion_point(enum_scope:CompareType) } + public interface HostPortOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string host_name = 1; + /** + * required string host_name = 1; + */ + boolean hasHostName(); + /** + * required string host_name = 1; + */ + java.lang.String getHostName(); + /** + * required string host_name = 1; + */ + com.google.protobuf.ByteString + getHostNameBytes(); + + // required uint32 port = 2; + /** + * required uint32 port = 2; + */ + boolean hasPort(); + /** + * required uint32 port = 2; + */ + int getPort(); + } + /** + * Protobuf type {@code HostPort} + */ + public static final class HostPort extends + com.google.protobuf.GeneratedMessage + implements HostPortOrBuilder { + // Use HostPort.newBuilder() to construct. 
+ private HostPort(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private HostPort(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final HostPort defaultInstance; + public static HostPort getDefaultInstance() { + return defaultInstance; + } + + public HostPort getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private HostPort( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + hostName_ = input.readBytes(); + break; + } + case 16: { + bitField0_ |= 0x00000002; + port_ = input.readUInt32(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_HostPort_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_HostPort_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public HostPort parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new HostPort(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string host_name = 1; + public static final int HOST_NAME_FIELD_NUMBER = 1; + private java.lang.Object hostName_; + /** + * required string host_name = 1; + */ + public boolean hasHostName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string host_name = 1; + */ + public java.lang.String getHostName() { + java.lang.Object ref = hostName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + hostName_ = s; + } + return s; + } + } + 
/** + * required string host_name = 1; + */ + public com.google.protobuf.ByteString + getHostNameBytes() { + java.lang.Object ref = hostName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + hostName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // required uint32 port = 2; + public static final int PORT_FIELD_NUMBER = 2; + private int port_; + /** + * required uint32 port = 2; + */ + public boolean hasPort() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required uint32 port = 2; + */ + public int getPort() { + return port_; + } + + private void initFields() { + hostName_ = ""; + port_ = 0; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasHostName()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasPort()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getHostNameBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeUInt32(2, port_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getHostNameBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt32Size(2, port_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort) obj; + + boolean result = true; + result = result && (hasHostName() == other.hasHostName()); + if (hasHostName()) { + result = result && getHostName() + .equals(other.getHostName()); + } + result = result && (hasPort() == other.hasPort()); + if (hasPort()) { + result = result && (getPort() + == other.getPort()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasHostName()) { + hash = (37 * hash) + HOST_NAME_FIELD_NUMBER; + hash = (53 * hash) + getHostName().hashCode(); + } + if (hasPort()) { + hash = (37 * hash) + PORT_FIELD_NUMBER; + hash = (53 * hash) + getPort(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode 
= hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code HostPort} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPortOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_HostPort_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + 
internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_HostPort_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + hostName_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + port_ = 0; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_HostPort_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort build() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.hostName_ = hostName_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.port_ = port_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.getDefaultInstance()) return this; + if (other.hasHostName()) { + bitField0_ |= 0x00000001; + hostName_ = other.hostName_; + onChanged(); + } + if (other.hasPort()) { + setPort(other.getPort()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasHostName()) { + + return false; + } + if (!hasPort()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort parsedMessage = null; + try { 
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string host_name = 1; + private java.lang.Object hostName_ = ""; + /** + * required string host_name = 1; + */ + public boolean hasHostName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string host_name = 1; + */ + public java.lang.String getHostName() { + java.lang.Object ref = hostName_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + hostName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string host_name = 1; + */ + public com.google.protobuf.ByteString + getHostNameBytes() { + java.lang.Object ref = hostName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + hostName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string host_name = 1; + */ + public Builder setHostName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + hostName_ = value; + onChanged(); + return this; + } + /** + * required string host_name = 1; + */ + public Builder clearHostName() { + bitField0_ = (bitField0_ & ~0x00000001); + hostName_ = getDefaultInstance().getHostName(); + onChanged(); + return this; + } + /** + * required string host_name = 1; + */ + public Builder setHostNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + hostName_ = value; + onChanged(); + return this; + } + + // required uint32 port = 2; + private int port_ ; + /** + * required uint32 port = 2; + */ + public boolean hasPort() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required uint32 port = 2; + */ + public int getPort() { + return port_; + } + /** + * required uint32 port = 2; + */ + public Builder setPort(int value) { + bitField0_ |= 0x00000002; + port_ = value; + onChanged(); + return this; + } + /** + * required uint32 port = 2; + */ + public Builder clearPort() { + bitField0_ = (bitField0_ & ~0x00000002); + port_ = 0; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:HostPort) + } + + static { + defaultInstance = new HostPort(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:HostPort) + } + public interface TableNameOrBuilder extends com.google.protobuf.MessageOrBuilder { @@ -16107,6 +16722,11 @@ public final class HBaseProtos { } private static com.google.protobuf.Descriptors.Descriptor + internal_static_HostPort_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_HostPort_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor internal_static_TableName_descriptor; private static com.google.protobuf.GeneratedMessage.FieldAccessorTable @@ -16225,186 +16845,193 @@ public final class HBaseProtos { descriptor; static { java.lang.String[] descriptorData = { - "\n\013HBase.proto\032\nCell.proto\"1\n\tTableName\022\021" + - 
"\n\tnamespace\030\001 \002(\014\022\021\n\tqualifier\030\002 \002(\014\"\250\001\n" + - "\013TableSchema\022\036\n\ntable_name\030\001 \001(\0132\n.Table" + - "Name\022#\n\nattributes\030\002 \003(\0132\017.BytesBytesPai" + - "r\022,\n\017column_families\030\003 \003(\0132\023.ColumnFamil" + - "ySchema\022&\n\rconfiguration\030\004 \003(\0132\017.NameStr" + - "ingPair\"o\n\022ColumnFamilySchema\022\014\n\004name\030\001 " + - "\002(\014\022#\n\nattributes\030\002 \003(\0132\017.BytesBytesPair" + - "\022&\n\rconfiguration\030\003 \003(\0132\017.NameStringPair" + - "\"\203\001\n\nRegionInfo\022\021\n\tregion_id\030\001 \002(\004\022\036\n\nta", - "ble_name\030\002 \002(\0132\n.TableName\022\021\n\tstart_key\030" + - "\003 \001(\014\022\017\n\007end_key\030\004 \001(\014\022\017\n\007offline\030\005 \001(\010\022" + - "\r\n\005split\030\006 \001(\010\"1\n\014FavoredNodes\022!\n\014favore" + - "d_node\030\001 \003(\0132\013.ServerName\"\225\001\n\017RegionSpec" + - "ifier\0222\n\004type\030\001 \002(\0162$.RegionSpecifier.Re" + - "gionSpecifierType\022\r\n\005value\030\002 \002(\014\"?\n\023Regi" + - "onSpecifierType\022\017\n\013REGION_NAME\020\001\022\027\n\023ENCO" + - "DED_REGION_NAME\020\002\"%\n\tTimeRange\022\014\n\004from\030\001" + - " \001(\004\022\n\n\002to\030\002 \001(\004\"A\n\nServerName\022\021\n\thost_n" + - "ame\030\001 \002(\t\022\014\n\004port\030\002 \001(\r\022\022\n\nstart_code\030\003 ", - "\001(\004\"\033\n\013Coprocessor\022\014\n\004name\030\001 \002(\t\"-\n\016Name" + - "StringPair\022\014\n\004name\030\001 \002(\t\022\r\n\005value\030\002 \002(\t\"" + - ",\n\rNameBytesPair\022\014\n\004name\030\001 \002(\t\022\r\n\005value\030" + - "\002 \001(\014\"/\n\016BytesBytesPair\022\r\n\005first\030\001 \002(\014\022\016" + - "\n\006second\030\002 \002(\014\",\n\rNameInt64Pair\022\014\n\004name\030" + - "\001 \001(\t\022\r\n\005value\030\002 \001(\003\"\275\001\n\023SnapshotDescrip" + - "tion\022\014\n\004name\030\001 \002(\t\022\r\n\005table\030\002 \001(\t\022\030\n\rcre" + - "ation_time\030\003 \001(\003:\0010\022.\n\004type\030\004 \001(\0162\031.Snap" + - "shotDescription.Type:\005FLUSH\022\017\n\007version\030\005" + - " \001(\005\".\n\004Type\022\014\n\010DISABLED\020\000\022\t\n\005FLUSH\020\001\022\r\n", - "\tSKIPFLUSH\020\002\"}\n\024ProcedureDescription\022\021\n\t" + - "signature\030\001 \002(\t\022\020\n\010instance\030\002 \001(\t\022\030\n\rcre" + - "ation_time\030\003 \001(\003:\0010\022&\n\rconfiguration\030\004 \003" + - "(\0132\017.NameStringPair\"\n\n\010EmptyMsg\"\033\n\007LongM" + - "sg\022\020\n\010long_msg\030\001 \002(\003\"\037\n\tDoubleMsg\022\022\n\ndou" + - "ble_msg\030\001 \002(\001\"\'\n\rBigDecimalMsg\022\026\n\016bigdec" + - "imal_msg\030\001 \002(\014\"5\n\004UUID\022\026\n\016least_sig_bits" + - "\030\001 \002(\004\022\025\n\rmost_sig_bits\030\002 \002(\004\"K\n\023Namespa" + - "ceDescriptor\022\014\n\004name\030\001 \002(\014\022&\n\rconfigurat" + - "ion\030\002 \003(\0132\017.NameStringPair\"$\n\020RegionServ", - "erInfo\022\020\n\010infoPort\030\001 \001(\005*r\n\013CompareType\022" + - "\010\n\004LESS\020\000\022\021\n\rLESS_OR_EQUAL\020\001\022\t\n\005EQUAL\020\002\022" + - "\r\n\tNOT_EQUAL\020\003\022\024\n\020GREATER_OR_EQUAL\020\004\022\013\n\007" + - "GREATER\020\005\022\t\n\005NO_OP\020\006B>\n*org.apache.hadoo" + - "p.hbase.protobuf.generatedB\013HBaseProtosH" + - "\001\240\001\001" + "\n\013HBase.proto\032\nCell.proto\"+\n\010HostPort\022\021\n" + + "\thost_name\030\001 \002(\t\022\014\n\004port\030\002 \002(\r\"1\n\tTableN" + 
+ "ame\022\021\n\tnamespace\030\001 \002(\014\022\021\n\tqualifier\030\002 \002(" + + "\014\"\250\001\n\013TableSchema\022\036\n\ntable_name\030\001 \001(\0132\n." + + "TableName\022#\n\nattributes\030\002 \003(\0132\017.BytesByt" + + "esPair\022,\n\017column_families\030\003 \003(\0132\023.Column" + + "FamilySchema\022&\n\rconfiguration\030\004 \003(\0132\017.Na" + + "meStringPair\"o\n\022ColumnFamilySchema\022\014\n\004na" + + "me\030\001 \002(\014\022#\n\nattributes\030\002 \003(\0132\017.BytesByte" + + "sPair\022&\n\rconfiguration\030\003 \003(\0132\017.NameStrin", + "gPair\"\203\001\n\nRegionInfo\022\021\n\tregion_id\030\001 \002(\004\022" + + "\036\n\ntable_name\030\002 \002(\0132\n.TableName\022\021\n\tstart" + + "_key\030\003 \001(\014\022\017\n\007end_key\030\004 \001(\014\022\017\n\007offline\030\005" + + " \001(\010\022\r\n\005split\030\006 \001(\010\"1\n\014FavoredNodes\022!\n\014f" + + "avored_node\030\001 \003(\0132\013.ServerName\"\225\001\n\017Regio" + + "nSpecifier\0222\n\004type\030\001 \002(\0162$.RegionSpecifi" + + "er.RegionSpecifierType\022\r\n\005value\030\002 \002(\014\"?\n" + + "\023RegionSpecifierType\022\017\n\013REGION_NAME\020\001\022\027\n" + + "\023ENCODED_REGION_NAME\020\002\"%\n\tTimeRange\022\014\n\004f" + + "rom\030\001 \001(\004\022\n\n\002to\030\002 \001(\004\"A\n\nServerName\022\021\n\th", + "ost_name\030\001 \002(\t\022\014\n\004port\030\002 \001(\r\022\022\n\nstart_co" + + "de\030\003 \001(\004\"\033\n\013Coprocessor\022\014\n\004name\030\001 \002(\t\"-\n" + + "\016NameStringPair\022\014\n\004name\030\001 \002(\t\022\r\n\005value\030\002" + + " \002(\t\",\n\rNameBytesPair\022\014\n\004name\030\001 \002(\t\022\r\n\005v" + + "alue\030\002 \001(\014\"/\n\016BytesBytesPair\022\r\n\005first\030\001 " + + "\002(\014\022\016\n\006second\030\002 \002(\014\",\n\rNameInt64Pair\022\014\n\004" + + "name\030\001 \001(\t\022\r\n\005value\030\002 \001(\003\"\275\001\n\023SnapshotDe" + + "scription\022\014\n\004name\030\001 \002(\t\022\r\n\005table\030\002 \001(\t\022\030" + + "\n\rcreation_time\030\003 \001(\003:\0010\022.\n\004type\030\004 \001(\0162\031" + + ".SnapshotDescription.Type:\005FLUSH\022\017\n\007vers", + "ion\030\005 \001(\005\".\n\004Type\022\014\n\010DISABLED\020\000\022\t\n\005FLUSH" + + "\020\001\022\r\n\tSKIPFLUSH\020\002\"}\n\024ProcedureDescriptio" + + "n\022\021\n\tsignature\030\001 \002(\t\022\020\n\010instance\030\002 \001(\t\022\030" + + "\n\rcreation_time\030\003 \001(\003:\0010\022&\n\rconfiguratio" + + "n\030\004 \003(\0132\017.NameStringPair\"\n\n\010EmptyMsg\"\033\n\007" + + "LongMsg\022\020\n\010long_msg\030\001 \002(\003\"\037\n\tDoubleMsg\022\022" + + "\n\ndouble_msg\030\001 \002(\001\"\'\n\rBigDecimalMsg\022\026\n\016b" + + "igdecimal_msg\030\001 \002(\014\"5\n\004UUID\022\026\n\016least_sig" + + "_bits\030\001 \002(\004\022\025\n\rmost_sig_bits\030\002 \002(\004\"K\n\023Na" + + "mespaceDescriptor\022\014\n\004name\030\001 \002(\014\022&\n\rconfi", + "guration\030\002 \003(\0132\017.NameStringPair\"$\n\020Regio" + + "nServerInfo\022\020\n\010infoPort\030\001 \001(\005*r\n\013Compare" + + "Type\022\010\n\004LESS\020\000\022\021\n\rLESS_OR_EQUAL\020\001\022\t\n\005EQU" + + "AL\020\002\022\r\n\tNOT_EQUAL\020\003\022\024\n\020GREATER_OR_EQUAL\020" + + "\004\022\013\n\007GREATER\020\005\022\t\n\005NO_OP\020\006B>\n*org.apache." 
+ + "hadoop.hbase.protobuf.generatedB\013HBasePr" + + "otosH\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { public com.google.protobuf.ExtensionRegistry assignDescriptors( com.google.protobuf.Descriptors.FileDescriptor root) { descriptor = root; - internal_static_TableName_descriptor = + internal_static_HostPort_descriptor = getDescriptor().getMessageTypes().get(0); + internal_static_HostPort_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_HostPort_descriptor, + new java.lang.String[] { "HostName", "Port", }); + internal_static_TableName_descriptor = + getDescriptor().getMessageTypes().get(1); internal_static_TableName_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_TableName_descriptor, new java.lang.String[] { "Namespace", "Qualifier", }); internal_static_TableSchema_descriptor = - getDescriptor().getMessageTypes().get(1); + getDescriptor().getMessageTypes().get(2); internal_static_TableSchema_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_TableSchema_descriptor, new java.lang.String[] { "TableName", "Attributes", "ColumnFamilies", "Configuration", }); internal_static_ColumnFamilySchema_descriptor = - getDescriptor().getMessageTypes().get(2); + getDescriptor().getMessageTypes().get(3); internal_static_ColumnFamilySchema_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ColumnFamilySchema_descriptor, new java.lang.String[] { "Name", "Attributes", "Configuration", }); internal_static_RegionInfo_descriptor = - getDescriptor().getMessageTypes().get(3); + getDescriptor().getMessageTypes().get(4); internal_static_RegionInfo_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_RegionInfo_descriptor, new java.lang.String[] { "RegionId", "TableName", "StartKey", "EndKey", "Offline", "Split", }); internal_static_FavoredNodes_descriptor = - getDescriptor().getMessageTypes().get(4); + getDescriptor().getMessageTypes().get(5); internal_static_FavoredNodes_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_FavoredNodes_descriptor, new java.lang.String[] { "FavoredNode", }); internal_static_RegionSpecifier_descriptor = - getDescriptor().getMessageTypes().get(5); + getDescriptor().getMessageTypes().get(6); internal_static_RegionSpecifier_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_RegionSpecifier_descriptor, new java.lang.String[] { "Type", "Value", }); internal_static_TimeRange_descriptor = - getDescriptor().getMessageTypes().get(6); + getDescriptor().getMessageTypes().get(7); internal_static_TimeRange_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_TimeRange_descriptor, new java.lang.String[] { "From", "To", }); internal_static_ServerName_descriptor = - getDescriptor().getMessageTypes().get(7); + getDescriptor().getMessageTypes().get(8); internal_static_ServerName_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ServerName_descriptor, new java.lang.String[] { "HostName", "Port", "StartCode", }); internal_static_Coprocessor_descriptor = - getDescriptor().getMessageTypes().get(8); + getDescriptor().getMessageTypes().get(9); 
internal_static_Coprocessor_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_Coprocessor_descriptor, new java.lang.String[] { "Name", }); internal_static_NameStringPair_descriptor = - getDescriptor().getMessageTypes().get(9); + getDescriptor().getMessageTypes().get(10); internal_static_NameStringPair_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_NameStringPair_descriptor, new java.lang.String[] { "Name", "Value", }); internal_static_NameBytesPair_descriptor = - getDescriptor().getMessageTypes().get(10); + getDescriptor().getMessageTypes().get(11); internal_static_NameBytesPair_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_NameBytesPair_descriptor, new java.lang.String[] { "Name", "Value", }); internal_static_BytesBytesPair_descriptor = - getDescriptor().getMessageTypes().get(11); + getDescriptor().getMessageTypes().get(12); internal_static_BytesBytesPair_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_BytesBytesPair_descriptor, new java.lang.String[] { "First", "Second", }); internal_static_NameInt64Pair_descriptor = - getDescriptor().getMessageTypes().get(12); + getDescriptor().getMessageTypes().get(13); internal_static_NameInt64Pair_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_NameInt64Pair_descriptor, new java.lang.String[] { "Name", "Value", }); internal_static_SnapshotDescription_descriptor = - getDescriptor().getMessageTypes().get(13); + getDescriptor().getMessageTypes().get(14); internal_static_SnapshotDescription_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_SnapshotDescription_descriptor, new java.lang.String[] { "Name", "Table", "CreationTime", "Type", "Version", }); internal_static_ProcedureDescription_descriptor = - getDescriptor().getMessageTypes().get(14); + getDescriptor().getMessageTypes().get(15); internal_static_ProcedureDescription_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ProcedureDescription_descriptor, new java.lang.String[] { "Signature", "Instance", "CreationTime", "Configuration", }); internal_static_EmptyMsg_descriptor = - getDescriptor().getMessageTypes().get(15); + getDescriptor().getMessageTypes().get(16); internal_static_EmptyMsg_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_EmptyMsg_descriptor, new java.lang.String[] { }); internal_static_LongMsg_descriptor = - getDescriptor().getMessageTypes().get(16); + getDescriptor().getMessageTypes().get(17); internal_static_LongMsg_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_LongMsg_descriptor, new java.lang.String[] { "LongMsg", }); internal_static_DoubleMsg_descriptor = - getDescriptor().getMessageTypes().get(17); + getDescriptor().getMessageTypes().get(18); internal_static_DoubleMsg_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_DoubleMsg_descriptor, new java.lang.String[] { "DoubleMsg", }); internal_static_BigDecimalMsg_descriptor = - getDescriptor().getMessageTypes().get(18); + getDescriptor().getMessageTypes().get(19); internal_static_BigDecimalMsg_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_BigDecimalMsg_descriptor, new java.lang.String[] { 
"BigdecimalMsg", }); internal_static_UUID_descriptor = - getDescriptor().getMessageTypes().get(19); + getDescriptor().getMessageTypes().get(20); internal_static_UUID_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_UUID_descriptor, new java.lang.String[] { "LeastSigBits", "MostSigBits", }); internal_static_NamespaceDescriptor_descriptor = - getDescriptor().getMessageTypes().get(20); + getDescriptor().getMessageTypes().get(21); internal_static_NamespaceDescriptor_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_NamespaceDescriptor_descriptor, new java.lang.String[] { "Name", "Configuration", }); internal_static_RegionServerInfo_descriptor = - getDescriptor().getMessageTypes().get(21); + getDescriptor().getMessageTypes().get(22); internal_static_RegionServerInfo_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_RegionServerInfo_descriptor, diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java index 8297a61..cc94c16 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java @@ -42289,6 +42289,10752 @@ public final class MasterProtos { // @@protoc_insertion_point(class_scope:SecurityCapabilitiesResponse) } + public interface ListTablesOfGroupRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string group_name = 1; + /** + * required string group_name = 1; + */ + boolean hasGroupName(); + /** + * required string group_name = 1; + */ + java.lang.String getGroupName(); + /** + * required string group_name = 1; + */ + com.google.protobuf.ByteString + getGroupNameBytes(); + } + /** + * Protobuf type {@code ListTablesOfGroupRequest} + */ + public static final class ListTablesOfGroupRequest extends + com.google.protobuf.GeneratedMessage + implements ListTablesOfGroupRequestOrBuilder { + // Use ListTablesOfGroupRequest.newBuilder() to construct. 
+ private ListTablesOfGroupRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private ListTablesOfGroupRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final ListTablesOfGroupRequest defaultInstance; + public static ListTablesOfGroupRequest getDefaultInstance() { + return defaultInstance; + } + + public ListTablesOfGroupRequest getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ListTablesOfGroupRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + groupName_ = input.readBytes(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_ListTablesOfGroupRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_ListTablesOfGroupRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTablesOfGroupRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTablesOfGroupRequest.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public ListTablesOfGroupRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ListTablesOfGroupRequest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string group_name = 1; + public static final int GROUP_NAME_FIELD_NUMBER = 1; + private java.lang.Object groupName_; + /** + * required string group_name = 1; + */ + public boolean hasGroupName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string group_name = 1; + */ + public java.lang.String getGroupName() { + java.lang.Object ref = groupName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + 
(com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + groupName_ = s; + } + return s; + } + } + /** + * required string group_name = 1; + */ + public com.google.protobuf.ByteString + getGroupNameBytes() { + java.lang.Object ref = groupName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + groupName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private void initFields() { + groupName_ = ""; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasGroupName()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getGroupNameBytes()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getGroupNameBytes()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTablesOfGroupRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTablesOfGroupRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTablesOfGroupRequest) obj; + + boolean result = true; + result = result && (hasGroupName() == other.hasGroupName()); + if (hasGroupName()) { + result = result && getGroupName() + .equals(other.getGroupName()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasGroupName()) { + hash = (37 * hash) + GROUP_NAME_FIELD_NUMBER; + hash = (53 * hash) + getGroupName().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTablesOfGroupRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTablesOfGroupRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTablesOfGroupRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTablesOfGroupRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTablesOfGroupRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTablesOfGroupRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTablesOfGroupRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTablesOfGroupRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTablesOfGroupRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTablesOfGroupRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTablesOfGroupRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code ListTablesOfGroupRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTablesOfGroupRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_ListTablesOfGroupRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_ListTablesOfGroupRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTablesOfGroupRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTablesOfGroupRequest.Builder.class); + } + + // Construct using 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTablesOfGroupRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + groupName_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_ListTablesOfGroupRequest_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTablesOfGroupRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTablesOfGroupRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTablesOfGroupRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTablesOfGroupRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTablesOfGroupRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTablesOfGroupRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTablesOfGroupRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.groupName_ = groupName_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTablesOfGroupRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTablesOfGroupRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTablesOfGroupRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTablesOfGroupRequest.getDefaultInstance()) return this; + if (other.hasGroupName()) { + bitField0_ |= 0x00000001; + groupName_ = other.groupName_; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasGroupName()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTablesOfGroupRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTablesOfGroupRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private 
int bitField0_; + + // required string group_name = 1; + private java.lang.Object groupName_ = ""; + /** + * required string group_name = 1; + */ + public boolean hasGroupName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string group_name = 1; + */ + public java.lang.String getGroupName() { + java.lang.Object ref = groupName_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + groupName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string group_name = 1; + */ + public com.google.protobuf.ByteString + getGroupNameBytes() { + java.lang.Object ref = groupName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + groupName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string group_name = 1; + */ + public Builder setGroupName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + groupName_ = value; + onChanged(); + return this; + } + /** + * required string group_name = 1; + */ + public Builder clearGroupName() { + bitField0_ = (bitField0_ & ~0x00000001); + groupName_ = getDefaultInstance().getGroupName(); + onChanged(); + return this; + } + /** + * required string group_name = 1; + */ + public Builder setGroupNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + groupName_ = value; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:ListTablesOfGroupRequest) + } + + static { + defaultInstance = new ListTablesOfGroupRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:ListTablesOfGroupRequest) + } + + public interface ListTablesOfGroupResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // repeated .TableName table_name = 1; + /** + * repeated .TableName table_name = 1; + */ + java.util.List + getTableNameList(); + /** + * repeated .TableName table_name = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName(int index); + /** + * repeated .TableName table_name = 1; + */ + int getTableNameCount(); + /** + * repeated .TableName table_name = 1; + */ + java.util.List + getTableNameOrBuilderList(); + /** + * repeated .TableName table_name = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder( + int index); + } + /** + * Protobuf type {@code ListTablesOfGroupResponse} + */ + public static final class ListTablesOfGroupResponse extends + com.google.protobuf.GeneratedMessage + implements ListTablesOfGroupResponseOrBuilder { + // Use ListTablesOfGroupResponse.newBuilder() to construct. 
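The ListTablesOfGroupRequest message is complete at this point; below is a minimal client-side sketch of constructing it through the generated builder shown above (newBuilder(), setGroupName(), build()). The group name is a placeholder, and in practice the message would be handed to the master RPC rather than printed:

    import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTablesOfGroupRequest;

    public class ListTablesOfGroupRequestSketch {
      public static void main(String[] args) {
        // group_name is the message's only field and it is required, so build()
        // would throw if setGroupName() were omitted.
        ListTablesOfGroupRequest request = ListTablesOfGroupRequest.newBuilder()
            .setGroupName("my_group")  // placeholder group name
            .build();
        System.out.println(request.getGroupName());
      }
    }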
+ private ListTablesOfGroupResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private ListTablesOfGroupResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final ListTablesOfGroupResponse defaultInstance; + public static ListTablesOfGroupResponse getDefaultInstance() { + return defaultInstance; + } + + public ListTablesOfGroupResponse getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ListTablesOfGroupResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + tableName_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; + } + tableName_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + tableName_ = java.util.Collections.unmodifiableList(tableName_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_ListTablesOfGroupResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_ListTablesOfGroupResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTablesOfGroupResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTablesOfGroupResponse.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public ListTablesOfGroupResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ListTablesOfGroupResponse(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + // repeated .TableName table_name = 1; + public static final int TABLE_NAME_FIELD_NUMBER = 1; + private java.util.List tableName_; + /** + * repeated .TableName table_name = 1; + */ + public java.util.List 
getTableNameList() { + return tableName_; + } + /** + * repeated .TableName table_name = 1; + */ + public java.util.List + getTableNameOrBuilderList() { + return tableName_; + } + /** + * repeated .TableName table_name = 1; + */ + public int getTableNameCount() { + return tableName_.size(); + } + /** + * repeated .TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName(int index) { + return tableName_.get(index); + } + /** + * repeated .TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder( + int index) { + return tableName_.get(index); + } + + private void initFields() { + tableName_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + for (int i = 0; i < getTableNameCount(); i++) { + if (!getTableName(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + for (int i = 0; i < tableName_.size(); i++) { + output.writeMessage(1, tableName_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < tableName_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, tableName_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTablesOfGroupResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTablesOfGroupResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTablesOfGroupResponse) obj; + + boolean result = true; + result = result && getTableNameList() + .equals(other.getTableNameList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (getTableNameCount() > 0) { + hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER; + hash = (53 * hash) + getTableNameList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTablesOfGroupResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTablesOfGroupResponse parseFrom( + com.google.protobuf.ByteString data, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTablesOfGroupResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTablesOfGroupResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTablesOfGroupResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTablesOfGroupResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTablesOfGroupResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTablesOfGroupResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTablesOfGroupResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTablesOfGroupResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTablesOfGroupResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code ListTablesOfGroupResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTablesOfGroupResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_ListTablesOfGroupResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_ListTablesOfGroupResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTablesOfGroupResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTablesOfGroupResponse.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTablesOfGroupResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getTableNameFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (tableNameBuilder_ == null) { + tableName_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + } else { + tableNameBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_ListTablesOfGroupResponse_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTablesOfGroupResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTablesOfGroupResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTablesOfGroupResponse build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTablesOfGroupResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTablesOfGroupResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTablesOfGroupResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTablesOfGroupResponse(this); + int from_bitField0_ = bitField0_; + if (tableNameBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + tableName_ = java.util.Collections.unmodifiableList(tableName_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.tableName_ = tableName_; + } else { + result.tableName_ = tableNameBuilder_.build(); + } + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTablesOfGroupResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTablesOfGroupResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTablesOfGroupResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTablesOfGroupResponse.getDefaultInstance()) return this; + if (tableNameBuilder_ == null) { + if (!other.tableName_.isEmpty()) { + if (tableName_.isEmpty()) { + tableName_ = other.tableName_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureTableNameIsMutable(); + tableName_.addAll(other.tableName_); + } + onChanged(); + } + } else { + if (!other.tableName_.isEmpty()) { + if (tableNameBuilder_.isEmpty()) { + tableNameBuilder_.dispose(); + tableNameBuilder_ = null; + tableName_ = other.tableName_; + bitField0_ = 
(bitField0_ & ~0x00000001); + tableNameBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getTableNameFieldBuilder() : null; + } else { + tableNameBuilder_.addAllMessages(other.tableName_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + for (int i = 0; i < getTableNameCount(); i++) { + if (!getTableName(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTablesOfGroupResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTablesOfGroupResponse) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // repeated .TableName table_name = 1; + private java.util.List tableName_ = + java.util.Collections.emptyList(); + private void ensureTableNameIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + tableName_ = new java.util.ArrayList(tableName_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableNameBuilder_; + + /** + * repeated .TableName table_name = 1; + */ + public java.util.List getTableNameList() { + if (tableNameBuilder_ == null) { + return java.util.Collections.unmodifiableList(tableName_); + } else { + return tableNameBuilder_.getMessageList(); + } + } + /** + * repeated .TableName table_name = 1; + */ + public int getTableNameCount() { + if (tableNameBuilder_ == null) { + return tableName_.size(); + } else { + return tableNameBuilder_.getCount(); + } + } + /** + * repeated .TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName(int index) { + if (tableNameBuilder_ == null) { + return tableName_.get(index); + } else { + return tableNameBuilder_.getMessage(index); + } + } + /** + * repeated .TableName table_name = 1; + */ + public Builder setTableName( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTableNameIsMutable(); + tableName_.set(index, value); + onChanged(); + } else { + tableNameBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .TableName table_name = 1; + */ + public Builder setTableName( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableNameBuilder_ == null) { + ensureTableNameIsMutable(); + tableName_.set(index, builderForValue.build()); + onChanged(); + } else { + tableNameBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .TableName table_name = 1; + */ + public Builder addTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (value == null) 
{ + throw new NullPointerException(); + } + ensureTableNameIsMutable(); + tableName_.add(value); + onChanged(); + } else { + tableNameBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .TableName table_name = 1; + */ + public Builder addTableName( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTableNameIsMutable(); + tableName_.add(index, value); + onChanged(); + } else { + tableNameBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .TableName table_name = 1; + */ + public Builder addTableName( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableNameBuilder_ == null) { + ensureTableNameIsMutable(); + tableName_.add(builderForValue.build()); + onChanged(); + } else { + tableNameBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .TableName table_name = 1; + */ + public Builder addTableName( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableNameBuilder_ == null) { + ensureTableNameIsMutable(); + tableName_.add(index, builderForValue.build()); + onChanged(); + } else { + tableNameBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .TableName table_name = 1; + */ + public Builder addAllTableName( + java.lang.Iterable values) { + if (tableNameBuilder_ == null) { + ensureTableNameIsMutable(); + super.addAll(values, tableName_); + onChanged(); + } else { + tableNameBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .TableName table_name = 1; + */ + public Builder clearTableName() { + if (tableNameBuilder_ == null) { + tableName_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + tableNameBuilder_.clear(); + } + return this; + } + /** + * repeated .TableName table_name = 1; + */ + public Builder removeTableName(int index) { + if (tableNameBuilder_ == null) { + ensureTableNameIsMutable(); + tableName_.remove(index); + onChanged(); + } else { + tableNameBuilder_.remove(index); + } + return this; + } + /** + * repeated .TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableNameBuilder( + int index) { + return getTableNameFieldBuilder().getBuilder(index); + } + /** + * repeated .TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder( + int index) { + if (tableNameBuilder_ == null) { + return tableName_.get(index); } else { + return tableNameBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .TableName table_name = 1; + */ + public java.util.List + getTableNameOrBuilderList() { + if (tableNameBuilder_ != null) { + return tableNameBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(tableName_); + } + } + /** + * repeated .TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder addTableNameBuilder() { + return getTableNameFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()); + } + /** + * repeated .TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder 
addTableNameBuilder( + int index) { + return getTableNameFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()); + } + /** + * repeated .TableName table_name = 1; + */ + public java.util.List + getTableNameBuilderList() { + return getTableNameFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> + getTableNameFieldBuilder() { + if (tableNameBuilder_ == null) { + tableNameBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>( + tableName_, + ((bitField0_ & 0x00000001) == 0x00000001), + getParentForChildren(), + isClean()); + tableName_ = null; + } + return tableNameBuilder_; + } + + // @@protoc_insertion_point(builder_scope:ListTablesOfGroupResponse) + } + + static { + defaultInstance = new ListTablesOfGroupResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:ListTablesOfGroupResponse) + } + + public interface GetGroupInfoRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string group_name = 1; + /** + * required string group_name = 1; + */ + boolean hasGroupName(); + /** + * required string group_name = 1; + */ + java.lang.String getGroupName(); + /** + * required string group_name = 1; + */ + com.google.protobuf.ByteString + getGroupNameBytes(); + } + /** + * Protobuf type {@code GetGroupInfoRequest} + */ + public static final class GetGroupInfoRequest extends + com.google.protobuf.GeneratedMessage + implements GetGroupInfoRequestOrBuilder { + // Use GetGroupInfoRequest.newBuilder() to construct. 
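The ListTablesOfGroupResponse message above is the matching reply type; below is a short sketch that builds one from a single HBaseProtos.TableName and reads it back through the repeated-field accessors shown above (addTableName(), getTableNameList()). The TableName builder calls are assumed from standard protoc output for its two bytes fields, and the namespace and qualifier values are placeholders:

    import com.google.protobuf.ByteString;
    import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName;
    import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTablesOfGroupResponse;

    public class ListTablesOfGroupResponseSketch {
      public static void main(String[] args) {
        // TableName carries namespace and qualifier as bytes fields.
        TableName tableName = TableName.newBuilder()
            .setNamespace(ByteString.copyFromUtf8("default"))  // placeholder namespace
            .setQualifier(ByteString.copyFromUtf8("t1"))       // placeholder table
            .build();
        ListTablesOfGroupResponse response = ListTablesOfGroupResponse.newBuilder()
            .addTableName(tableName)
            .build();
        for (TableName tn : response.getTableNameList()) {
          System.out.println(tn.getNamespace().toStringUtf8() + ":"
              + tn.getQualifier().toStringUtf8());
        }
      }
    }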
+ private GetGroupInfoRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private GetGroupInfoRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final GetGroupInfoRequest defaultInstance; + public static GetGroupInfoRequest getDefaultInstance() { + return defaultInstance; + } + + public GetGroupInfoRequest getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private GetGroupInfoRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + groupName_ = input.readBytes(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetGroupInfoRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetGroupInfoRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoRequest.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public GetGroupInfoRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new GetGroupInfoRequest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string group_name = 1; + public static final int GROUP_NAME_FIELD_NUMBER = 1; + private java.lang.Object groupName_; + /** + * required string group_name = 1; + */ + public boolean hasGroupName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string group_name = 1; + */ + public java.lang.String getGroupName() { + java.lang.Object ref = groupName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if 
(bs.isValidUtf8()) { + groupName_ = s; + } + return s; + } + } + /** + * required string group_name = 1; + */ + public com.google.protobuf.ByteString + getGroupNameBytes() { + java.lang.Object ref = groupName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + groupName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private void initFields() { + groupName_ = ""; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasGroupName()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getGroupNameBytes()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getGroupNameBytes()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoRequest) obj; + + boolean result = true; + result = result && (hasGroupName() == other.hasGroupName()); + if (hasGroupName()) { + result = result && getGroupName() + .equals(other.getGroupName()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasGroupName()) { + hash = (37 * hash) + GROUP_NAME_FIELD_NUMBER; + hash = (53 * hash) + getGroupName().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoRequest parseFrom(byte[] data) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code GetGroupInfoRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetGroupInfoRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetGroupInfoRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoRequest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + 
com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + groupName_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetGroupInfoRequest_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.groupName_ = groupName_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoRequest.getDefaultInstance()) return this; + if (other.hasGroupName()) { + bitField0_ |= 0x00000001; + groupName_ = other.groupName_; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasGroupName()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string group_name = 1; + private java.lang.Object groupName_ = ""; + /** + * required string group_name = 1; + */ + public boolean hasGroupName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * 
required string group_name = 1; + */ + public java.lang.String getGroupName() { + java.lang.Object ref = groupName_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + groupName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string group_name = 1; + */ + public com.google.protobuf.ByteString + getGroupNameBytes() { + java.lang.Object ref = groupName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + groupName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string group_name = 1; + */ + public Builder setGroupName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + groupName_ = value; + onChanged(); + return this; + } + /** + * required string group_name = 1; + */ + public Builder clearGroupName() { + bitField0_ = (bitField0_ & ~0x00000001); + groupName_ = getDefaultInstance().getGroupName(); + onChanged(); + return this; + } + /** + * required string group_name = 1; + */ + public Builder setGroupNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + groupName_ = value; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:GetGroupInfoRequest) + } + + static { + defaultInstance = new GetGroupInfoRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:GetGroupInfoRequest) + } + + public interface GetGroupInfoResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // optional .GroupInfo group_info = 1; + /** + * optional .GroupInfo group_info = 1; + */ + boolean hasGroupInfo(); + /** + * optional .GroupInfo group_info = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo getGroupInfo(); + /** + * optional .GroupInfo group_info = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfoOrBuilder getGroupInfoOrBuilder(); + } + /** + * Protobuf type {@code GetGroupInfoResponse} + */ + public static final class GetGroupInfoResponse extends + com.google.protobuf.GeneratedMessage + implements GetGroupInfoResponseOrBuilder { + // Use GetGroupInfoResponse.newBuilder() to construct. 
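// ---------------------------------------------------------------------------
// Editor's note (illustrative only, not part of the generated patch): a minimal
// sketch of how a caller would use the GetGroupInfoRequest/GetGroupInfoResponse
// pair defined above. Class and method names come from this diff; the wrapper
// method, the example group name "my_group", and the toByteArray() round trip
// are assumptions made for the sketch.
//
//   static void getGroupInfoRoundTrip() throws com.google.protobuf.InvalidProtocolBufferException {
//     // group_name is a required field, so build() fails if it is never set.
//     GetGroupInfoRequest request = GetGroupInfoRequest.newBuilder()
//         .setGroupName("my_group")
//         .build();
//
//     // Serialize and re-parse, roughly what the master RPC layer does.
//     byte[] wire = request.toByteArray();
//     GetGroupInfoRequest parsed = GetGroupInfoRequest.parseFrom(wire);
//     assert "my_group".equals(parsed.getGroupName());
//
//     // The response carries an optional GroupInfo; check presence before use.
//     // GetGroupInfoResponse response = ... // obtained from the master
//     // if (response.hasGroupInfo()) { /* inspect response.getGroupInfo() */ }
//   }
// ---------------------------------------------------------------------------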
+ private GetGroupInfoResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private GetGroupInfoResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final GetGroupInfoResponse defaultInstance; + public static GetGroupInfoResponse getDefaultInstance() { + return defaultInstance; + } + + public GetGroupInfoResponse getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private GetGroupInfoResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = groupInfo_.toBuilder(); + } + groupInfo_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(groupInfo_); + groupInfo_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetGroupInfoResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetGroupInfoResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoResponse.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public GetGroupInfoResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new GetGroupInfoResponse(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // optional .GroupInfo group_info = 1; + public static final int GROUP_INFO_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo groupInfo_; + /** + * 
optional .GroupInfo group_info = 1; + */ + public boolean hasGroupInfo() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional .GroupInfo group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo getGroupInfo() { + return groupInfo_; + } + /** + * optional .GroupInfo group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfoOrBuilder getGroupInfoOrBuilder() { + return groupInfo_; + } + + private void initFields() { + groupInfo_ = org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (hasGroupInfo()) { + if (!getGroupInfo().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, groupInfo_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, groupInfo_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoResponse) obj; + + boolean result = true; + result = result && (hasGroupInfo() == other.hasGroupInfo()); + if (hasGroupInfo()) { + result = result && getGroupInfo() + .equals(other.getGroupInfo()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasGroupInfo()) { + hash = (37 * hash) + GROUP_INFO_FIELD_NUMBER; + hash = (53 * hash) + getGroupInfo().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, 
extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code GetGroupInfoResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetGroupInfoResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetGroupInfoResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoResponse.Builder.class); + } + + // Construct using 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getGroupInfoFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (groupInfoBuilder_ == null) { + groupInfo_ = org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.getDefaultInstance(); + } else { + groupInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetGroupInfoResponse_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoResponse build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoResponse(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (groupInfoBuilder_ == null) { + result.groupInfo_ = groupInfo_; + } else { + result.groupInfo_ = groupInfoBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoResponse.getDefaultInstance()) return this; + if (other.hasGroupInfo()) { + mergeGroupInfo(other.getGroupInfo()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (hasGroupInfo()) { + if (!getGroupInfo().isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + 
parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoResponse) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // optional .GroupInfo group_info = 1; + private org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo groupInfo_ = org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfoOrBuilder> groupInfoBuilder_; + /** + * optional .GroupInfo group_info = 1; + */ + public boolean hasGroupInfo() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional .GroupInfo group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo getGroupInfo() { + if (groupInfoBuilder_ == null) { + return groupInfo_; + } else { + return groupInfoBuilder_.getMessage(); + } + } + /** + * optional .GroupInfo group_info = 1; + */ + public Builder setGroupInfo(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo value) { + if (groupInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + groupInfo_ = value; + onChanged(); + } else { + groupInfoBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * optional .GroupInfo group_info = 1; + */ + public Builder setGroupInfo( + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.Builder builderForValue) { + if (groupInfoBuilder_ == null) { + groupInfo_ = builderForValue.build(); + onChanged(); + } else { + groupInfoBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * optional .GroupInfo group_info = 1; + */ + public Builder mergeGroupInfo(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo value) { + if (groupInfoBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + groupInfo_ != org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.getDefaultInstance()) { + groupInfo_ = + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.newBuilder(groupInfo_).mergeFrom(value).buildPartial(); + } else { + groupInfo_ = value; + } + onChanged(); + } else { + groupInfoBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * optional .GroupInfo group_info = 1; + */ + public Builder clearGroupInfo() { + if (groupInfoBuilder_ == null) { + groupInfo_ = org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.getDefaultInstance(); + onChanged(); + } else { + groupInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * optional .GroupInfo group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.Builder getGroupInfoBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getGroupInfoFieldBuilder().getBuilder(); + } + /** + * optional .GroupInfo group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfoOrBuilder getGroupInfoOrBuilder() { + if (groupInfoBuilder_ != null) { + return groupInfoBuilder_.getMessageOrBuilder(); + } else { + return groupInfo_; + } + } + /** + * optional .GroupInfo group_info = 1; + */ + private 
com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfoOrBuilder> + getGroupInfoFieldBuilder() { + if (groupInfoBuilder_ == null) { + groupInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfoOrBuilder>( + groupInfo_, + getParentForChildren(), + isClean()); + groupInfo_ = null; + } + return groupInfoBuilder_; + } + + // @@protoc_insertion_point(builder_scope:GetGroupInfoResponse) + } + + static { + defaultInstance = new GetGroupInfoResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:GetGroupInfoResponse) + } + + public interface GetGroupInfoOfTableRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .TableName table_name = 1; + /** + * required .TableName table_name = 1; + */ + boolean hasTableName(); + /** + * required .TableName table_name = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName(); + /** + * required .TableName table_name = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder(); + } + /** + * Protobuf type {@code GetGroupInfoOfTableRequest} + */ + public static final class GetGroupInfoOfTableRequest extends + com.google.protobuf.GeneratedMessage + implements GetGroupInfoOfTableRequestOrBuilder { + // Use GetGroupInfoOfTableRequest.newBuilder() to construct. + private GetGroupInfoOfTableRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private GetGroupInfoOfTableRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final GetGroupInfoOfTableRequest defaultInstance; + public static GetGroupInfoOfTableRequest getDefaultInstance() { + return defaultInstance; + } + + public GetGroupInfoOfTableRequest getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private GetGroupInfoOfTableRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = tableName_.toBuilder(); + } + tableName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(tableName_); + 
tableName_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetGroupInfoOfTableRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetGroupInfoOfTableRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableRequest.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public GetGroupInfoOfTableRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new GetGroupInfoOfTableRequest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required .TableName table_name = 1; + public static final int TABLE_NAME_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_; + /** + * required .TableName table_name = 1; + */ + public boolean hasTableName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() { + return tableName_; + } + /** + * required .TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() { + return tableName_; + } + + private void initFields() { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasTableName()) { + memoizedIsInitialized = 0; + return false; + } + if (!getTableName().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, tableName_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, tableName_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long 
serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableRequest) obj; + + boolean result = true; + result = result && (hasTableName() == other.hasTableName()); + if (hasTableName()) { + result = result && getTableName() + .equals(other.getTableName()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasTableName()) { + hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER; + hash = (53 * hash) + getTableName().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code GetGroupInfoOfTableRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetGroupInfoOfTableRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetGroupInfoOfTableRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableRequest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getTableNameFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (tableNameBuilder_ == null) { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + } else { + tableNameBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetGroupInfoOfTableRequest_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableRequest build() { + 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (tableNameBuilder_ == null) { + result.tableName_ = tableName_; + } else { + result.tableName_ = tableNameBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableRequest.getDefaultInstance()) return this; + if (other.hasTableName()) { + mergeTableName(other.getTableName()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasTableName()) { + + return false; + } + if (!getTableName().isInitialized()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .TableName table_name = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableNameBuilder_; + /** + * required .TableName table_name = 1; + */ + public boolean hasTableName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() { + if (tableNameBuilder_ == null) { + return tableName_; + } else { + return tableNameBuilder_.getMessage(); + } + } + /** + * required .TableName table_name = 1; + */ + public Builder setTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if 
(tableNameBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + tableName_ = value; + onChanged(); + } else { + tableNameBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .TableName table_name = 1; + */ + public Builder setTableName( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableNameBuilder_ == null) { + tableName_ = builderForValue.build(); + onChanged(); + } else { + tableNameBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .TableName table_name = 1; + */ + public Builder mergeTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + tableName_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) { + tableName_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial(); + } else { + tableName_ = value; + } + onChanged(); + } else { + tableNameBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .TableName table_name = 1; + */ + public Builder clearTableName() { + if (tableNameBuilder_ == null) { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + onChanged(); + } else { + tableNameBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * required .TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableNameBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getTableNameFieldBuilder().getBuilder(); + } + /** + * required .TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() { + if (tableNameBuilder_ != null) { + return tableNameBuilder_.getMessageOrBuilder(); + } else { + return tableName_; + } + } + /** + * required .TableName table_name = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> + getTableNameFieldBuilder() { + if (tableNameBuilder_ == null) { + tableNameBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>( + tableName_, + getParentForChildren(), + isClean()); + tableName_ = null; + } + return tableNameBuilder_; + } + + // @@protoc_insertion_point(builder_scope:GetGroupInfoOfTableRequest) + } + + static { + defaultInstance = new GetGroupInfoOfTableRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:GetGroupInfoOfTableRequest) + } + + public interface GetGroupInfoOfTableResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // optional .GroupInfo group_info = 1; + /** + * optional .GroupInfo group_info = 1; + */ + boolean hasGroupInfo(); + /** + * optional .GroupInfo group_info = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo getGroupInfo(); + /** + * 
optional .GroupInfo group_info = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfoOrBuilder getGroupInfoOrBuilder(); + } + /** + * Protobuf type {@code GetGroupInfoOfTableResponse} + */ + public static final class GetGroupInfoOfTableResponse extends + com.google.protobuf.GeneratedMessage + implements GetGroupInfoOfTableResponseOrBuilder { + // Use GetGroupInfoOfTableResponse.newBuilder() to construct. + private GetGroupInfoOfTableResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private GetGroupInfoOfTableResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final GetGroupInfoOfTableResponse defaultInstance; + public static GetGroupInfoOfTableResponse getDefaultInstance() { + return defaultInstance; + } + + public GetGroupInfoOfTableResponse getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private GetGroupInfoOfTableResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = groupInfo_.toBuilder(); + } + groupInfo_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(groupInfo_); + groupInfo_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetGroupInfoOfTableResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetGroupInfoOfTableResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableResponse.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public GetGroupInfoOfTableResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new GetGroupInfoOfTableResponse(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // optional .GroupInfo group_info = 1; + public static final int GROUP_INFO_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo groupInfo_; + /** + * optional .GroupInfo group_info = 1; + */ + public boolean hasGroupInfo() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional .GroupInfo group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo getGroupInfo() { + return groupInfo_; + } + /** + * optional .GroupInfo group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfoOrBuilder getGroupInfoOrBuilder() { + return groupInfo_; + } + + private void initFields() { + groupInfo_ = org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (hasGroupInfo()) { + if (!getGroupInfo().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, groupInfo_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, groupInfo_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableResponse) obj; + + boolean result = true; + result = result && (hasGroupInfo() == other.hasGroupInfo()); + if (hasGroupInfo()) { + result = result && getGroupInfo() + .equals(other.getGroupInfo()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasGroupInfo()) { + hash = (37 * hash) + GROUP_INFO_FIELD_NUMBER; + hash = (53 * hash) + getGroupInfo().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public 
static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code GetGroupInfoOfTableResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + 
getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetGroupInfoOfTableResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetGroupInfoOfTableResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableResponse.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getGroupInfoFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (groupInfoBuilder_ == null) { + groupInfo_ = org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.getDefaultInstance(); + } else { + groupInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetGroupInfoOfTableResponse_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableResponse build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableResponse(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (groupInfoBuilder_ == null) { + result.groupInfo_ = groupInfo_; + } else { + result.groupInfo_ = groupInfoBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableResponse.getDefaultInstance()) 
return this; + if (other.hasGroupInfo()) { + mergeGroupInfo(other.getGroupInfo()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (hasGroupInfo()) { + if (!getGroupInfo().isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableResponse) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // optional .GroupInfo group_info = 1; + private org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo groupInfo_ = org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfoOrBuilder> groupInfoBuilder_; + /** + * optional .GroupInfo group_info = 1; + */ + public boolean hasGroupInfo() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional .GroupInfo group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo getGroupInfo() { + if (groupInfoBuilder_ == null) { + return groupInfo_; + } else { + return groupInfoBuilder_.getMessage(); + } + } + /** + * optional .GroupInfo group_info = 1; + */ + public Builder setGroupInfo(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo value) { + if (groupInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + groupInfo_ = value; + onChanged(); + } else { + groupInfoBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * optional .GroupInfo group_info = 1; + */ + public Builder setGroupInfo( + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.Builder builderForValue) { + if (groupInfoBuilder_ == null) { + groupInfo_ = builderForValue.build(); + onChanged(); + } else { + groupInfoBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * optional .GroupInfo group_info = 1; + */ + public Builder mergeGroupInfo(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo value) { + if (groupInfoBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + groupInfo_ != org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.getDefaultInstance()) { + groupInfo_ = + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.newBuilder(groupInfo_).mergeFrom(value).buildPartial(); + } else { + groupInfo_ = value; + } + onChanged(); + } else { + groupInfoBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * optional .GroupInfo group_info = 1; + */ + public Builder clearGroupInfo() { + if (groupInfoBuilder_ == null) { + groupInfo_ = 
org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.getDefaultInstance(); + onChanged(); + } else { + groupInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * optional .GroupInfo group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.Builder getGroupInfoBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getGroupInfoFieldBuilder().getBuilder(); + } + /** + * optional .GroupInfo group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfoOrBuilder getGroupInfoOrBuilder() { + if (groupInfoBuilder_ != null) { + return groupInfoBuilder_.getMessageOrBuilder(); + } else { + return groupInfo_; + } + } + /** + * optional .GroupInfo group_info = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfoOrBuilder> + getGroupInfoFieldBuilder() { + if (groupInfoBuilder_ == null) { + groupInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfoOrBuilder>( + groupInfo_, + getParentForChildren(), + isClean()); + groupInfo_ = null; + } + return groupInfoBuilder_; + } + + // @@protoc_insertion_point(builder_scope:GetGroupInfoOfTableResponse) + } + + static { + defaultInstance = new GetGroupInfoOfTableResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:GetGroupInfoOfTableResponse) + } + + public interface MoveServersRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string target_group = 1; + /** + * required string target_group = 1; + */ + boolean hasTargetGroup(); + /** + * required string target_group = 1; + */ + java.lang.String getTargetGroup(); + /** + * required string target_group = 1; + */ + com.google.protobuf.ByteString + getTargetGroupBytes(); + + // repeated .HostPort servers = 2; + /** + * repeated .HostPort servers = 2; + */ + java.util.List + getServersList(); + /** + * repeated .HostPort servers = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort getServers(int index); + /** + * repeated .HostPort servers = 2; + */ + int getServersCount(); + /** + * repeated .HostPort servers = 2; + */ + java.util.List + getServersOrBuilderList(); + /** + * repeated .HostPort servers = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPortOrBuilder getServersOrBuilder( + int index); + } + /** + * Protobuf type {@code MoveServersRequest} + */ + public static final class MoveServersRequest extends + com.google.protobuf.GeneratedMessage + implements MoveServersRequestOrBuilder { + // Use MoveServersRequest.newBuilder() to construct. 
+ private MoveServersRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private MoveServersRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final MoveServersRequest defaultInstance; + public static MoveServersRequest getDefaultInstance() { + return defaultInstance; + } + + public MoveServersRequest getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private MoveServersRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + targetGroup_ = input.readBytes(); + break; + } + case 18: { + if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + servers_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000002; + } + servers_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.PARSER, extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + servers_ = java.util.Collections.unmodifiableList(servers_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_MoveServersRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_MoveServersRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersRequest.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public MoveServersRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new MoveServersRequest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string target_group = 1; + public static final int TARGET_GROUP_FIELD_NUMBER = 1; + private java.lang.Object targetGroup_; + /** + * required string target_group = 1; + */ + public 
boolean hasTargetGroup() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string target_group = 1; + */ + public java.lang.String getTargetGroup() { + java.lang.Object ref = targetGroup_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + targetGroup_ = s; + } + return s; + } + } + /** + * required string target_group = 1; + */ + public com.google.protobuf.ByteString + getTargetGroupBytes() { + java.lang.Object ref = targetGroup_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + targetGroup_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // repeated .HostPort servers = 2; + public static final int SERVERS_FIELD_NUMBER = 2; + private java.util.List servers_; + /** + * repeated .HostPort servers = 2; + */ + public java.util.List getServersList() { + return servers_; + } + /** + * repeated .HostPort servers = 2; + */ + public java.util.List + getServersOrBuilderList() { + return servers_; + } + /** + * repeated .HostPort servers = 2; + */ + public int getServersCount() { + return servers_.size(); + } + /** + * repeated .HostPort servers = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort getServers(int index) { + return servers_.get(index); + } + /** + * repeated .HostPort servers = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPortOrBuilder getServersOrBuilder( + int index) { + return servers_.get(index); + } + + private void initFields() { + targetGroup_ = ""; + servers_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasTargetGroup()) { + memoizedIsInitialized = 0; + return false; + } + for (int i = 0; i < getServersCount(); i++) { + if (!getServers(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getTargetGroupBytes()); + } + for (int i = 0; i < servers_.size(); i++) { + output.writeMessage(2, servers_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getTargetGroupBytes()); + } + for (int i = 0; i < servers_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, servers_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersRequest) obj; + + boolean result = true; + result = result && (hasTargetGroup() == other.hasTargetGroup()); + if (hasTargetGroup()) { + result = result && getTargetGroup() + .equals(other.getTargetGroup()); + } + result = result && getServersList() + .equals(other.getServersList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasTargetGroup()) { + hash = (37 * hash) + TARGET_GROUP_FIELD_NUMBER; + hash = (53 * hash) + getTargetGroup().hashCode(); + } + if (getServersCount() > 0) { + hash = (37 * hash) + SERVERS_FIELD_NUMBER; + hash = (53 * hash) + getServersList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code MoveServersRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_MoveServersRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_MoveServersRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersRequest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getServersFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + targetGroup_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + if (serversBuilder_ == null) { + servers_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + } else { + serversBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_MoveServersRequest_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersRequest result = new 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.targetGroup_ = targetGroup_; + if (serversBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { + servers_ = java.util.Collections.unmodifiableList(servers_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.servers_ = servers_; + } else { + result.servers_ = serversBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersRequest.getDefaultInstance()) return this; + if (other.hasTargetGroup()) { + bitField0_ |= 0x00000001; + targetGroup_ = other.targetGroup_; + onChanged(); + } + if (serversBuilder_ == null) { + if (!other.servers_.isEmpty()) { + if (servers_.isEmpty()) { + servers_ = other.servers_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureServersIsMutable(); + servers_.addAll(other.servers_); + } + onChanged(); + } + } else { + if (!other.servers_.isEmpty()) { + if (serversBuilder_.isEmpty()) { + serversBuilder_.dispose(); + serversBuilder_ = null; + servers_ = other.servers_; + bitField0_ = (bitField0_ & ~0x00000002); + serversBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
+ getServersFieldBuilder() : null; + } else { + serversBuilder_.addAllMessages(other.servers_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasTargetGroup()) { + + return false; + } + for (int i = 0; i < getServersCount(); i++) { + if (!getServers(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string target_group = 1; + private java.lang.Object targetGroup_ = ""; + /** + * required string target_group = 1; + */ + public boolean hasTargetGroup() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string target_group = 1; + */ + public java.lang.String getTargetGroup() { + java.lang.Object ref = targetGroup_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + targetGroup_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string target_group = 1; + */ + public com.google.protobuf.ByteString + getTargetGroupBytes() { + java.lang.Object ref = targetGroup_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + targetGroup_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string target_group = 1; + */ + public Builder setTargetGroup( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + targetGroup_ = value; + onChanged(); + return this; + } + /** + * required string target_group = 1; + */ + public Builder clearTargetGroup() { + bitField0_ = (bitField0_ & ~0x00000001); + targetGroup_ = getDefaultInstance().getTargetGroup(); + onChanged(); + return this; + } + /** + * required string target_group = 1; + */ + public Builder setTargetGroupBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + targetGroup_ = value; + onChanged(); + return this; + } + + // repeated .HostPort servers = 2; + private java.util.List servers_ = + java.util.Collections.emptyList(); + private void ensureServersIsMutable() { + if (!((bitField0_ & 0x00000002) == 0x00000002)) { + servers_ = new java.util.ArrayList(servers_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPortOrBuilder> serversBuilder_; + + /** + * repeated .HostPort servers = 2; + */ + public java.util.List getServersList() { + if (serversBuilder_ == null) { + return 
java.util.Collections.unmodifiableList(servers_); + } else { + return serversBuilder_.getMessageList(); + } + } + /** + * repeated .HostPort servers = 2; + */ + public int getServersCount() { + if (serversBuilder_ == null) { + return servers_.size(); + } else { + return serversBuilder_.getCount(); + } + } + /** + * repeated .HostPort servers = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort getServers(int index) { + if (serversBuilder_ == null) { + return servers_.get(index); + } else { + return serversBuilder_.getMessage(index); + } + } + /** + * repeated .HostPort servers = 2; + */ + public Builder setServers( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort value) { + if (serversBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureServersIsMutable(); + servers_.set(index, value); + onChanged(); + } else { + serversBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .HostPort servers = 2; + */ + public Builder setServers( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.Builder builderForValue) { + if (serversBuilder_ == null) { + ensureServersIsMutable(); + servers_.set(index, builderForValue.build()); + onChanged(); + } else { + serversBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .HostPort servers = 2; + */ + public Builder addServers(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort value) { + if (serversBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureServersIsMutable(); + servers_.add(value); + onChanged(); + } else { + serversBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .HostPort servers = 2; + */ + public Builder addServers( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort value) { + if (serversBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureServersIsMutable(); + servers_.add(index, value); + onChanged(); + } else { + serversBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .HostPort servers = 2; + */ + public Builder addServers( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.Builder builderForValue) { + if (serversBuilder_ == null) { + ensureServersIsMutable(); + servers_.add(builderForValue.build()); + onChanged(); + } else { + serversBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .HostPort servers = 2; + */ + public Builder addServers( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.Builder builderForValue) { + if (serversBuilder_ == null) { + ensureServersIsMutable(); + servers_.add(index, builderForValue.build()); + onChanged(); + } else { + serversBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .HostPort servers = 2; + */ + public Builder addAllServers( + java.lang.Iterable values) { + if (serversBuilder_ == null) { + ensureServersIsMutable(); + super.addAll(values, servers_); + onChanged(); + } else { + serversBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .HostPort servers = 2; + */ + public Builder clearServers() { + if (serversBuilder_ == null) { + servers_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + serversBuilder_.clear(); + } + return this; + } + /** + * repeated 
.HostPort servers = 2; + */ + public Builder removeServers(int index) { + if (serversBuilder_ == null) { + ensureServersIsMutable(); + servers_.remove(index); + onChanged(); + } else { + serversBuilder_.remove(index); + } + return this; + } + /** + * repeated .HostPort servers = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.Builder getServersBuilder( + int index) { + return getServersFieldBuilder().getBuilder(index); + } + /** + * repeated .HostPort servers = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPortOrBuilder getServersOrBuilder( + int index) { + if (serversBuilder_ == null) { + return servers_.get(index); } else { + return serversBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .HostPort servers = 2; + */ + public java.util.List + getServersOrBuilderList() { + if (serversBuilder_ != null) { + return serversBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(servers_); + } + } + /** + * repeated .HostPort servers = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.Builder addServersBuilder() { + return getServersFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.getDefaultInstance()); + } + /** + * repeated .HostPort servers = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.Builder addServersBuilder( + int index) { + return getServersFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.getDefaultInstance()); + } + /** + * repeated .HostPort servers = 2; + */ + public java.util.List + getServersBuilderList() { + return getServersFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPortOrBuilder> + getServersFieldBuilder() { + if (serversBuilder_ == null) { + serversBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPortOrBuilder>( + servers_, + ((bitField0_ & 0x00000002) == 0x00000002), + getParentForChildren(), + isClean()); + servers_ = null; + } + return serversBuilder_; + } + + // @@protoc_insertion_point(builder_scope:MoveServersRequest) + } + + static { + defaultInstance = new MoveServersRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:MoveServersRequest) + } + + public interface MoveServersResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + } + /** + * Protobuf type {@code MoveServersResponse} + */ + public static final class MoveServersResponse extends + com.google.protobuf.GeneratedMessage + implements MoveServersResponseOrBuilder { + // Use MoveServersResponse.newBuilder() to construct. 
+ private MoveServersResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private MoveServersResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final MoveServersResponse defaultInstance; + public static MoveServersResponse getDefaultInstance() { + return defaultInstance; + } + + public MoveServersResponse getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private MoveServersResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_MoveServersResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_MoveServersResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersResponse.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public MoveServersResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new MoveServersResponse(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private void initFields() { + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + 
@java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersResponse) obj; + + boolean result = true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + 
return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code MoveServersResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_MoveServersResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_MoveServersResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersResponse.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_MoveServersResponse_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersResponse build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersResponse(this); + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder 
mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersResponse.getDefaultInstance()) return this; + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersResponse) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + // @@protoc_insertion_point(builder_scope:MoveServersResponse) + } + + static { + defaultInstance = new MoveServersResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:MoveServersResponse) + } + + public interface MoveTablesRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string target_group = 1; + /** + * required string target_group = 1; + */ + boolean hasTargetGroup(); + /** + * required string target_group = 1; + */ + java.lang.String getTargetGroup(); + /** + * required string target_group = 1; + */ + com.google.protobuf.ByteString + getTargetGroupBytes(); + + // repeated .TableName table_name = 2; + /** + * repeated .TableName table_name = 2; + */ + java.util.List + getTableNameList(); + /** + * repeated .TableName table_name = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName(int index); + /** + * repeated .TableName table_name = 2; + */ + int getTableNameCount(); + /** + * repeated .TableName table_name = 2; + */ + java.util.List + getTableNameOrBuilderList(); + /** + * repeated .TableName table_name = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder( + int index); + } + /** + * Protobuf type {@code MoveTablesRequest} + */ + public static final class MoveTablesRequest extends + com.google.protobuf.GeneratedMessage + implements MoveTablesRequestOrBuilder { + // Use MoveTablesRequest.newBuilder() to construct. 
+ private MoveTablesRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private MoveTablesRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final MoveTablesRequest defaultInstance; + public static MoveTablesRequest getDefaultInstance() { + return defaultInstance; + } + + public MoveTablesRequest getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private MoveTablesRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + targetGroup_ = input.readBytes(); + break; + } + case 18: { + if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + tableName_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000002; + } + tableName_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + tableName_ = java.util.Collections.unmodifiableList(tableName_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_MoveTablesRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_MoveTablesRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesRequest.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public MoveTablesRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new MoveTablesRequest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string target_group = 1; + public static final int TARGET_GROUP_FIELD_NUMBER = 1; + private java.lang.Object targetGroup_; + /** + * required string target_group = 1; + */ + public boolean 
hasTargetGroup() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string target_group = 1; + */ + public java.lang.String getTargetGroup() { + java.lang.Object ref = targetGroup_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + targetGroup_ = s; + } + return s; + } + } + /** + * required string target_group = 1; + */ + public com.google.protobuf.ByteString + getTargetGroupBytes() { + java.lang.Object ref = targetGroup_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + targetGroup_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // repeated .TableName table_name = 2; + public static final int TABLE_NAME_FIELD_NUMBER = 2; + private java.util.List tableName_; + /** + * repeated .TableName table_name = 2; + */ + public java.util.List getTableNameList() { + return tableName_; + } + /** + * repeated .TableName table_name = 2; + */ + public java.util.List + getTableNameOrBuilderList() { + return tableName_; + } + /** + * repeated .TableName table_name = 2; + */ + public int getTableNameCount() { + return tableName_.size(); + } + /** + * repeated .TableName table_name = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName(int index) { + return tableName_.get(index); + } + /** + * repeated .TableName table_name = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder( + int index) { + return tableName_.get(index); + } + + private void initFields() { + targetGroup_ = ""; + tableName_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasTargetGroup()) { + memoizedIsInitialized = 0; + return false; + } + for (int i = 0; i < getTableNameCount(); i++) { + if (!getTableName(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getTargetGroupBytes()); + } + for (int i = 0; i < tableName_.size(); i++) { + output.writeMessage(2, tableName_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getTargetGroupBytes()); + } + for (int i = 0; i < tableName_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, tableName_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if 
(obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesRequest) obj; + + boolean result = true; + result = result && (hasTargetGroup() == other.hasTargetGroup()); + if (hasTargetGroup()) { + result = result && getTargetGroup() + .equals(other.getTargetGroup()); + } + result = result && getTableNameList() + .equals(other.getTableNameList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasTargetGroup()) { + hash = (37 * hash) + TARGET_GROUP_FIELD_NUMBER; + hash = (53 * hash) + getTargetGroup().hashCode(); + } + if (getTableNameCount() > 0) { + hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER; + hash = (53 * hash) + getTableNameList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return 
PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code MoveTablesRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_MoveTablesRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_MoveTablesRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesRequest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getTableNameFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + targetGroup_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + if (tableNameBuilder_ == null) { + tableName_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + } else { + tableNameBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_MoveTablesRequest_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesRequest buildPartial() { + 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.targetGroup_ = targetGroup_; + if (tableNameBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { + tableName_ = java.util.Collections.unmodifiableList(tableName_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.tableName_ = tableName_; + } else { + result.tableName_ = tableNameBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesRequest.getDefaultInstance()) return this; + if (other.hasTargetGroup()) { + bitField0_ |= 0x00000001; + targetGroup_ = other.targetGroup_; + onChanged(); + } + if (tableNameBuilder_ == null) { + if (!other.tableName_.isEmpty()) { + if (tableName_.isEmpty()) { + tableName_ = other.tableName_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureTableNameIsMutable(); + tableName_.addAll(other.tableName_); + } + onChanged(); + } + } else { + if (!other.tableName_.isEmpty()) { + if (tableNameBuilder_.isEmpty()) { + tableNameBuilder_.dispose(); + tableNameBuilder_ = null; + tableName_ = other.tableName_; + bitField0_ = (bitField0_ & ~0x00000002); + tableNameBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
+ getTableNameFieldBuilder() : null; + } else { + tableNameBuilder_.addAllMessages(other.tableName_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasTargetGroup()) { + + return false; + } + for (int i = 0; i < getTableNameCount(); i++) { + if (!getTableName(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string target_group = 1; + private java.lang.Object targetGroup_ = ""; + /** + * required string target_group = 1; + */ + public boolean hasTargetGroup() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string target_group = 1; + */ + public java.lang.String getTargetGroup() { + java.lang.Object ref = targetGroup_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + targetGroup_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string target_group = 1; + */ + public com.google.protobuf.ByteString + getTargetGroupBytes() { + java.lang.Object ref = targetGroup_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + targetGroup_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string target_group = 1; + */ + public Builder setTargetGroup( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + targetGroup_ = value; + onChanged(); + return this; + } + /** + * required string target_group = 1; + */ + public Builder clearTargetGroup() { + bitField0_ = (bitField0_ & ~0x00000001); + targetGroup_ = getDefaultInstance().getTargetGroup(); + onChanged(); + return this; + } + /** + * required string target_group = 1; + */ + public Builder setTargetGroupBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + targetGroup_ = value; + onChanged(); + return this; + } + + // repeated .TableName table_name = 2; + private java.util.List tableName_ = + java.util.Collections.emptyList(); + private void ensureTableNameIsMutable() { + if (!((bitField0_ & 0x00000002) == 0x00000002)) { + tableName_ = new java.util.ArrayList(tableName_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableNameBuilder_; + + /** + * repeated .TableName table_name = 2; + */ + public java.util.List getTableNameList() { + if (tableNameBuilder_ == null) { + return 
java.util.Collections.unmodifiableList(tableName_); + } else { + return tableNameBuilder_.getMessageList(); + } + } + /** + * repeated .TableName table_name = 2; + */ + public int getTableNameCount() { + if (tableNameBuilder_ == null) { + return tableName_.size(); + } else { + return tableNameBuilder_.getCount(); + } + } + /** + * repeated .TableName table_name = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName(int index) { + if (tableNameBuilder_ == null) { + return tableName_.get(index); + } else { + return tableNameBuilder_.getMessage(index); + } + } + /** + * repeated .TableName table_name = 2; + */ + public Builder setTableName( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTableNameIsMutable(); + tableName_.set(index, value); + onChanged(); + } else { + tableNameBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .TableName table_name = 2; + */ + public Builder setTableName( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableNameBuilder_ == null) { + ensureTableNameIsMutable(); + tableName_.set(index, builderForValue.build()); + onChanged(); + } else { + tableNameBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .TableName table_name = 2; + */ + public Builder addTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTableNameIsMutable(); + tableName_.add(value); + onChanged(); + } else { + tableNameBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .TableName table_name = 2; + */ + public Builder addTableName( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTableNameIsMutable(); + tableName_.add(index, value); + onChanged(); + } else { + tableNameBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .TableName table_name = 2; + */ + public Builder addTableName( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableNameBuilder_ == null) { + ensureTableNameIsMutable(); + tableName_.add(builderForValue.build()); + onChanged(); + } else { + tableNameBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .TableName table_name = 2; + */ + public Builder addTableName( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableNameBuilder_ == null) { + ensureTableNameIsMutable(); + tableName_.add(index, builderForValue.build()); + onChanged(); + } else { + tableNameBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .TableName table_name = 2; + */ + public Builder addAllTableName( + java.lang.Iterable values) { + if (tableNameBuilder_ == null) { + ensureTableNameIsMutable(); + super.addAll(values, tableName_); + onChanged(); + } else { + tableNameBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .TableName table_name = 2; + */ + public Builder clearTableName() { + if (tableNameBuilder_ == null) { + tableName_ = java.util.Collections.emptyList(); + 
bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + tableNameBuilder_.clear(); + } + return this; + } + /** + * repeated .TableName table_name = 2; + */ + public Builder removeTableName(int index) { + if (tableNameBuilder_ == null) { + ensureTableNameIsMutable(); + tableName_.remove(index); + onChanged(); + } else { + tableNameBuilder_.remove(index); + } + return this; + } + /** + * repeated .TableName table_name = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableNameBuilder( + int index) { + return getTableNameFieldBuilder().getBuilder(index); + } + /** + * repeated .TableName table_name = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder( + int index) { + if (tableNameBuilder_ == null) { + return tableName_.get(index); } else { + return tableNameBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .TableName table_name = 2; + */ + public java.util.List + getTableNameOrBuilderList() { + if (tableNameBuilder_ != null) { + return tableNameBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(tableName_); + } + } + /** + * repeated .TableName table_name = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder addTableNameBuilder() { + return getTableNameFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()); + } + /** + * repeated .TableName table_name = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder addTableNameBuilder( + int index) { + return getTableNameFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()); + } + /** + * repeated .TableName table_name = 2; + */ + public java.util.List + getTableNameBuilderList() { + return getTableNameFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> + getTableNameFieldBuilder() { + if (tableNameBuilder_ == null) { + tableNameBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>( + tableName_, + ((bitField0_ & 0x00000002) == 0x00000002), + getParentForChildren(), + isClean()); + tableName_ = null; + } + return tableNameBuilder_; + } + + // @@protoc_insertion_point(builder_scope:MoveTablesRequest) + } + + static { + defaultInstance = new MoveTablesRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:MoveTablesRequest) + } + + public interface MoveTablesResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + } + /** + * Protobuf type {@code MoveTablesResponse} + */ + public static final class MoveTablesResponse extends + com.google.protobuf.GeneratedMessage + implements MoveTablesResponseOrBuilder { + // Use MoveTablesResponse.newBuilder() to construct. 
+ private MoveTablesResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private MoveTablesResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final MoveTablesResponse defaultInstance; + public static MoveTablesResponse getDefaultInstance() { + return defaultInstance; + } + + public MoveTablesResponse getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private MoveTablesResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_MoveTablesResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_MoveTablesResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesResponse.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public MoveTablesResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new MoveTablesResponse(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private void initFields() { + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + 
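
For orientation amid the generated code: the MoveTablesRequest message above carries a required target_group string plus a repeated TableName list, and MoveTablesResponse is an empty acknowledgement for the corresponding master RPC. Below is a minimal client-side round-trip sketch using only the generated accessors shown above; the namespace/qualifier values are illustrative, and TableName's bytes fields are assumed from HBase.proto.

import com.google.protobuf.ByteString;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesRequest;

public class MoveTablesRequestSketch {
  public static void main(String[] args) throws Exception {
    // Assumed from HBase.proto: TableName has required bytes namespace/qualifier fields.
    HBaseProtos.TableName table = HBaseProtos.TableName.newBuilder()
        .setNamespace(ByteString.copyFromUtf8("default"))   // illustrative values
        .setQualifier(ByteString.copyFromUtf8("t1"))
        .build();

    MoveTablesRequest request = MoveTablesRequest.newBuilder()
        .setTargetGroup("my_group")   // required string target_group = 1
        .addTableName(table)          // repeated .TableName table_name = 2
        .build();

    // Wire round-trip through the generated parser.
    MoveTablesRequest parsed = MoveTablesRequest.parseFrom(request.toByteArray());
    System.out.println(parsed.getTargetGroup());      // my_group
    System.out.println(parsed.getTableNameCount());   // 1
  }
}
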
@java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesResponse) obj; + + boolean result = true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return 
PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code MoveTablesResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_MoveTablesResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_MoveTablesResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesResponse.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_MoveTablesResponse_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesResponse build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesResponse(this); + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder 
mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesResponse.getDefaultInstance()) return this; + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesResponse) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + // @@protoc_insertion_point(builder_scope:MoveTablesResponse) + } + + static { + defaultInstance = new MoveTablesResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:MoveTablesResponse) + } + + public interface AddGroupRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string group_name = 1; + /** + * required string group_name = 1; + */ + boolean hasGroupName(); + /** + * required string group_name = 1; + */ + java.lang.String getGroupName(); + /** + * required string group_name = 1; + */ + com.google.protobuf.ByteString + getGroupNameBytes(); + } + /** + * Protobuf type {@code AddGroupRequest} + */ + public static final class AddGroupRequest extends + com.google.protobuf.GeneratedMessage + implements AddGroupRequestOrBuilder { + // Use AddGroupRequest.newBuilder() to construct. 
+ private AddGroupRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private AddGroupRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final AddGroupRequest defaultInstance; + public static AddGroupRequest getDefaultInstance() { + return defaultInstance; + } + + public AddGroupRequest getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private AddGroupRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + groupName_ = input.readBytes(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_AddGroupRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_AddGroupRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupRequest.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public AddGroupRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new AddGroupRequest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string group_name = 1; + public static final int GROUP_NAME_FIELD_NUMBER = 1; + private java.lang.Object groupName_; + /** + * required string group_name = 1; + */ + public boolean hasGroupName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string group_name = 1; + */ + public java.lang.String getGroupName() { + java.lang.Object ref = groupName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + groupName_ = s; + } + return 
s; + } + } + /** + * required string group_name = 1; + */ + public com.google.protobuf.ByteString + getGroupNameBytes() { + java.lang.Object ref = groupName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + groupName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private void initFields() { + groupName_ = ""; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasGroupName()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getGroupNameBytes()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getGroupNameBytes()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupRequest) obj; + + boolean result = true; + result = result && (hasGroupName() == other.hasGroupName()); + if (hasGroupName()) { + result = result && getGroupName() + .equals(other.getGroupName()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasGroupName()) { + hash = (37 * hash) + GROUP_NAME_FIELD_NUMBER; + hash = (53 * hash) + getGroupName().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } 
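
Sitting among the generated parse methods, it is worth noting how AddGroupRequest's single required group_name field is enforced: build() refuses an unset name, while buildPartial() defers the check to isInitialized(). A small sketch of that behavior, using only the generated methods shown above (the group name is illustrative):

import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupRequest;

public class AddGroupRequestSketch {
  public static void main(String[] args) {
    // buildPartial() skips the required-field check, so isInitialized() can be inspected.
    AddGroupRequest incomplete = AddGroupRequest.newBuilder().buildPartial();
    System.out.println(incomplete.isInitialized());   // false: group_name is unset

    AddGroupRequest request = AddGroupRequest.newBuilder()
        .setGroupName("my_group")    // required string group_name = 1
        .build();                    // succeeds once the required field is set
    System.out.println(request.hasGroupName());   // true
    System.out.println(request.getGroupName());   // my_group
  }
}
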
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code AddGroupRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_AddGroupRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_AddGroupRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupRequest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void 
maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + groupName_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_AddGroupRequest_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.groupName_ = groupName_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupRequest.getDefaultInstance()) return this; + if (other.hasGroupName()) { + bitField0_ |= 0x00000001; + groupName_ = other.groupName_; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasGroupName()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string group_name = 1; + private java.lang.Object groupName_ = ""; + /** + * required string group_name = 1; + */ + public boolean hasGroupName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string group_name = 1; + */ + public java.lang.String getGroupName() { + java.lang.Object ref = groupName_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = 
((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + groupName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string group_name = 1; + */ + public com.google.protobuf.ByteString + getGroupNameBytes() { + java.lang.Object ref = groupName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + groupName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string group_name = 1; + */ + public Builder setGroupName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + groupName_ = value; + onChanged(); + return this; + } + /** + * required string group_name = 1; + */ + public Builder clearGroupName() { + bitField0_ = (bitField0_ & ~0x00000001); + groupName_ = getDefaultInstance().getGroupName(); + onChanged(); + return this; + } + /** + * required string group_name = 1; + */ + public Builder setGroupNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + groupName_ = value; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:AddGroupRequest) + } + + static { + defaultInstance = new AddGroupRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:AddGroupRequest) + } + + public interface AddGroupResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + } + /** + * Protobuf type {@code AddGroupResponse} + */ + public static final class AddGroupResponse extends + com.google.protobuf.GeneratedMessage + implements AddGroupResponseOrBuilder { + // Use AddGroupResponse.newBuilder() to construct. 
+ private AddGroupResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private AddGroupResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final AddGroupResponse defaultInstance; + public static AddGroupResponse getDefaultInstance() { + return defaultInstance; + } + + public AddGroupResponse getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private AddGroupResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_AddGroupResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_AddGroupResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupResponse.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public AddGroupResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new AddGroupResponse(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private void initFields() { + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected 
java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupResponse) obj; + + boolean result = true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static 
Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code AddGroupResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_AddGroupResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_AddGroupResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupResponse.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_AddGroupResponse_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupResponse build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupResponse(this); + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupResponse other) { + if (other == 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupResponse.getDefaultInstance()) return this; + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupResponse) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + // @@protoc_insertion_point(builder_scope:AddGroupResponse) + } + + static { + defaultInstance = new AddGroupResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:AddGroupResponse) + } + + public interface RemoveGroupRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string group_name = 1; + /** + * required string group_name = 1; + */ + boolean hasGroupName(); + /** + * required string group_name = 1; + */ + java.lang.String getGroupName(); + /** + * required string group_name = 1; + */ + com.google.protobuf.ByteString + getGroupNameBytes(); + } + /** + * Protobuf type {@code RemoveGroupRequest} + */ + public static final class RemoveGroupRequest extends + com.google.protobuf.GeneratedMessage + implements RemoveGroupRequestOrBuilder { + // Use RemoveGroupRequest.newBuilder() to construct. + private RemoveGroupRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private RemoveGroupRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final RemoveGroupRequest defaultInstance; + public static RemoveGroupRequest getDefaultInstance() { + return defaultInstance; + } + + public RemoveGroupRequest getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private RemoveGroupRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + groupName_ = input.readBytes(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public 
static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_RemoveGroupRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_RemoveGroupRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupRequest.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public RemoveGroupRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new RemoveGroupRequest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string group_name = 1; + public static final int GROUP_NAME_FIELD_NUMBER = 1; + private java.lang.Object groupName_; + /** + * required string group_name = 1; + */ + public boolean hasGroupName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string group_name = 1; + */ + public java.lang.String getGroupName() { + java.lang.Object ref = groupName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + groupName_ = s; + } + return s; + } + } + /** + * required string group_name = 1; + */ + public com.google.protobuf.ByteString + getGroupNameBytes() { + java.lang.Object ref = groupName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + groupName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private void initFields() { + groupName_ = ""; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasGroupName()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getGroupNameBytes()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getGroupNameBytes()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return 
true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupRequest) obj; + + boolean result = true; + result = result && (hasGroupName() == other.hasGroupName()); + if (hasGroupName()) { + result = result && getGroupName() + .equals(other.getGroupName()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasGroupName()) { + hash = (37 * hash) + GROUP_NAME_FIELD_NUMBER; + hash = (53 * hash) + getGroupName().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code RemoveGroupRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_RemoveGroupRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_RemoveGroupRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupRequest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + groupName_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_RemoveGroupRequest_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.groupName_ = groupName_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { 
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupRequest.getDefaultInstance()) return this; + if (other.hasGroupName()) { + bitField0_ |= 0x00000001; + groupName_ = other.groupName_; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasGroupName()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string group_name = 1; + private java.lang.Object groupName_ = ""; + /** + * required string group_name = 1; + */ + public boolean hasGroupName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string group_name = 1; + */ + public java.lang.String getGroupName() { + java.lang.Object ref = groupName_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + groupName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string group_name = 1; + */ + public com.google.protobuf.ByteString + getGroupNameBytes() { + java.lang.Object ref = groupName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + groupName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string group_name = 1; + */ + public Builder setGroupName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + groupName_ = value; + onChanged(); + return this; + } + /** + * required string group_name = 1; + */ + public Builder clearGroupName() { + bitField0_ = (bitField0_ & ~0x00000001); + groupName_ = getDefaultInstance().getGroupName(); + onChanged(); + return this; + } + /** + * required string group_name = 1; + */ + public Builder setGroupNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + groupName_ = value; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:RemoveGroupRequest) + } + + static { + defaultInstance = new RemoveGroupRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:RemoveGroupRequest) + } + + public interface RemoveGroupResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + } + /** + * Protobuf type {@code RemoveGroupResponse} + */ + public static 
final class RemoveGroupResponse extends + com.google.protobuf.GeneratedMessage + implements RemoveGroupResponseOrBuilder { + // Use RemoveGroupResponse.newBuilder() to construct. + private RemoveGroupResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private RemoveGroupResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final RemoveGroupResponse defaultInstance; + public static RemoveGroupResponse getDefaultInstance() { + return defaultInstance; + } + + public RemoveGroupResponse getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private RemoveGroupResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_RemoveGroupResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_RemoveGroupResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupResponse.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public RemoveGroupResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new RemoveGroupResponse(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private void initFields() { + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) 
return size; + + size = 0; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupResponse) obj; + + boolean result = true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code RemoveGroupResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_RemoveGroupResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_RemoveGroupResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupResponse.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_RemoveGroupResponse_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupResponse build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupResponse(this); + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupResponse.getDefaultInstance()) return this; + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupResponse) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + // @@protoc_insertion_point(builder_scope:RemoveGroupResponse) + } + + static { + defaultInstance = new RemoveGroupResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:RemoveGroupResponse) + } + + public interface BalanceGroupRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string group_name = 1; + /** + * required string group_name = 1; + */ + boolean hasGroupName(); + /** + * required string group_name = 1; + */ + java.lang.String getGroupName(); + /** + * required string group_name = 1; + */ + com.google.protobuf.ByteString + getGroupNameBytes(); + } + /** + * Protobuf type {@code BalanceGroupRequest} + */ + public static final class BalanceGroupRequest extends + com.google.protobuf.GeneratedMessage + implements BalanceGroupRequestOrBuilder { + // Use BalanceGroupRequest.newBuilder() to construct. 
+ private BalanceGroupRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private BalanceGroupRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final BalanceGroupRequest defaultInstance; + public static BalanceGroupRequest getDefaultInstance() { + return defaultInstance; + } + + public BalanceGroupRequest getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private BalanceGroupRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + groupName_ = input.readBytes(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_BalanceGroupRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_BalanceGroupRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupRequest.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public BalanceGroupRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new BalanceGroupRequest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string group_name = 1; + public static final int GROUP_NAME_FIELD_NUMBER = 1; + private java.lang.Object groupName_; + /** + * required string group_name = 1; + */ + public boolean hasGroupName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string group_name = 1; + */ + public java.lang.String getGroupName() { + java.lang.Object ref = groupName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if 
(bs.isValidUtf8()) { + groupName_ = s; + } + return s; + } + } + /** + * required string group_name = 1; + */ + public com.google.protobuf.ByteString + getGroupNameBytes() { + java.lang.Object ref = groupName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + groupName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private void initFields() { + groupName_ = ""; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasGroupName()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getGroupNameBytes()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getGroupNameBytes()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupRequest) obj; + + boolean result = true; + result = result && (hasGroupName() == other.hasGroupName()); + if (hasGroupName()) { + result = result && getGroupName() + .equals(other.getGroupName()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasGroupName()) { + hash = (37 * hash) + GROUP_NAME_FIELD_NUMBER; + hash = (53 * hash) + getGroupName().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupRequest parseFrom(byte[] data) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code BalanceGroupRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_BalanceGroupRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_BalanceGroupRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupRequest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + 
com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + groupName_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_BalanceGroupRequest_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.groupName_ = groupName_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupRequest.getDefaultInstance()) return this; + if (other.hasGroupName()) { + bitField0_ |= 0x00000001; + groupName_ = other.groupName_; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasGroupName()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string group_name = 1; + private java.lang.Object groupName_ = ""; + /** + * required string group_name = 1; + */ + public boolean hasGroupName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * 
required string group_name = 1; + */ + public java.lang.String getGroupName() { + java.lang.Object ref = groupName_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + groupName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string group_name = 1; + */ + public com.google.protobuf.ByteString + getGroupNameBytes() { + java.lang.Object ref = groupName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + groupName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string group_name = 1; + */ + public Builder setGroupName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + groupName_ = value; + onChanged(); + return this; + } + /** + * required string group_name = 1; + */ + public Builder clearGroupName() { + bitField0_ = (bitField0_ & ~0x00000001); + groupName_ = getDefaultInstance().getGroupName(); + onChanged(); + return this; + } + /** + * required string group_name = 1; + */ + public Builder setGroupNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + groupName_ = value; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:BalanceGroupRequest) + } + + static { + defaultInstance = new BalanceGroupRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:BalanceGroupRequest) + } + + public interface BalanceGroupResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required bool balanceRan = 1; + /** + * required bool balanceRan = 1; + */ + boolean hasBalanceRan(); + /** + * required bool balanceRan = 1; + */ + boolean getBalanceRan(); + } + /** + * Protobuf type {@code BalanceGroupResponse} + */ + public static final class BalanceGroupResponse extends + com.google.protobuf.GeneratedMessage + implements BalanceGroupResponseOrBuilder { + // Use BalanceGroupResponse.newBuilder() to construct. 
+ private BalanceGroupResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private BalanceGroupResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final BalanceGroupResponse defaultInstance; + public static BalanceGroupResponse getDefaultInstance() { + return defaultInstance; + } + + public BalanceGroupResponse getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private BalanceGroupResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + balanceRan_ = input.readBool(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_BalanceGroupResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_BalanceGroupResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupResponse.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public BalanceGroupResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new BalanceGroupResponse(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required bool balanceRan = 1; + public static final int BALANCERAN_FIELD_NUMBER = 1; + private boolean balanceRan_; + /** + * required bool balanceRan = 1; + */ + public boolean hasBalanceRan() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required bool balanceRan = 1; + */ + public boolean getBalanceRan() { + return balanceRan_; + } + + private void initFields() { + balanceRan_ = false; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return 
isInitialized == 1; + + if (!hasBalanceRan()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBool(1, balanceRan_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(1, balanceRan_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupResponse) obj; + + boolean result = true; + result = result && (hasBalanceRan() == other.hasBalanceRan()); + if (hasBalanceRan()) { + result = result && (getBalanceRan() + == other.getBalanceRan()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasBalanceRan()) { + hash = (37 * hash) + BALANCERAN_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getBalanceRan()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupResponse parseFrom( + 
java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupResponse parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupResponse parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupResponse parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupResponse parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupResponse prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code BalanceGroupResponse}
+     */
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupResponseOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_BalanceGroupResponse_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_BalanceGroupResponse_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupResponse.Builder.class);
+      }
+
+      // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupResponse.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+
+      public Builder clear() {
+        super.clear();
+        balanceRan_ = false;
+        bitField0_ = (bitField0_ & ~0x00000001);
+        return this;
+      }
+
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_BalanceGroupResponse_descriptor;
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupResponse getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupResponse.getDefaultInstance();
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupResponse build() {
+        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupResponse result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupResponse buildPartial() {
+        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupResponse(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        result.balanceRan_ = balanceRan_;
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupResponse) {
+          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupResponse)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupResponse other) {
+        if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupResponse.getDefaultInstance()) return this;
+        if (other.hasBalanceRan()) {
+          setBalanceRan(other.getBalanceRan());
+        }
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        if (!hasBalanceRan()) {
+
+          return false;
+        }
+        return true;
+      }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupResponse parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupResponse) e.getUnfinishedMessage();
+          throw e;
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      private int bitField0_;
+
+      // required bool balanceRan = 1;
+      private boolean balanceRan_ ;
+      /**
+       * required bool balanceRan = 1;
+       */
+      public boolean hasBalanceRan() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       * required bool balanceRan = 1;
+       */
+      public boolean getBalanceRan() {
+        return balanceRan_;
+      }
+      /**
+       * required bool balanceRan = 1;
+       */
+      public Builder setBalanceRan(boolean value) {
+        bitField0_ |= 0x00000001;
+        balanceRan_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * required bool balanceRan = 1;
+       */
+      public Builder clearBalanceRan() {
+        bitField0_ = (bitField0_ & ~0x00000001);
+        balanceRan_ = false;
+        onChanged();
+        return this;
+      }
+
+      // @@protoc_insertion_point(builder_scope:BalanceGroupResponse)
+    }
+
+    static {
+      defaultInstance = new BalanceGroupResponse(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:BalanceGroupResponse)
+  }
+
+  public interface ListGroupInfosRequestOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+  }
+  /**
+   * Protobuf type {@code ListGroupInfosRequest}
+   */
+  public static final class ListGroupInfosRequest extends
+      com.google.protobuf.GeneratedMessage
+      implements ListGroupInfosRequestOrBuilder {
+    // Use ListGroupInfosRequest.newBuilder() to construct.
+    private ListGroupInfosRequest(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private ListGroupInfosRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+    private static final ListGroupInfosRequest defaultInstance;
+    public static ListGroupInfosRequest getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public ListGroupInfosRequest getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    private final com.google.protobuf.UnknownFieldSet unknownFields;
+    @java.lang.Override
+    public final com.google.protobuf.UnknownFieldSet
+        getUnknownFields() {
+      return this.unknownFields;
+    }
+    private ListGroupInfosRequest(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      initFields();
+      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+          }
+        }
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new com.google.protobuf.InvalidProtocolBufferException(
+            e.getMessage()).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_ListGroupInfosRequest_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_ListGroupInfosRequest_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosRequest.Builder.class);
+    }
+
+    public static com.google.protobuf.Parser<ListGroupInfosRequest> PARSER =
+        new com.google.protobuf.AbstractParser<ListGroupInfosRequest>() {
+      public ListGroupInfosRequest parsePartialFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return new ListGroupInfosRequest(input, extensionRegistry);
+      }
+    };
+
+    @java.lang.Override
+    public com.google.protobuf.Parser<ListGroupInfosRequest> getParserForType() {
+      return PARSER;
+    }
+
+    private void initFields() {
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void
writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosRequest) obj; + + boolean result = true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, 
extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code ListGroupInfosRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_ListGroupInfosRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_ListGroupInfosRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosRequest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_ListGroupInfosRequest_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosRequest buildPartial() { + 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosRequest(this); + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosRequest.getDefaultInstance()) return this; + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + // @@protoc_insertion_point(builder_scope:ListGroupInfosRequest) + } + + static { + defaultInstance = new ListGroupInfosRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:ListGroupInfosRequest) + } + + public interface ListGroupInfosResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // repeated .GroupInfo group_info = 1; + /** + * repeated .GroupInfo group_info = 1; + */ + java.util.List + getGroupInfoList(); + /** + * repeated .GroupInfo group_info = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo getGroupInfo(int index); + /** + * repeated .GroupInfo group_info = 1; + */ + int getGroupInfoCount(); + /** + * repeated .GroupInfo group_info = 1; + */ + java.util.List + getGroupInfoOrBuilderList(); + /** + * repeated .GroupInfo group_info = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfoOrBuilder getGroupInfoOrBuilder( + int index); + } + /** + * Protobuf type {@code ListGroupInfosResponse} + */ + public static final class ListGroupInfosResponse extends + com.google.protobuf.GeneratedMessage + implements ListGroupInfosResponseOrBuilder { + // Use ListGroupInfosResponse.newBuilder() to construct. 
+ private ListGroupInfosResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private ListGroupInfosResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final ListGroupInfosResponse defaultInstance; + public static ListGroupInfosResponse getDefaultInstance() { + return defaultInstance; + } + + public ListGroupInfosResponse getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ListGroupInfosResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + groupInfo_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; + } + groupInfo_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.PARSER, extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + groupInfo_ = java.util.Collections.unmodifiableList(groupInfo_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_ListGroupInfosResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_ListGroupInfosResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosResponse.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public ListGroupInfosResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ListGroupInfosResponse(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + // repeated .GroupInfo group_info = 1; + public static final int GROUP_INFO_FIELD_NUMBER = 1; + private java.util.List groupInfo_; + /** + * repeated .GroupInfo group_info = 1; + */ + public java.util.List getGroupInfoList() { + return groupInfo_; + } + 
/** + * repeated .GroupInfo group_info = 1; + */ + public java.util.List + getGroupInfoOrBuilderList() { + return groupInfo_; + } + /** + * repeated .GroupInfo group_info = 1; + */ + public int getGroupInfoCount() { + return groupInfo_.size(); + } + /** + * repeated .GroupInfo group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo getGroupInfo(int index) { + return groupInfo_.get(index); + } + /** + * repeated .GroupInfo group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfoOrBuilder getGroupInfoOrBuilder( + int index) { + return groupInfo_.get(index); + } + + private void initFields() { + groupInfo_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + for (int i = 0; i < getGroupInfoCount(); i++) { + if (!getGroupInfo(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + for (int i = 0; i < groupInfo_.size(); i++) { + output.writeMessage(1, groupInfo_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < groupInfo_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, groupInfo_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosResponse) obj; + + boolean result = true; + result = result && getGroupInfoList() + .equals(other.getGroupInfoList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (getGroupInfoCount() > 0) { + hash = (37 * hash) + GROUP_INFO_FIELD_NUMBER; + hash = (53 * hash) + getGroupInfoList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code ListGroupInfosResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_ListGroupInfosResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_ListGroupInfosResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosResponse.class, 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosResponse.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getGroupInfoFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (groupInfoBuilder_ == null) { + groupInfo_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + } else { + groupInfoBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_ListGroupInfosResponse_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosResponse build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosResponse(this); + int from_bitField0_ = bitField0_; + if (groupInfoBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + groupInfo_ = java.util.Collections.unmodifiableList(groupInfo_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.groupInfo_ = groupInfo_; + } else { + result.groupInfo_ = groupInfoBuilder_.build(); + } + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosResponse.getDefaultInstance()) return this; + if (groupInfoBuilder_ == null) { + if (!other.groupInfo_.isEmpty()) { + if (groupInfo_.isEmpty()) { + groupInfo_ = other.groupInfo_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureGroupInfoIsMutable(); + groupInfo_.addAll(other.groupInfo_); + } + onChanged(); + } + } else { + if (!other.groupInfo_.isEmpty()) { + if (groupInfoBuilder_.isEmpty()) { + groupInfoBuilder_.dispose(); + groupInfoBuilder_ = null; + groupInfo_ = other.groupInfo_; + bitField0_ = (bitField0_ & ~0x00000001); + groupInfoBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
+ getGroupInfoFieldBuilder() : null; + } else { + groupInfoBuilder_.addAllMessages(other.groupInfo_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + for (int i = 0; i < getGroupInfoCount(); i++) { + if (!getGroupInfo(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosResponse) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // repeated .GroupInfo group_info = 1; + private java.util.List groupInfo_ = + java.util.Collections.emptyList(); + private void ensureGroupInfoIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + groupInfo_ = new java.util.ArrayList(groupInfo_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfoOrBuilder> groupInfoBuilder_; + + /** + * repeated .GroupInfo group_info = 1; + */ + public java.util.List getGroupInfoList() { + if (groupInfoBuilder_ == null) { + return java.util.Collections.unmodifiableList(groupInfo_); + } else { + return groupInfoBuilder_.getMessageList(); + } + } + /** + * repeated .GroupInfo group_info = 1; + */ + public int getGroupInfoCount() { + if (groupInfoBuilder_ == null) { + return groupInfo_.size(); + } else { + return groupInfoBuilder_.getCount(); + } + } + /** + * repeated .GroupInfo group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo getGroupInfo(int index) { + if (groupInfoBuilder_ == null) { + return groupInfo_.get(index); + } else { + return groupInfoBuilder_.getMessage(index); + } + } + /** + * repeated .GroupInfo group_info = 1; + */ + public Builder setGroupInfo( + int index, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo value) { + if (groupInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureGroupInfoIsMutable(); + groupInfo_.set(index, value); + onChanged(); + } else { + groupInfoBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .GroupInfo group_info = 1; + */ + public Builder setGroupInfo( + int index, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.Builder builderForValue) { + if (groupInfoBuilder_ == null) { + ensureGroupInfoIsMutable(); + groupInfo_.set(index, builderForValue.build()); + onChanged(); + } else { + groupInfoBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .GroupInfo group_info = 1; + */ + public Builder addGroupInfo(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo value) { + if (groupInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureGroupInfoIsMutable(); + groupInfo_.add(value); + 
onChanged(); + } else { + groupInfoBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .GroupInfo group_info = 1; + */ + public Builder addGroupInfo( + int index, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo value) { + if (groupInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureGroupInfoIsMutable(); + groupInfo_.add(index, value); + onChanged(); + } else { + groupInfoBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .GroupInfo group_info = 1; + */ + public Builder addGroupInfo( + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.Builder builderForValue) { + if (groupInfoBuilder_ == null) { + ensureGroupInfoIsMutable(); + groupInfo_.add(builderForValue.build()); + onChanged(); + } else { + groupInfoBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .GroupInfo group_info = 1; + */ + public Builder addGroupInfo( + int index, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.Builder builderForValue) { + if (groupInfoBuilder_ == null) { + ensureGroupInfoIsMutable(); + groupInfo_.add(index, builderForValue.build()); + onChanged(); + } else { + groupInfoBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .GroupInfo group_info = 1; + */ + public Builder addAllGroupInfo( + java.lang.Iterable values) { + if (groupInfoBuilder_ == null) { + ensureGroupInfoIsMutable(); + super.addAll(values, groupInfo_); + onChanged(); + } else { + groupInfoBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .GroupInfo group_info = 1; + */ + public Builder clearGroupInfo() { + if (groupInfoBuilder_ == null) { + groupInfo_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + groupInfoBuilder_.clear(); + } + return this; + } + /** + * repeated .GroupInfo group_info = 1; + */ + public Builder removeGroupInfo(int index) { + if (groupInfoBuilder_ == null) { + ensureGroupInfoIsMutable(); + groupInfo_.remove(index); + onChanged(); + } else { + groupInfoBuilder_.remove(index); + } + return this; + } + /** + * repeated .GroupInfo group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.Builder getGroupInfoBuilder( + int index) { + return getGroupInfoFieldBuilder().getBuilder(index); + } + /** + * repeated .GroupInfo group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfoOrBuilder getGroupInfoOrBuilder( + int index) { + if (groupInfoBuilder_ == null) { + return groupInfo_.get(index); } else { + return groupInfoBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .GroupInfo group_info = 1; + */ + public java.util.List + getGroupInfoOrBuilderList() { + if (groupInfoBuilder_ != null) { + return groupInfoBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(groupInfo_); + } + } + /** + * repeated .GroupInfo group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.Builder addGroupInfoBuilder() { + return getGroupInfoFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.getDefaultInstance()); + } + /** + * repeated .GroupInfo group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.Builder addGroupInfoBuilder( + int index) { + return getGroupInfoFieldBuilder().addBuilder( + index, 
org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.getDefaultInstance()); + } + /** + * repeated .GroupInfo group_info = 1; + */ + public java.util.List + getGroupInfoBuilderList() { + return getGroupInfoFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfoOrBuilder> + getGroupInfoFieldBuilder() { + if (groupInfoBuilder_ == null) { + groupInfoBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfoOrBuilder>( + groupInfo_, + ((bitField0_ & 0x00000001) == 0x00000001), + getParentForChildren(), + isClean()); + groupInfo_ = null; + } + return groupInfoBuilder_; + } + + // @@protoc_insertion_point(builder_scope:ListGroupInfosResponse) + } + + static { + defaultInstance = new ListGroupInfosResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:ListGroupInfosResponse) + } + + public interface GetGroupInfoOfServerRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .HostPort server = 1; + /** + * required .HostPort server = 1; + */ + boolean hasServer(); + /** + * required .HostPort server = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort getServer(); + /** + * required .HostPort server = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPortOrBuilder getServerOrBuilder(); + } + /** + * Protobuf type {@code GetGroupInfoOfServerRequest} + */ + public static final class GetGroupInfoOfServerRequest extends + com.google.protobuf.GeneratedMessage + implements GetGroupInfoOfServerRequestOrBuilder { + // Use GetGroupInfoOfServerRequest.newBuilder() to construct. 
+ private GetGroupInfoOfServerRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private GetGroupInfoOfServerRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final GetGroupInfoOfServerRequest defaultInstance; + public static GetGroupInfoOfServerRequest getDefaultInstance() { + return defaultInstance; + } + + public GetGroupInfoOfServerRequest getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private GetGroupInfoOfServerRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = server_.toBuilder(); + } + server_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(server_); + server_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetGroupInfoOfServerRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetGroupInfoOfServerRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerRequest.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public GetGroupInfoOfServerRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new GetGroupInfoOfServerRequest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required .HostPort server = 1; + public static final int SERVER_FIELD_NUMBER = 1; + private 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort server_; + /** + * required .HostPort server = 1; + */ + public boolean hasServer() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .HostPort server = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort getServer() { + return server_; + } + /** + * required .HostPort server = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPortOrBuilder getServerOrBuilder() { + return server_; + } + + private void initFields() { + server_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasServer()) { + memoizedIsInitialized = 0; + return false; + } + if (!getServer().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, server_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, server_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerRequest) obj; + + boolean result = true; + result = result && (hasServer() == other.hasServer()); + if (hasServer()) { + result = result && getServer() + .equals(other.getServer()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasServer()) { + hash = (37 * hash) + SERVER_FIELD_NUMBER; + hash = (53 * hash) + getServer().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code GetGroupInfoOfServerRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetGroupInfoOfServerRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetGroupInfoOfServerRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerRequest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getServerFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (serverBuilder_ == null) { + server_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.getDefaultInstance(); + } else { + serverBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetGroupInfoOfServerRequest_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (serverBuilder_ == null) { + result.server_ = server_; + } else { + result.server_ = serverBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerRequest.getDefaultInstance()) return this; + if (other.hasServer()) { + mergeServer(other.getServer()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasServer()) { + + return false; + } + if (!getServer().isInitialized()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .HostPort server = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort server_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPortOrBuilder> serverBuilder_; + /** + * required .HostPort server = 1; + */ + public boolean hasServer() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .HostPort server = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort getServer() { + if (serverBuilder_ == null) { + return server_; + } else { + return serverBuilder_.getMessage(); + } + } + /** + * required .HostPort server = 1; + */ + public Builder setServer(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort value) { + if (serverBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + server_ = value; + onChanged(); + } else { + serverBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .HostPort server = 1; + */ + public Builder setServer( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.Builder builderForValue) { + if (serverBuilder_ == null) { + server_ = builderForValue.build(); + onChanged(); + } else { + serverBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .HostPort server = 1; + */ + public Builder mergeServer(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort value) { + if (serverBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + server_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.getDefaultInstance()) { + server_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.newBuilder(server_).mergeFrom(value).buildPartial(); + } else { + server_ = value; + } + onChanged(); + } else { + serverBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .HostPort server = 1; + */ + public Builder clearServer() { + if (serverBuilder_ == null) { + server_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.getDefaultInstance(); + onChanged(); + } else { + serverBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * required .HostPort server = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.Builder getServerBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getServerFieldBuilder().getBuilder(); + } + /** + * required .HostPort server = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPortOrBuilder getServerOrBuilder() { + if (serverBuilder_ != null) { + return 
serverBuilder_.getMessageOrBuilder(); + } else { + return server_; + } + } + /** + * required .HostPort server = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPortOrBuilder> + getServerFieldBuilder() { + if (serverBuilder_ == null) { + serverBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPortOrBuilder>( + server_, + getParentForChildren(), + isClean()); + server_ = null; + } + return serverBuilder_; + } + + // @@protoc_insertion_point(builder_scope:GetGroupInfoOfServerRequest) + } + + static { + defaultInstance = new GetGroupInfoOfServerRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:GetGroupInfoOfServerRequest) + } + + public interface GetGroupInfoOfServerResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // optional .GroupInfo group_info = 1; + /** + * optional .GroupInfo group_info = 1; + */ + boolean hasGroupInfo(); + /** + * optional .GroupInfo group_info = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo getGroupInfo(); + /** + * optional .GroupInfo group_info = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfoOrBuilder getGroupInfoOrBuilder(); + } + /** + * Protobuf type {@code GetGroupInfoOfServerResponse} + */ + public static final class GetGroupInfoOfServerResponse extends + com.google.protobuf.GeneratedMessage + implements GetGroupInfoOfServerResponseOrBuilder { + // Use GetGroupInfoOfServerResponse.newBuilder() to construct. 
+ private GetGroupInfoOfServerResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private GetGroupInfoOfServerResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final GetGroupInfoOfServerResponse defaultInstance; + public static GetGroupInfoOfServerResponse getDefaultInstance() { + return defaultInstance; + } + + public GetGroupInfoOfServerResponse getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private GetGroupInfoOfServerResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = groupInfo_.toBuilder(); + } + groupInfo_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(groupInfo_); + groupInfo_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetGroupInfoOfServerResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetGroupInfoOfServerResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerResponse.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public GetGroupInfoOfServerResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new GetGroupInfoOfServerResponse(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // optional .GroupInfo group_info = 1; + public static final int GROUP_INFO_FIELD_NUMBER = 1; + private 
org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo groupInfo_; + /** + * optional .GroupInfo group_info = 1; + */ + public boolean hasGroupInfo() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional .GroupInfo group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo getGroupInfo() { + return groupInfo_; + } + /** + * optional .GroupInfo group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfoOrBuilder getGroupInfoOrBuilder() { + return groupInfo_; + } + + private void initFields() { + groupInfo_ = org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (hasGroupInfo()) { + if (!getGroupInfo().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, groupInfo_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, groupInfo_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerResponse) obj; + + boolean result = true; + result = result && (hasGroupInfo() == other.hasGroupInfo()); + if (hasGroupInfo()) { + result = result && getGroupInfo() + .equals(other.getGroupInfo()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasGroupInfo()) { + hash = (37 * hash) + GROUP_INFO_FIELD_NUMBER; + hash = (53 * hash) + getGroupInfo().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerResponse parseFrom( + com.google.protobuf.ByteString data, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code GetGroupInfoOfServerResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetGroupInfoOfServerResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetGroupInfoOfServerResponse_fieldAccessorTable + 
.ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerResponse.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getGroupInfoFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (groupInfoBuilder_ == null) { + groupInfo_ = org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.getDefaultInstance(); + } else { + groupInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetGroupInfoOfServerResponse_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerResponse build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerResponse(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (groupInfoBuilder_ == null) { + result.groupInfo_ = groupInfo_; + } else { + result.groupInfo_ = groupInfoBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerResponse.getDefaultInstance()) return this; + if (other.hasGroupInfo()) { + mergeGroupInfo(other.getGroupInfo()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (hasGroupInfo()) { + if (!getGroupInfo().isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + 
com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerResponse) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // optional .GroupInfo group_info = 1; + private org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo groupInfo_ = org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfoOrBuilder> groupInfoBuilder_; + /** + * optional .GroupInfo group_info = 1; + */ + public boolean hasGroupInfo() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional .GroupInfo group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo getGroupInfo() { + if (groupInfoBuilder_ == null) { + return groupInfo_; + } else { + return groupInfoBuilder_.getMessage(); + } + } + /** + * optional .GroupInfo group_info = 1; + */ + public Builder setGroupInfo(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo value) { + if (groupInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + groupInfo_ = value; + onChanged(); + } else { + groupInfoBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * optional .GroupInfo group_info = 1; + */ + public Builder setGroupInfo( + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.Builder builderForValue) { + if (groupInfoBuilder_ == null) { + groupInfo_ = builderForValue.build(); + onChanged(); + } else { + groupInfoBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * optional .GroupInfo group_info = 1; + */ + public Builder mergeGroupInfo(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo value) { + if (groupInfoBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + groupInfo_ != org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.getDefaultInstance()) { + groupInfo_ = + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.newBuilder(groupInfo_).mergeFrom(value).buildPartial(); + } else { + groupInfo_ = value; + } + onChanged(); + } else { + groupInfoBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * optional .GroupInfo group_info = 1; + */ + public Builder clearGroupInfo() { + if (groupInfoBuilder_ == null) { + groupInfo_ = org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.getDefaultInstance(); + onChanged(); + } else { + groupInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * optional .GroupInfo group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.Builder getGroupInfoBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + 
return getGroupInfoFieldBuilder().getBuilder(); + } + /** + * optional .GroupInfo group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfoOrBuilder getGroupInfoOrBuilder() { + if (groupInfoBuilder_ != null) { + return groupInfoBuilder_.getMessageOrBuilder(); + } else { + return groupInfo_; + } + } + /** + * optional .GroupInfo group_info = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfoOrBuilder> + getGroupInfoFieldBuilder() { + if (groupInfoBuilder_ == null) { + groupInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfoOrBuilder>( + groupInfo_, + getParentForChildren(), + isClean()); + groupInfo_ = null; + } + return groupInfoBuilder_; + } + + // @@protoc_insertion_point(builder_scope:GetGroupInfoOfServerResponse) + } + + static { + defaultInstance = new GetGroupInfoOfServerResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:GetGroupInfoOfServerResponse) + } + /** * Protobuf service {@code MasterService} */ @@ -42851,6 +53597,78 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest request, com.google.protobuf.RpcCallback done); + /** + * rpc GetGroupInfo(.GetGroupInfoRequest) returns (.GetGroupInfoResponse); + */ + public abstract void getGroupInfo( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc GetGroupInfoOfTable(.GetGroupInfoOfTableRequest) returns (.GetGroupInfoOfTableResponse); + */ + public abstract void getGroupInfoOfTable( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc GetGroupInfoOfServer(.GetGroupInfoOfServerRequest) returns (.GetGroupInfoOfServerResponse); + */ + public abstract void getGroupInfoOfServer( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc MoveServers(.MoveServersRequest) returns (.MoveServersResponse); + */ + public abstract void moveServers( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc MoveTables(.MoveTablesRequest) returns (.MoveTablesResponse); + */ + public abstract void moveTables( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc AddGroup(.AddGroupRequest) returns (.AddGroupResponse); + */ + public abstract void addGroup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc RemoveGroup(.RemoveGroupRequest) returns 
(.RemoveGroupResponse); + */ + public abstract void removeGroup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc BalanceGroup(.BalanceGroupRequest) returns (.BalanceGroupResponse); + */ + public abstract void balanceGroup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc ListGroupInfos(.ListGroupInfosRequest) returns (.ListGroupInfosResponse); + */ + public abstract void listGroupInfos( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosRequest request, + com.google.protobuf.RpcCallback done); + } public static com.google.protobuf.Service newReflectiveService( @@ -43208,6 +54026,78 @@ public final class MasterProtos { impl.getSecurityCapabilities(controller, request, done); } + @java.lang.Override + public void getGroupInfo( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoRequest request, + com.google.protobuf.RpcCallback done) { + impl.getGroupInfo(controller, request, done); + } + + @java.lang.Override + public void getGroupInfoOfTable( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableRequest request, + com.google.protobuf.RpcCallback done) { + impl.getGroupInfoOfTable(controller, request, done); + } + + @java.lang.Override + public void getGroupInfoOfServer( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerRequest request, + com.google.protobuf.RpcCallback done) { + impl.getGroupInfoOfServer(controller, request, done); + } + + @java.lang.Override + public void moveServers( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersRequest request, + com.google.protobuf.RpcCallback done) { + impl.moveServers(controller, request, done); + } + + @java.lang.Override + public void moveTables( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesRequest request, + com.google.protobuf.RpcCallback done) { + impl.moveTables(controller, request, done); + } + + @java.lang.Override + public void addGroup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupRequest request, + com.google.protobuf.RpcCallback done) { + impl.addGroup(controller, request, done); + } + + @java.lang.Override + public void removeGroup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupRequest request, + com.google.protobuf.RpcCallback done) { + impl.removeGroup(controller, request, done); + } + + @java.lang.Override + public void balanceGroup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupRequest request, + com.google.protobuf.RpcCallback done) { + impl.balanceGroup(controller, request, done); + } + + @java.lang.Override + public void listGroupInfos( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosRequest request, + com.google.protobuf.RpcCallback done) { + 
impl.listGroupInfos(controller, request, done); + } + }; } @@ -43318,6 +54208,24 @@ public final class MasterProtos { return impl.truncateTable(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.TruncateTableRequest)request); case 43: return impl.getSecurityCapabilities(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest)request); + case 44: + return impl.getGroupInfo(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoRequest)request); + case 45: + return impl.getGroupInfoOfTable(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableRequest)request); + case 46: + return impl.getGroupInfoOfServer(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerRequest)request); + case 47: + return impl.moveServers(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersRequest)request); + case 48: + return impl.moveTables(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesRequest)request); + case 49: + return impl.addGroup(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupRequest)request); + case 50: + return impl.removeGroup(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupRequest)request); + case 51: + return impl.balanceGroup(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupRequest)request); + case 52: + return impl.listGroupInfos(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosRequest)request); default: throw new java.lang.AssertionError("Can't get here."); } @@ -43420,6 +54328,24 @@ public final class MasterProtos { return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.TruncateTableRequest.getDefaultInstance(); case 43: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest.getDefaultInstance(); + case 44: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoRequest.getDefaultInstance(); + case 45: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableRequest.getDefaultInstance(); + case 46: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerRequest.getDefaultInstance(); + case 47: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersRequest.getDefaultInstance(); + case 48: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesRequest.getDefaultInstance(); + case 49: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupRequest.getDefaultInstance(); + case 50: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupRequest.getDefaultInstance(); + case 51: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupRequest.getDefaultInstance(); + case 52: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -43522,6 +54448,24 @@ public final class MasterProtos { return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.TruncateTableResponse.getDefaultInstance(); case 43: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.getDefaultInstance(); + case 44: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoResponse.getDefaultInstance(); + case 
45: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableResponse.getDefaultInstance(); + case 46: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerResponse.getDefaultInstance(); + case 47: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersResponse.getDefaultInstance(); + case 48: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesResponse.getDefaultInstance(); + case 49: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupResponse.getDefaultInstance(); + case 50: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupResponse.getDefaultInstance(); + case 51: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupResponse.getDefaultInstance(); + case 52: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -44084,6 +55028,78 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest request, com.google.protobuf.RpcCallback done); + /** + * rpc GetGroupInfo(.GetGroupInfoRequest) returns (.GetGroupInfoResponse); + */ + public abstract void getGroupInfo( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc GetGroupInfoOfTable(.GetGroupInfoOfTableRequest) returns (.GetGroupInfoOfTableResponse); + */ + public abstract void getGroupInfoOfTable( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc GetGroupInfoOfServer(.GetGroupInfoOfServerRequest) returns (.GetGroupInfoOfServerResponse); + */ + public abstract void getGroupInfoOfServer( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc MoveServers(.MoveServersRequest) returns (.MoveServersResponse); + */ + public abstract void moveServers( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc MoveTables(.MoveTablesRequest) returns (.MoveTablesResponse); + */ + public abstract void moveTables( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc AddGroup(.AddGroupRequest) returns (.AddGroupResponse); + */ + public abstract void addGroup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc RemoveGroup(.RemoveGroupRequest) returns (.RemoveGroupResponse); + */ + public abstract void removeGroup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc BalanceGroup(.BalanceGroupRequest) returns (.BalanceGroupResponse); + */ + public abstract void balanceGroup( + com.google.protobuf.RpcController controller, + 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc ListGroupInfos(.ListGroupInfosRequest) returns (.ListGroupInfosResponse); + */ + public abstract void listGroupInfos( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosRequest request, + com.google.protobuf.RpcCallback done); + public static final com.google.protobuf.Descriptors.ServiceDescriptor getDescriptor() { @@ -44316,14 +55332,59 @@ public final class MasterProtos { com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 42: - this.truncateTable(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.TruncateTableRequest)request, - com.google.protobuf.RpcUtil.specializeCallback( + case 42: + this.truncateTable(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.TruncateTableRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 43: + this.getSecurityCapabilities(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 44: + this.getGroupInfo(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 45: + this.getGroupInfoOfTable(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 46: + this.getGroupInfoOfServer(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 47: + this.moveServers(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 48: + this.moveTables(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 49: + this.addGroup(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 50: + this.removeGroup(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 51: + this.balanceGroup(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 43: - this.getSecurityCapabilities(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest)request, - com.google.protobuf.RpcUtil.specializeCallback( + case 52: + this.listGroupInfos(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( done)); return; default: @@ -44428,6 +55489,24 @@ public final class MasterProtos { return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.TruncateTableRequest.getDefaultInstance(); case 43: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest.getDefaultInstance(); + case 
44: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoRequest.getDefaultInstance(); + case 45: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableRequest.getDefaultInstance(); + case 46: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerRequest.getDefaultInstance(); + case 47: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersRequest.getDefaultInstance(); + case 48: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesRequest.getDefaultInstance(); + case 49: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupRequest.getDefaultInstance(); + case 50: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupRequest.getDefaultInstance(); + case 51: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupRequest.getDefaultInstance(); + case 52: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -44530,6 +55609,24 @@ public final class MasterProtos { return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.TruncateTableResponse.getDefaultInstance(); case 43: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.getDefaultInstance(); + case 44: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoResponse.getDefaultInstance(); + case 45: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableResponse.getDefaultInstance(); + case 46: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerResponse.getDefaultInstance(); + case 47: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersResponse.getDefaultInstance(); + case 48: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesResponse.getDefaultInstance(); + case 49: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupResponse.getDefaultInstance(); + case 50: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupResponse.getDefaultInstance(); + case 51: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupResponse.getDefaultInstance(); + case 52: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -45210,6 +56307,141 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.getDefaultInstance())); } + + public void getGroupInfo( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(44), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoResponse.class, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoResponse.getDefaultInstance())); + } + + public void getGroupInfoOfTable( + com.google.protobuf.RpcController 
controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(45), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableResponse.class, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableResponse.getDefaultInstance())); + } + + public void getGroupInfoOfServer( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(46), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerResponse.class, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerResponse.getDefaultInstance())); + } + + public void moveServers( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(47), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersResponse.class, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersResponse.getDefaultInstance())); + } + + public void moveTables( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(48), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesResponse.class, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesResponse.getDefaultInstance())); + } + + public void addGroup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(49), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupResponse.class, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupResponse.getDefaultInstance())); + } + + public void removeGroup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(50), + controller, + request, + 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupResponse.class, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupResponse.getDefaultInstance())); + } + + public void balanceGroup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(51), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupResponse.class, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupResponse.getDefaultInstance())); + } + + public void listGroupInfos( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(52), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosResponse.class, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosResponse.getDefaultInstance())); + } } public static BlockingInterface newBlockingStub( @@ -45437,6 +56669,51 @@ public final class MasterProtos { com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest request) throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoResponse getGroupInfo( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoRequest request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableResponse getGroupInfoOfTable( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableRequest request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerResponse getGroupInfoOfServer( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerRequest request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersResponse moveServers( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersRequest request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesResponse moveTables( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesRequest request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupResponse addGroup( + com.google.protobuf.RpcController controller, + 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupRequest request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupResponse removeGroup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupRequest request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupResponse balanceGroup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupRequest request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosResponse listGroupInfos( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosRequest request) + throws com.google.protobuf.ServiceException; } private static final class BlockingStub implements BlockingInterface { @@ -45973,6 +57250,114 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.getDefaultInstance()); } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoResponse getGroupInfo( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(44), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoResponse.getDefaultInstance()); + } + + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableResponse getGroupInfoOfTable( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(45), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableResponse.getDefaultInstance()); + } + + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerResponse getGroupInfoOfServer( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(46), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerResponse.getDefaultInstance()); + } + + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersResponse moveServers( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(47), + controller, + request, + 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersResponse.getDefaultInstance()); + } + + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesResponse moveTables( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(48), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesResponse.getDefaultInstance()); + } + + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupResponse addGroup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(49), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupResponse.getDefaultInstance()); + } + + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupResponse removeGroup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(50), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupResponse.getDefaultInstance()); + } + + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupResponse balanceGroup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(51), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupResponse.getDefaultInstance()); + } + + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosResponse listGroupInfos( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(52), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosResponse.getDefaultInstance()); + } + } // @@protoc_insertion_point(class_scope:MasterService) @@ -46408,6 +57793,106 @@ public final class MasterProtos { private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_SecurityCapabilitiesResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_ListTablesOfGroupRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_ListTablesOfGroupRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + 
internal_static_ListTablesOfGroupResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_ListTablesOfGroupResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_GetGroupInfoRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_GetGroupInfoRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_GetGroupInfoResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_GetGroupInfoResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_GetGroupInfoOfTableRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_GetGroupInfoOfTableRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_GetGroupInfoOfTableResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_GetGroupInfoOfTableResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_MoveServersRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_MoveServersRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_MoveServersResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_MoveServersResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_MoveTablesRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_MoveTablesRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_MoveTablesResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_MoveTablesResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_AddGroupRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_AddGroupRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_AddGroupResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_AddGroupResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_RemoveGroupRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_RemoveGroupRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_RemoveGroupResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_RemoveGroupResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_BalanceGroupRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_BalanceGroupRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_BalanceGroupResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + 
internal_static_BalanceGroupResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_ListGroupInfosRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_ListGroupInfosRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_ListGroupInfosResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_ListGroupInfosResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_GetGroupInfoOfServerRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_GetGroupInfoOfServerRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_GetGroupInfoOfServerResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_GetGroupInfoOfServerResponse_fieldAccessorTable; public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { @@ -46418,203 +57903,241 @@ public final class MasterProtos { static { java.lang.String[] descriptorData = { "\n\014Master.proto\032\013HBase.proto\032\014Client.prot" + - "o\032\023ClusterStatus.proto\"`\n\020AddColumnReque" + - "st\022\036\n\ntable_name\030\001 \002(\0132\n.TableName\022,\n\017co" + - "lumn_families\030\002 \002(\0132\023.ColumnFamilySchema" + - "\"\023\n\021AddColumnResponse\"J\n\023DeleteColumnReq" + - "uest\022\036\n\ntable_name\030\001 \002(\0132\n.TableName\022\023\n\013" + - "column_name\030\002 \002(\014\"\026\n\024DeleteColumnRespons" + - "e\"c\n\023ModifyColumnRequest\022\036\n\ntable_name\030\001" + - " \002(\0132\n.TableName\022,\n\017column_families\030\002 \002(" + - "\0132\023.ColumnFamilySchema\"\026\n\024ModifyColumnRe", - "sponse\"\\\n\021MoveRegionRequest\022 \n\006region\030\001 " + - "\002(\0132\020.RegionSpecifier\022%\n\020dest_server_nam" + - "e\030\002 \001(\0132\013.ServerName\"\024\n\022MoveRegionRespon" + - "se\"\200\001\n\035DispatchMergingRegionsRequest\022\"\n\010" + - "region_a\030\001 \002(\0132\020.RegionSpecifier\022\"\n\010regi" + - "on_b\030\002 \002(\0132\020.RegionSpecifier\022\027\n\010forcible" + - "\030\003 \001(\010:\005false\" \n\036DispatchMergingRegionsR" + - "esponse\"7\n\023AssignRegionRequest\022 \n\006region" + - "\030\001 \002(\0132\020.RegionSpecifier\"\026\n\024AssignRegion" + - "Response\"O\n\025UnassignRegionRequest\022 \n\006reg", - "ion\030\001 \002(\0132\020.RegionSpecifier\022\024\n\005force\030\002 \001" + - "(\010:\005false\"\030\n\026UnassignRegionResponse\"8\n\024O" + - "fflineRegionRequest\022 \n\006region\030\001 \002(\0132\020.Re" + - "gionSpecifier\"\027\n\025OfflineRegionResponse\"L" + - "\n\022CreateTableRequest\022\"\n\014table_schema\030\001 \002" + - "(\0132\014.TableSchema\022\022\n\nsplit_keys\030\002 \003(\014\"\025\n\023" + - "CreateTableResponse\"4\n\022DeleteTableReques" + - "t\022\036\n\ntable_name\030\001 \002(\0132\n.TableName\"\025\n\023Del" + - "eteTableResponse\"4\n\022EnableTableRequest\022\036" + - "\n\ntable_name\030\001 \002(\0132\n.TableName\"\025\n\023Enable", - "TableResponse\"5\n\023DisableTableRequest\022\036\n\n" + - "table_name\030\001 \002(\0132\n.TableName\"\026\n\024DisableT" + - "ableResponse\"X\n\022ModifyTableRequest\022\036\n\nta" + - "ble_name\030\001 \002(\0132\n.TableName\022\"\n\014table_sche" + - "ma\030\002 
\002(\0132\014.TableSchema\"\025\n\023ModifyTableRes" + - "ponse\"K\n\026CreateNamespaceRequest\0221\n\023names" + - "paceDescriptor\030\001 \002(\0132\024.NamespaceDescript" + - "or\"\031\n\027CreateNamespaceResponse\"/\n\026DeleteN" + - "amespaceRequest\022\025\n\rnamespaceName\030\001 \002(\t\"\031" + - "\n\027DeleteNamespaceResponse\"K\n\026ModifyNames", - "paceRequest\0221\n\023namespaceDescriptor\030\001 \002(\013" + - "2\024.NamespaceDescriptor\"\031\n\027ModifyNamespac" + - "eResponse\"6\n\035GetNamespaceDescriptorReque" + - "st\022\025\n\rnamespaceName\030\001 \002(\t\"S\n\036GetNamespac" + - "eDescriptorResponse\0221\n\023namespaceDescript" + - "or\030\001 \002(\0132\024.NamespaceDescriptor\"!\n\037ListNa" + - "mespaceDescriptorsRequest\"U\n ListNamespa" + - "ceDescriptorsResponse\0221\n\023namespaceDescri" + - "ptor\030\001 \003(\0132\024.NamespaceDescriptor\"?\n&List" + - "TableDescriptorsByNamespaceRequest\022\025\n\rna", - "mespaceName\030\001 \002(\t\"L\n\'ListTableDescriptor" + - "sByNamespaceResponse\022!\n\013tableSchema\030\001 \003(" + - "\0132\014.TableSchema\"9\n ListTableNamesByNames" + - "paceRequest\022\025\n\rnamespaceName\030\001 \002(\t\"B\n!Li" + - "stTableNamesByNamespaceResponse\022\035\n\ttable" + - "Name\030\001 \003(\0132\n.TableName\"\021\n\017ShutdownReques" + - "t\"\022\n\020ShutdownResponse\"\023\n\021StopMasterReque" + - "st\"\024\n\022StopMasterResponse\"\020\n\016BalanceReque" + - "st\"\'\n\017BalanceResponse\022\024\n\014balancer_ran\030\001 " + - "\002(\010\"<\n\031SetBalancerRunningRequest\022\n\n\002on\030\001", - " \002(\010\022\023\n\013synchronous\030\002 \001(\010\"8\n\032SetBalancer" + - "RunningResponse\022\032\n\022prev_balance_value\030\001 " + - "\001(\010\"\032\n\030IsBalancerEnabledRequest\",\n\031IsBal" + - "ancerEnabledResponse\022\017\n\007enabled\030\001 \002(\010\"\027\n" + - "\025RunCatalogScanRequest\"-\n\026RunCatalogScan" + - "Response\022\023\n\013scan_result\030\001 \001(\005\"-\n\033EnableC" + - "atalogJanitorRequest\022\016\n\006enable\030\001 \002(\010\"2\n\034" + - "EnableCatalogJanitorResponse\022\022\n\nprev_val" + - "ue\030\001 \001(\010\" \n\036IsCatalogJanitorEnabledReque" + - "st\"0\n\037IsCatalogJanitorEnabledResponse\022\r\n", - "\005value\030\001 \002(\010\"9\n\017SnapshotRequest\022&\n\010snaps" + - "hot\030\001 \002(\0132\024.SnapshotDescription\",\n\020Snaps" + - "hotResponse\022\030\n\020expected_timeout\030\001 \002(\003\"\036\n" + - "\034GetCompletedSnapshotsRequest\"H\n\035GetComp" + - "letedSnapshotsResponse\022\'\n\tsnapshots\030\001 \003(" + - "\0132\024.SnapshotDescription\"?\n\025DeleteSnapsho" + - "tRequest\022&\n\010snapshot\030\001 \002(\0132\024.SnapshotDes" + - "cription\"\030\n\026DeleteSnapshotResponse\"@\n\026Re" + - "storeSnapshotRequest\022&\n\010snapshot\030\001 \002(\0132\024" + - ".SnapshotDescription\"\031\n\027RestoreSnapshotR", - "esponse\"?\n\025IsSnapshotDoneRequest\022&\n\010snap" + - "shot\030\001 \001(\0132\024.SnapshotDescription\"U\n\026IsSn" + - "apshotDoneResponse\022\023\n\004done\030\001 \001(\010:\005false\022" + - "&\n\010snapshot\030\002 \001(\0132\024.SnapshotDescription\"" + - "F\n\034IsRestoreSnapshotDoneRequest\022&\n\010snaps" + - "hot\030\001 \001(\0132\024.SnapshotDescription\"4\n\035IsRes" + - "toreSnapshotDoneResponse\022\023\n\004done\030\001 \001(\010:\005" + - "false\"=\n\033GetSchemaAlterStatusRequest\022\036\n\n" + - "table_name\030\001 \002(\0132\n.TableName\"T\n\034GetSchem" + - "aAlterStatusResponse\022\035\n\025yet_to_update_re", - "gions\030\001 
\001(\r\022\025\n\rtotal_regions\030\002 \001(\r\"=\n\032Ge" + - "tTableDescriptorsRequest\022\037\n\013table_names\030" + - "\001 \003(\0132\n.TableName\"A\n\033GetTableDescriptors" + - "Response\022\"\n\014table_schema\030\001 \003(\0132\014.TableSc" + - "hema\"\026\n\024GetTableNamesRequest\"8\n\025GetTable" + - "NamesResponse\022\037\n\013table_names\030\001 \003(\0132\n.Tab" + - "leName\"\031\n\027GetClusterStatusRequest\"B\n\030Get" + - "ClusterStatusResponse\022&\n\016cluster_status\030" + - "\001 \002(\0132\016.ClusterStatus\"\030\n\026IsMasterRunning" + - "Request\"4\n\027IsMasterRunningResponse\022\031\n\021is", - "_master_running\030\001 \002(\010\"@\n\024ExecProcedureRe" + - "quest\022(\n\tprocedure\030\001 \002(\0132\025.ProcedureDesc" + - "ription\"1\n\025ExecProcedureResponse\022\030\n\020expe" + - "cted_timeout\030\001 \002(\003\"B\n\026IsProcedureDoneReq" + - "uest\022(\n\tprocedure\030\001 \001(\0132\025.ProcedureDescr" + - "iption\"W\n\027IsProcedureDoneResponse\022\023\n\004don" + - "e\030\001 \001(\010:\005false\022\'\n\010snapshot\030\002 \001(\0132\025.Proce" + - "dureDescription\"T\n\024TruncateTableRequest\022" + - "\035\n\ttableName\030\001 \002(\0132\n.TableName\022\035\n\016preser" + - "veSplits\030\002 \001(\010:\005false\"\027\n\025TruncateTableRe", - "sponse\"\035\n\033SecurityCapabilitiesRequest\"\343\001" + - "\n\034SecurityCapabilitiesResponse\022>\n\014capabi" + - "lities\030\001 \003(\0162(.SecurityCapabilitiesRespo" + - "nse.Capability\"\202\001\n\nCapability\022\031\n\025SIMPLE_" + - "AUTHENTICATION\020\000\022\031\n\025SECURE_AUTHENTICATIO" + - "N\020\001\022\021\n\rAUTHORIZATION\020\002\022\026\n\022CELL_AUTHORIZA" + - "TION\020\003\022\023\n\017CELL_VISIBILITY\020\0042\322\030\n\rMasterSe" + - "rvice\022S\n\024GetSchemaAlterStatus\022\034.GetSchem" + - "aAlterStatusRequest\032\035.GetSchemaAlterStat" + - "usResponse\022P\n\023GetTableDescriptors\022\033.GetT", - "ableDescriptorsRequest\032\034.GetTableDescrip" + - "torsResponse\022>\n\rGetTableNames\022\025.GetTable" + - "NamesRequest\032\026.GetTableNamesResponse\022G\n\020" + - "GetClusterStatus\022\030.GetClusterStatusReque" + - "st\032\031.GetClusterStatusResponse\022D\n\017IsMaste" + - "rRunning\022\027.IsMasterRunningRequest\032\030.IsMa" + - "sterRunningResponse\0222\n\tAddColumn\022\021.AddCo" + - "lumnRequest\032\022.AddColumnResponse\022;\n\014Delet" + - "eColumn\022\024.DeleteColumnRequest\032\025.DeleteCo" + - "lumnResponse\022;\n\014ModifyColumn\022\024.ModifyCol", - "umnRequest\032\025.ModifyColumnResponse\0225\n\nMov" + - "eRegion\022\022.MoveRegionRequest\032\023.MoveRegion" + - "Response\022Y\n\026DispatchMergingRegions\022\036.Dis" + - "patchMergingRegionsRequest\032\037.DispatchMer" + - "gingRegionsResponse\022;\n\014AssignRegion\022\024.As" + - "signRegionRequest\032\025.AssignRegionResponse" + - "\022A\n\016UnassignRegion\022\026.UnassignRegionReque" + - "st\032\027.UnassignRegionResponse\022>\n\rOfflineRe" + - "gion\022\025.OfflineRegionRequest\032\026.OfflineReg" + - "ionResponse\0228\n\013DeleteTable\022\023.DeleteTable", - "Request\032\024.DeleteTableResponse\0228\n\013EnableT" + - "able\022\023.EnableTableRequest\032\024.EnableTableR" + - "esponse\022;\n\014DisableTable\022\024.DisableTableRe" + - "quest\032\025.DisableTableResponse\0228\n\013ModifyTa" + - "ble\022\023.ModifyTableRequest\032\024.ModifyTableRe" + - "sponse\0228\n\013CreateTable\022\023.CreateTableReque" + - "st\032\024.CreateTableResponse\022/\n\010Shutdown\022\020.S" + - 
"hutdownRequest\032\021.ShutdownResponse\0225\n\nSto" + - "pMaster\022\022.StopMasterRequest\032\023.StopMaster" + - "Response\022,\n\007Balance\022\017.BalanceRequest\032\020.B", - "alanceResponse\022M\n\022SetBalancerRunning\022\032.S" + - "etBalancerRunningRequest\032\033.SetBalancerRu" + - "nningResponse\022J\n\021IsBalancerEnabled\022\031.IsB" + - "alancerEnabledRequest\032\032.IsBalancerEnable" + - "dResponse\022A\n\016RunCatalogScan\022\026.RunCatalog" + - "ScanRequest\032\027.RunCatalogScanResponse\022S\n\024" + - "EnableCatalogJanitor\022\034.EnableCatalogJani" + - "torRequest\032\035.EnableCatalogJanitorRespons" + - "e\022\\\n\027IsCatalogJanitorEnabled\022\037.IsCatalog" + - "JanitorEnabledRequest\032 .IsCatalogJanitor", - "EnabledResponse\022L\n\021ExecMasterService\022\032.C" + - "oprocessorServiceRequest\032\033.CoprocessorSe" + - "rviceResponse\022/\n\010Snapshot\022\020.SnapshotRequ" + - "est\032\021.SnapshotResponse\022V\n\025GetCompletedSn" + - "apshots\022\035.GetCompletedSnapshotsRequest\032\036" + - ".GetCompletedSnapshotsResponse\022A\n\016Delete" + - "Snapshot\022\026.DeleteSnapshotRequest\032\027.Delet" + - "eSnapshotResponse\022A\n\016IsSnapshotDone\022\026.Is" + - "SnapshotDoneRequest\032\027.IsSnapshotDoneResp" + - "onse\022D\n\017RestoreSnapshot\022\027.RestoreSnapsho", - "tRequest\032\030.RestoreSnapshotResponse\022V\n\025Is" + - "RestoreSnapshotDone\022\035.IsRestoreSnapshotD" + - "oneRequest\032\036.IsRestoreSnapshotDoneRespon" + - "se\022>\n\rExecProcedure\022\025.ExecProcedureReque" + - "st\032\026.ExecProcedureResponse\022D\n\017IsProcedur" + - "eDone\022\027.IsProcedureDoneRequest\032\030.IsProce" + - "dureDoneResponse\022D\n\017ModifyNamespace\022\027.Mo" + - "difyNamespaceRequest\032\030.ModifyNamespaceRe" + - "sponse\022D\n\017CreateNamespace\022\027.CreateNamesp" + - "aceRequest\032\030.CreateNamespaceResponse\022D\n\017", - "DeleteNamespace\022\027.DeleteNamespaceRequest" + - "\032\030.DeleteNamespaceResponse\022Y\n\026GetNamespa" + - "ceDescriptor\022\036.GetNamespaceDescriptorReq" + - "uest\032\037.GetNamespaceDescriptorResponse\022_\n" + - "\030ListNamespaceDescriptors\022 .ListNamespac" + - "eDescriptorsRequest\032!.ListNamespaceDescr" + - "iptorsResponse\022t\n\037ListTableDescriptorsBy" + - "Namespace\022\'.ListTableDescriptorsByNamesp" + - "aceRequest\032(.ListTableDescriptorsByNames" + - "paceResponse\022b\n\031ListTableNamesByNamespac", - "e\022!.ListTableNamesByNamespaceRequest\032\".L" + - "istTableNamesByNamespaceResponse\022>\n\rtrun" + - "cateTable\022\025.TruncateTableRequest\032\026.Trunc" + - "ateTableResponse\022V\n\027getSecurityCapabilit" + - "ies\022\034.SecurityCapabilitiesRequest\032\035.Secu" + - "rityCapabilitiesResponseBB\n*org.apache.h" + - "adoop.hbase.protobuf.generatedB\014MasterPr" + - "otosH\001\210\001\001\240\001\001" + "o\032\023ClusterStatus.proto\032\rRSGroup.proto\"`\n" + + "\020AddColumnRequest\022\036\n\ntable_name\030\001 \002(\0132\n." 
+ + "TableName\022,\n\017column_families\030\002 \002(\0132\023.Col" + + "umnFamilySchema\"\023\n\021AddColumnResponse\"J\n\023" + + "DeleteColumnRequest\022\036\n\ntable_name\030\001 \002(\0132" + + "\n.TableName\022\023\n\013column_name\030\002 \002(\014\"\026\n\024Dele" + + "teColumnResponse\"c\n\023ModifyColumnRequest\022" + + "\036\n\ntable_name\030\001 \002(\0132\n.TableName\022,\n\017colum" + + "n_families\030\002 \002(\0132\023.ColumnFamilySchema\"\026\n", + "\024ModifyColumnResponse\"\\\n\021MoveRegionReque" + + "st\022 \n\006region\030\001 \002(\0132\020.RegionSpecifier\022%\n\020" + + "dest_server_name\030\002 \001(\0132\013.ServerName\"\024\n\022M" + + "oveRegionResponse\"\200\001\n\035DispatchMergingReg" + + "ionsRequest\022\"\n\010region_a\030\001 \002(\0132\020.RegionSp" + + "ecifier\022\"\n\010region_b\030\002 \002(\0132\020.RegionSpecif" + + "ier\022\027\n\010forcible\030\003 \001(\010:\005false\" \n\036Dispatch" + + "MergingRegionsResponse\"7\n\023AssignRegionRe" + + "quest\022 \n\006region\030\001 \002(\0132\020.RegionSpecifier\"" + + "\026\n\024AssignRegionResponse\"O\n\025UnassignRegio", + "nRequest\022 \n\006region\030\001 \002(\0132\020.RegionSpecifi" + + "er\022\024\n\005force\030\002 \001(\010:\005false\"\030\n\026UnassignRegi" + + "onResponse\"8\n\024OfflineRegionRequest\022 \n\006re" + + "gion\030\001 \002(\0132\020.RegionSpecifier\"\027\n\025OfflineR" + + "egionResponse\"L\n\022CreateTableRequest\022\"\n\014t" + + "able_schema\030\001 \002(\0132\014.TableSchema\022\022\n\nsplit" + + "_keys\030\002 \003(\014\"\025\n\023CreateTableResponse\"4\n\022De" + + "leteTableRequest\022\036\n\ntable_name\030\001 \002(\0132\n.T" + + "ableName\"\025\n\023DeleteTableResponse\"4\n\022Enabl" + + "eTableRequest\022\036\n\ntable_name\030\001 \002(\0132\n.Tabl", + "eName\"\025\n\023EnableTableResponse\"5\n\023DisableT" + + "ableRequest\022\036\n\ntable_name\030\001 \002(\0132\n.TableN" + + "ame\"\026\n\024DisableTableResponse\"X\n\022ModifyTab" + + "leRequest\022\036\n\ntable_name\030\001 \002(\0132\n.TableNam" + + "e\022\"\n\014table_schema\030\002 \002(\0132\014.TableSchema\"\025\n" + + "\023ModifyTableResponse\"K\n\026CreateNamespaceR" + + "equest\0221\n\023namespaceDescriptor\030\001 \002(\0132\024.Na" + + "mespaceDescriptor\"\031\n\027CreateNamespaceResp" + + "onse\"/\n\026DeleteNamespaceRequest\022\025\n\rnamesp" + + "aceName\030\001 \002(\t\"\031\n\027DeleteNamespaceResponse", + "\"K\n\026ModifyNamespaceRequest\0221\n\023namespaceD" + + "escriptor\030\001 \002(\0132\024.NamespaceDescriptor\"\031\n" + + "\027ModifyNamespaceResponse\"6\n\035GetNamespace" + + "DescriptorRequest\022\025\n\rnamespaceName\030\001 \002(\t" + + "\"S\n\036GetNamespaceDescriptorResponse\0221\n\023na" + + "mespaceDescriptor\030\001 \002(\0132\024.NamespaceDescr" + + "iptor\"!\n\037ListNamespaceDescriptorsRequest" + + "\"U\n ListNamespaceDescriptorsResponse\0221\n\023" + + "namespaceDescriptor\030\001 \003(\0132\024.NamespaceDes" + + "criptor\"?\n&ListTableDescriptorsByNamespa", + "ceRequest\022\025\n\rnamespaceName\030\001 \002(\t\"L\n\'List" + + "TableDescriptorsByNamespaceResponse\022!\n\013t" + + "ableSchema\030\001 \003(\0132\014.TableSchema\"9\n ListTa" + + "bleNamesByNamespaceRequest\022\025\n\rnamespaceN" + + "ame\030\001 \002(\t\"B\n!ListTableNamesByNamespaceRe" + + "sponse\022\035\n\ttableName\030\001 \003(\0132\n.TableName\"\021\n" + + "\017ShutdownRequest\"\022\n\020ShutdownResponse\"\023\n\021" + + "StopMasterRequest\"\024\n\022StopMasterResponse\"" + + 
"\020\n\016BalanceRequest\"\'\n\017BalanceResponse\022\024\n\014" + + "balancer_ran\030\001 \002(\010\"<\n\031SetBalancerRunning", + "Request\022\n\n\002on\030\001 \002(\010\022\023\n\013synchronous\030\002 \001(\010" + + "\"8\n\032SetBalancerRunningResponse\022\032\n\022prev_b" + + "alance_value\030\001 \001(\010\"\032\n\030IsBalancerEnabledR" + + "equest\",\n\031IsBalancerEnabledResponse\022\017\n\007e" + + "nabled\030\001 \002(\010\"\027\n\025RunCatalogScanRequest\"-\n" + + "\026RunCatalogScanResponse\022\023\n\013scan_result\030\001" + + " \001(\005\"-\n\033EnableCatalogJanitorRequest\022\016\n\006e" + + "nable\030\001 \002(\010\"2\n\034EnableCatalogJanitorRespo" + + "nse\022\022\n\nprev_value\030\001 \001(\010\" \n\036IsCatalogJani" + + "torEnabledRequest\"0\n\037IsCatalogJanitorEna", + "bledResponse\022\r\n\005value\030\001 \002(\010\"9\n\017SnapshotR" + + "equest\022&\n\010snapshot\030\001 \002(\0132\024.SnapshotDescr" + + "iption\",\n\020SnapshotResponse\022\030\n\020expected_t" + + "imeout\030\001 \002(\003\"\036\n\034GetCompletedSnapshotsReq" + + "uest\"H\n\035GetCompletedSnapshotsResponse\022\'\n" + + "\tsnapshots\030\001 \003(\0132\024.SnapshotDescription\"?" + + "\n\025DeleteSnapshotRequest\022&\n\010snapshot\030\001 \002(" + + "\0132\024.SnapshotDescription\"\030\n\026DeleteSnapsho" + + "tResponse\"@\n\026RestoreSnapshotRequest\022&\n\010s" + + "napshot\030\001 \002(\0132\024.SnapshotDescription\"\031\n\027R", + "estoreSnapshotResponse\"?\n\025IsSnapshotDone" + + "Request\022&\n\010snapshot\030\001 \001(\0132\024.SnapshotDesc" + + "ription\"U\n\026IsSnapshotDoneResponse\022\023\n\004don" + + "e\030\001 \001(\010:\005false\022&\n\010snapshot\030\002 \001(\0132\024.Snaps" + + "hotDescription\"F\n\034IsRestoreSnapshotDoneR" + + "equest\022&\n\010snapshot\030\001 \001(\0132\024.SnapshotDescr" + + "iption\"4\n\035IsRestoreSnapshotDoneResponse\022" + + "\023\n\004done\030\001 \001(\010:\005false\"=\n\033GetSchemaAlterSt" + + "atusRequest\022\036\n\ntable_name\030\001 \002(\0132\n.TableN" + + "ame\"T\n\034GetSchemaAlterStatusResponse\022\035\n\025y", + "et_to_update_regions\030\001 \001(\r\022\025\n\rtotal_regi" + + "ons\030\002 \001(\r\"=\n\032GetTableDescriptorsRequest\022" + + "\037\n\013table_names\030\001 \003(\0132\n.TableName\"A\n\033GetT" + + "ableDescriptorsResponse\022\"\n\014table_schema\030" + + "\001 \003(\0132\014.TableSchema\"\026\n\024GetTableNamesRequ" + + "est\"8\n\025GetTableNamesResponse\022\037\n\013table_na" + + "mes\030\001 \003(\0132\n.TableName\"\031\n\027GetClusterStatu" + + "sRequest\"B\n\030GetClusterStatusResponse\022&\n\016" + + "cluster_status\030\001 \002(\0132\016.ClusterStatus\"\030\n\026" + + "IsMasterRunningRequest\"4\n\027IsMasterRunnin", + "gResponse\022\031\n\021is_master_running\030\001 \002(\010\"@\n\024" + + "ExecProcedureRequest\022(\n\tprocedure\030\001 \002(\0132" + + "\025.ProcedureDescription\"1\n\025ExecProcedureR" + + "esponse\022\030\n\020expected_timeout\030\001 \002(\003\"B\n\026IsP" + + "rocedureDoneRequest\022(\n\tprocedure\030\001 \001(\0132\025" + + ".ProcedureDescription\"W\n\027IsProcedureDone" + + "Response\022\023\n\004done\030\001 \001(\010:\005false\022\'\n\010snapsho" + + "t\030\002 \001(\0132\025.ProcedureDescription\"T\n\024Trunca" + + "teTableRequest\022\035\n\ttableName\030\001 \002(\0132\n.Tabl" + + "eName\022\035\n\016preserveSplits\030\002 \001(\010:\005false\"\027\n\025", + "TruncateTableResponse\"\035\n\033SecurityCapabil" + + "itiesRequest\"\343\001\n\034SecurityCapabilitiesRes" + + 
"ponse\022>\n\014capabilities\030\001 \003(\0162(.SecurityCa" + + "pabilitiesResponse.Capability\"\202\001\n\nCapabi" + + "lity\022\031\n\025SIMPLE_AUTHENTICATION\020\000\022\031\n\025SECUR" + + "E_AUTHENTICATION\020\001\022\021\n\rAUTHORIZATION\020\002\022\026\n" + + "\022CELL_AUTHORIZATION\020\003\022\023\n\017CELL_VISIBILITY" + + "\020\004\".\n\030ListTablesOfGroupRequest\022\022\n\ngroup_" + + "name\030\001 \002(\t\";\n\031ListTablesOfGroupResponse\022" + + "\036\n\ntable_name\030\001 \003(\0132\n.TableName\")\n\023GetGr", + "oupInfoRequest\022\022\n\ngroup_name\030\001 \002(\t\"6\n\024Ge" + + "tGroupInfoResponse\022\036\n\ngroup_info\030\001 \001(\0132\n" + + ".GroupInfo\"<\n\032GetGroupInfoOfTableRequest" + + "\022\036\n\ntable_name\030\001 \002(\0132\n.TableName\"=\n\033GetG" + + "roupInfoOfTableResponse\022\036\n\ngroup_info\030\001 " + + "\001(\0132\n.GroupInfo\"F\n\022MoveServersRequest\022\024\n" + + "\014target_group\030\001 \002(\t\022\032\n\007servers\030\002 \003(\0132\t.H" + + "ostPort\"\025\n\023MoveServersResponse\"I\n\021MoveTa" + + "blesRequest\022\024\n\014target_group\030\001 \002(\t\022\036\n\ntab" + + "le_name\030\002 \003(\0132\n.TableName\"\024\n\022MoveTablesR", + "esponse\"%\n\017AddGroupRequest\022\022\n\ngroup_name" + + "\030\001 \002(\t\"\022\n\020AddGroupResponse\"(\n\022RemoveGrou" + + "pRequest\022\022\n\ngroup_name\030\001 \002(\t\"\025\n\023RemoveGr" + + "oupResponse\")\n\023BalanceGroupRequest\022\022\n\ngr" + + "oup_name\030\001 \002(\t\"*\n\024BalanceGroupResponse\022\022" + + "\n\nbalanceRan\030\001 \002(\010\"\027\n\025ListGroupInfosRequ" + + "est\"8\n\026ListGroupInfosResponse\022\036\n\ngroup_i" + + "nfo\030\001 \003(\0132\n.GroupInfo\"8\n\033GetGroupInfoOfS" + + "erverRequest\022\031\n\006server\030\001 \002(\0132\t.HostPort\"" + + ">\n\034GetGroupInfoOfServerResponse\022\036\n\ngroup", + "_info\030\001 \001(\0132\n.GroupInfo2\222\035\n\rMasterServic" + + "e\022S\n\024GetSchemaAlterStatus\022\034.GetSchemaAlt" + + "erStatusRequest\032\035.GetSchemaAlterStatusRe" + + "sponse\022P\n\023GetTableDescriptors\022\033.GetTable" + + "DescriptorsRequest\032\034.GetTableDescriptors" + + "Response\022>\n\rGetTableNames\022\025.GetTableName" + + "sRequest\032\026.GetTableNamesResponse\022G\n\020GetC" + + "lusterStatus\022\030.GetClusterStatusRequest\032\031" + + ".GetClusterStatusResponse\022D\n\017IsMasterRun" + + "ning\022\027.IsMasterRunningRequest\032\030.IsMaster", + "RunningResponse\0222\n\tAddColumn\022\021.AddColumn" + + "Request\032\022.AddColumnResponse\022;\n\014DeleteCol" + + "umn\022\024.DeleteColumnRequest\032\025.DeleteColumn" + + "Response\022;\n\014ModifyColumn\022\024.ModifyColumnR" + + "equest\032\025.ModifyColumnResponse\0225\n\nMoveReg" + + "ion\022\022.MoveRegionRequest\032\023.MoveRegionResp" + + "onse\022Y\n\026DispatchMergingRegions\022\036.Dispatc" + + "hMergingRegionsRequest\032\037.DispatchMerging" + + "RegionsResponse\022;\n\014AssignRegion\022\024.Assign" + + "RegionRequest\032\025.AssignRegionResponse\022A\n\016", + "UnassignRegion\022\026.UnassignRegionRequest\032\027" + + ".UnassignRegionResponse\022>\n\rOfflineRegion" + + "\022\025.OfflineRegionRequest\032\026.OfflineRegionR" + + "esponse\0228\n\013DeleteTable\022\023.DeleteTableRequ" + + "est\032\024.DeleteTableResponse\0228\n\013EnableTable" + + "\022\023.EnableTableRequest\032\024.EnableTableRespo" + + "nse\022;\n\014DisableTable\022\024.DisableTableReques" + + "t\032\025.DisableTableResponse\0228\n\013ModifyTable\022" + + 
"\023.ModifyTableRequest\032\024.ModifyTableRespon" + + "se\0228\n\013CreateTable\022\023.CreateTableRequest\032\024", + ".CreateTableResponse\022/\n\010Shutdown\022\020.Shutd" + + "ownRequest\032\021.ShutdownResponse\0225\n\nStopMas" + + "ter\022\022.StopMasterRequest\032\023.StopMasterResp" + + "onse\022,\n\007Balance\022\017.BalanceRequest\032\020.Balan" + + "ceResponse\022M\n\022SetBalancerRunning\022\032.SetBa" + + "lancerRunningRequest\032\033.SetBalancerRunnin" + + "gResponse\022J\n\021IsBalancerEnabled\022\031.IsBalan" + + "cerEnabledRequest\032\032.IsBalancerEnabledRes" + + "ponse\022A\n\016RunCatalogScan\022\026.RunCatalogScan" + + "Request\032\027.RunCatalogScanResponse\022S\n\024Enab", + "leCatalogJanitor\022\034.EnableCatalogJanitorR" + + "equest\032\035.EnableCatalogJanitorResponse\022\\\n" + + "\027IsCatalogJanitorEnabled\022\037.IsCatalogJani" + + "torEnabledRequest\032 .IsCatalogJanitorEnab" + + "ledResponse\022L\n\021ExecMasterService\022\032.Copro" + + "cessorServiceRequest\032\033.CoprocessorServic" + + "eResponse\022/\n\010Snapshot\022\020.SnapshotRequest\032" + + "\021.SnapshotResponse\022V\n\025GetCompletedSnapsh" + + "ots\022\035.GetCompletedSnapshotsRequest\032\036.Get" + + "CompletedSnapshotsResponse\022A\n\016DeleteSnap", + "shot\022\026.DeleteSnapshotRequest\032\027.DeleteSna" + + "pshotResponse\022A\n\016IsSnapshotDone\022\026.IsSnap" + + "shotDoneRequest\032\027.IsSnapshotDoneResponse" + + "\022D\n\017RestoreSnapshot\022\027.RestoreSnapshotReq" + + "uest\032\030.RestoreSnapshotResponse\022V\n\025IsRest" + + "oreSnapshotDone\022\035.IsRestoreSnapshotDoneR" + + "equest\032\036.IsRestoreSnapshotDoneResponse\022>" + + "\n\rExecProcedure\022\025.ExecProcedureRequest\032\026" + + ".ExecProcedureResponse\022D\n\017IsProcedureDon" + + "e\022\027.IsProcedureDoneRequest\032\030.IsProcedure", + "DoneResponse\022D\n\017ModifyNamespace\022\027.Modify" + + "NamespaceRequest\032\030.ModifyNamespaceRespon" + + "se\022D\n\017CreateNamespace\022\027.CreateNamespaceR" + + "equest\032\030.CreateNamespaceResponse\022D\n\017Dele" + + "teNamespace\022\027.DeleteNamespaceRequest\032\030.D" + + "eleteNamespaceResponse\022Y\n\026GetNamespaceDe" + + "scriptor\022\036.GetNamespaceDescriptorRequest" + + "\032\037.GetNamespaceDescriptorResponse\022_\n\030Lis" + + "tNamespaceDescriptors\022 .ListNamespaceDes" + + "criptorsRequest\032!.ListNamespaceDescripto", + "rsResponse\022t\n\037ListTableDescriptorsByName" + + "space\022\'.ListTableDescriptorsByNamespaceR" + + "equest\032(.ListTableDescriptorsByNamespace" + + "Response\022b\n\031ListTableNamesByNamespace\022!." 
+ + "ListTableNamesByNamespaceRequest\032\".ListT" + + "ableNamesByNamespaceResponse\022>\n\rtruncate" + + "Table\022\025.TruncateTableRequest\032\026.TruncateT" + + "ableResponse\022V\n\027getSecurityCapabilities\022" + + "\034.SecurityCapabilitiesRequest\032\035.Security" + + "CapabilitiesResponse\022;\n\014GetGroupInfo\022\024.G", + "etGroupInfoRequest\032\025.GetGroupInfoRespons" + + "e\022P\n\023GetGroupInfoOfTable\022\033.GetGroupInfoO" + + "fTableRequest\032\034.GetGroupInfoOfTableRespo" + + "nse\022S\n\024GetGroupInfoOfServer\022\034.GetGroupIn" + + "foOfServerRequest\032\035.GetGroupInfoOfServer" + + "Response\0228\n\013MoveServers\022\023.MoveServersReq" + + "uest\032\024.MoveServersResponse\0225\n\nMoveTables" + + "\022\022.MoveTablesRequest\032\023.MoveTablesRespons" + + "e\022/\n\010AddGroup\022\020.AddGroupRequest\032\021.AddGro" + + "upResponse\0228\n\013RemoveGroup\022\023.RemoveGroupR", + "equest\032\024.RemoveGroupResponse\022;\n\014BalanceG" + + "roup\022\024.BalanceGroupRequest\032\025.BalanceGrou" + + "pResponse\022A\n\016ListGroupInfos\022\026.ListGroupI" + + "nfosRequest\032\027.ListGroupInfosResponseBB\n*" + + "org.apache.hadoop.hbase.protobuf.generat" + + "edB\014MasterProtosH\001\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -47137,6 +58660,126 @@ public final class MasterProtos { com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_SecurityCapabilitiesResponse_descriptor, new java.lang.String[] { "Capabilities", }); + internal_static_ListTablesOfGroupRequest_descriptor = + getDescriptor().getMessageTypes().get(86); + internal_static_ListTablesOfGroupRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_ListTablesOfGroupRequest_descriptor, + new java.lang.String[] { "GroupName", }); + internal_static_ListTablesOfGroupResponse_descriptor = + getDescriptor().getMessageTypes().get(87); + internal_static_ListTablesOfGroupResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_ListTablesOfGroupResponse_descriptor, + new java.lang.String[] { "TableName", }); + internal_static_GetGroupInfoRequest_descriptor = + getDescriptor().getMessageTypes().get(88); + internal_static_GetGroupInfoRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_GetGroupInfoRequest_descriptor, + new java.lang.String[] { "GroupName", }); + internal_static_GetGroupInfoResponse_descriptor = + getDescriptor().getMessageTypes().get(89); + internal_static_GetGroupInfoResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_GetGroupInfoResponse_descriptor, + new java.lang.String[] { "GroupInfo", }); + internal_static_GetGroupInfoOfTableRequest_descriptor = + getDescriptor().getMessageTypes().get(90); + internal_static_GetGroupInfoOfTableRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_GetGroupInfoOfTableRequest_descriptor, + new java.lang.String[] { "TableName", }); + internal_static_GetGroupInfoOfTableResponse_descriptor = + getDescriptor().getMessageTypes().get(91); + internal_static_GetGroupInfoOfTableResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_GetGroupInfoOfTableResponse_descriptor, + new 
java.lang.String[] { "GroupInfo", }); + internal_static_MoveServersRequest_descriptor = + getDescriptor().getMessageTypes().get(92); + internal_static_MoveServersRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_MoveServersRequest_descriptor, + new java.lang.String[] { "TargetGroup", "Servers", }); + internal_static_MoveServersResponse_descriptor = + getDescriptor().getMessageTypes().get(93); + internal_static_MoveServersResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_MoveServersResponse_descriptor, + new java.lang.String[] { }); + internal_static_MoveTablesRequest_descriptor = + getDescriptor().getMessageTypes().get(94); + internal_static_MoveTablesRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_MoveTablesRequest_descriptor, + new java.lang.String[] { "TargetGroup", "TableName", }); + internal_static_MoveTablesResponse_descriptor = + getDescriptor().getMessageTypes().get(95); + internal_static_MoveTablesResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_MoveTablesResponse_descriptor, + new java.lang.String[] { }); + internal_static_AddGroupRequest_descriptor = + getDescriptor().getMessageTypes().get(96); + internal_static_AddGroupRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_AddGroupRequest_descriptor, + new java.lang.String[] { "GroupName", }); + internal_static_AddGroupResponse_descriptor = + getDescriptor().getMessageTypes().get(97); + internal_static_AddGroupResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_AddGroupResponse_descriptor, + new java.lang.String[] { }); + internal_static_RemoveGroupRequest_descriptor = + getDescriptor().getMessageTypes().get(98); + internal_static_RemoveGroupRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_RemoveGroupRequest_descriptor, + new java.lang.String[] { "GroupName", }); + internal_static_RemoveGroupResponse_descriptor = + getDescriptor().getMessageTypes().get(99); + internal_static_RemoveGroupResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_RemoveGroupResponse_descriptor, + new java.lang.String[] { }); + internal_static_BalanceGroupRequest_descriptor = + getDescriptor().getMessageTypes().get(100); + internal_static_BalanceGroupRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_BalanceGroupRequest_descriptor, + new java.lang.String[] { "GroupName", }); + internal_static_BalanceGroupResponse_descriptor = + getDescriptor().getMessageTypes().get(101); + internal_static_BalanceGroupResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_BalanceGroupResponse_descriptor, + new java.lang.String[] { "BalanceRan", }); + internal_static_ListGroupInfosRequest_descriptor = + getDescriptor().getMessageTypes().get(102); + internal_static_ListGroupInfosRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_ListGroupInfosRequest_descriptor, + new java.lang.String[] { }); + internal_static_ListGroupInfosResponse_descriptor = + getDescriptor().getMessageTypes().get(103); + 
internal_static_ListGroupInfosResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_ListGroupInfosResponse_descriptor, + new java.lang.String[] { "GroupInfo", }); + internal_static_GetGroupInfoOfServerRequest_descriptor = + getDescriptor().getMessageTypes().get(104); + internal_static_GetGroupInfoOfServerRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_GetGroupInfoOfServerRequest_descriptor, + new java.lang.String[] { "Server", }); + internal_static_GetGroupInfoOfServerResponse_descriptor = + getDescriptor().getMessageTypes().get(105); + internal_static_GetGroupInfoOfServerResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_GetGroupInfoOfServerResponse_descriptor, + new java.lang.String[] { "GroupInfo", }); return null; } }; @@ -47146,6 +58789,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.getDescriptor(), org.apache.hadoop.hbase.protobuf.generated.ClientProtos.getDescriptor(), org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.getDescriptor(), + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.getDescriptor(), }, assigner); } diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RSGroupProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RSGroupProtos.java new file mode 100644 index 0000000..3259c3e --- /dev/null +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RSGroupProtos.java @@ -0,0 +1,1330 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: RSGroup.proto + +package org.apache.hadoop.hbase.protobuf.generated; + +public final class RSGroupProtos { + private RSGroupProtos() {} + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistry registry) { + } + public interface GroupInfoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string name = 1; + /** + * required string name = 1; + */ + boolean hasName(); + /** + * required string name = 1; + */ + java.lang.String getName(); + /** + * required string name = 1; + */ + com.google.protobuf.ByteString + getNameBytes(); + + // repeated .HostPort servers = 4; + /** + * repeated .HostPort servers = 4; + */ + java.util.List + getServersList(); + /** + * repeated .HostPort servers = 4; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort getServers(int index); + /** + * repeated .HostPort servers = 4; + */ + int getServersCount(); + /** + * repeated .HostPort servers = 4; + */ + java.util.List + getServersOrBuilderList(); + /** + * repeated .HostPort servers = 4; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPortOrBuilder getServersOrBuilder( + int index); + + // repeated .TableName tables = 3; + /** + * repeated .TableName tables = 3; + */ + java.util.List + getTablesList(); + /** + * repeated .TableName tables = 3; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTables(int index); + /** + * repeated .TableName tables = 3; + */ + int getTablesCount(); + /** + * repeated .TableName tables = 3; + */ + java.util.List + getTablesOrBuilderList(); + /** + * repeated .TableName tables = 3; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTablesOrBuilder( + int index); + } + /** + * Protobuf type {@code GroupInfo} + */ + public static final class 
GroupInfo extends + com.google.protobuf.GeneratedMessage + implements GroupInfoOrBuilder { + // Use GroupInfo.newBuilder() to construct. + private GroupInfo(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private GroupInfo(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final GroupInfo defaultInstance; + public static GroupInfo getDefaultInstance() { + return defaultInstance; + } + + public GroupInfo getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private GroupInfo( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + name_ = input.readBytes(); + break; + } + case 26: { + if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) { + tables_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000004; + } + tables_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry)); + break; + } + case 34: { + if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + servers_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000002; + } + servers_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.PARSER, extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) { + tables_ = java.util.Collections.unmodifiableList(tables_); + } + if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + servers_ = java.util.Collections.unmodifiableList(servers_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_GroupInfo_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_GroupInfo_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public GroupInfo parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return new GroupInfo(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string name = 1; + public static final int NAME_FIELD_NUMBER = 1; + private java.lang.Object name_; + /** + * required string name = 1; + */ + public boolean hasName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string name = 1; + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + name_ = s; + } + return s; + } + } + /** + * required string name = 1; + */ + public com.google.protobuf.ByteString + getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // repeated .HostPort servers = 4; + public static final int SERVERS_FIELD_NUMBER = 4; + private java.util.List servers_; + /** + * repeated .HostPort servers = 4; + */ + public java.util.List getServersList() { + return servers_; + } + /** + * repeated .HostPort servers = 4; + */ + public java.util.List + getServersOrBuilderList() { + return servers_; + } + /** + * repeated .HostPort servers = 4; + */ + public int getServersCount() { + return servers_.size(); + } + /** + * repeated .HostPort servers = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort getServers(int index) { + return servers_.get(index); + } + /** + * repeated .HostPort servers = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPortOrBuilder getServersOrBuilder( + int index) { + return servers_.get(index); + } + + // repeated .TableName tables = 3; + public static final int TABLES_FIELD_NUMBER = 3; + private java.util.List tables_; + /** + * repeated .TableName tables = 3; + */ + public java.util.List getTablesList() { + return tables_; + } + /** + * repeated .TableName tables = 3; + */ + public java.util.List + getTablesOrBuilderList() { + return tables_; + } + /** + * repeated .TableName tables = 3; + */ + public int getTablesCount() { + return tables_.size(); + } + /** + * repeated .TableName tables = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTables(int index) { + return tables_.get(index); + } + /** + * repeated .TableName tables = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTablesOrBuilder( + int index) { + return tables_.get(index); + } + + private void initFields() { + name_ = ""; + servers_ = java.util.Collections.emptyList(); + tables_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasName()) { + memoizedIsInitialized = 0; + return false; + } + for (int i = 0; i < getServersCount(); i++) { + if (!getServers(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + for (int i = 0; i < getTablesCount(); i++) { + if (!getTables(i).isInitialized()) { + 
memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getNameBytes()); + } + for (int i = 0; i < tables_.size(); i++) { + output.writeMessage(3, tables_.get(i)); + } + for (int i = 0; i < servers_.size(); i++) { + output.writeMessage(4, servers_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getNameBytes()); + } + for (int i = 0; i < tables_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(3, tables_.get(i)); + } + for (int i = 0; i < servers_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(4, servers_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo other = (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo) obj; + + boolean result = true; + result = result && (hasName() == other.hasName()); + if (hasName()) { + result = result && getName() + .equals(other.getName()); + } + result = result && getServersList() + .equals(other.getServersList()); + result = result && getTablesList() + .equals(other.getTablesList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasName()) { + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + } + if (getServersCount() > 0) { + hash = (37 * hash) + SERVERS_FIELD_NUMBER; + hash = (53 * hash) + getServersList().hashCode(); + } + if (getTablesCount() > 0) { + hash = (37 * hash) + TABLES_FIELD_NUMBER; + hash = (53 * hash) + getTablesList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo 
parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code GroupInfo} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_GroupInfo_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_GroupInfo_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + 
maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getServersFieldBuilder(); + getTablesFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + name_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + if (serversBuilder_ == null) { + servers_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + } else { + serversBuilder_.clear(); + } + if (tablesBuilder_ == null) { + tables_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + } else { + tablesBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_GroupInfo_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo build() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo result = new org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.name_ = name_; + if (serversBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { + servers_ = java.util.Collections.unmodifiableList(servers_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.servers_ = servers_; + } else { + result.servers_ = serversBuilder_.build(); + } + if (tablesBuilder_ == null) { + if (((bitField0_ & 0x00000004) == 0x00000004)) { + tables_ = java.util.Collections.unmodifiableList(tables_); + bitField0_ = (bitField0_ & ~0x00000004); + } + result.tables_ = tables_; + } else { + result.tables_ = tablesBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo.getDefaultInstance()) return this; + if (other.hasName()) { + bitField0_ |= 0x00000001; + name_ = other.name_; + onChanged(); + } + if (serversBuilder_ == null) { + if (!other.servers_.isEmpty()) { + if (servers_.isEmpty()) { + servers_ = other.servers_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureServersIsMutable(); + servers_.addAll(other.servers_); + } + onChanged(); + } + } else { + if (!other.servers_.isEmpty()) { + if (serversBuilder_.isEmpty()) { + 
serversBuilder_.dispose(); + serversBuilder_ = null; + servers_ = other.servers_; + bitField0_ = (bitField0_ & ~0x00000002); + serversBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getServersFieldBuilder() : null; + } else { + serversBuilder_.addAllMessages(other.servers_); + } + } + } + if (tablesBuilder_ == null) { + if (!other.tables_.isEmpty()) { + if (tables_.isEmpty()) { + tables_ = other.tables_; + bitField0_ = (bitField0_ & ~0x00000004); + } else { + ensureTablesIsMutable(); + tables_.addAll(other.tables_); + } + onChanged(); + } + } else { + if (!other.tables_.isEmpty()) { + if (tablesBuilder_.isEmpty()) { + tablesBuilder_.dispose(); + tablesBuilder_ = null; + tables_ = other.tables_; + bitField0_ = (bitField0_ & ~0x00000004); + tablesBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getTablesFieldBuilder() : null; + } else { + tablesBuilder_.addAllMessages(other.tables_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasName()) { + + return false; + } + for (int i = 0; i < getServersCount(); i++) { + if (!getServers(i).isInitialized()) { + + return false; + } + } + for (int i = 0; i < getTablesCount(); i++) { + if (!getTables(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.GroupInfo) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string name = 1; + private java.lang.Object name_ = ""; + /** + * required string name = 1; + */ + public boolean hasName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string name = 1; + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string name = 1; + */ + public com.google.protobuf.ByteString + getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string name = 1; + */ + public Builder setName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + name_ = value; + onChanged(); + return this; + } + /** + * required string name = 1; + */ + public Builder clearName() { + bitField0_ = (bitField0_ & ~0x00000001); + name_ = getDefaultInstance().getName(); + onChanged(); + return this; + } + /** + * required string name = 1; + */ + public Builder setNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + name_ = value; + 
onChanged(); + return this; + } + + // repeated .HostPort servers = 4; + private java.util.List servers_ = + java.util.Collections.emptyList(); + private void ensureServersIsMutable() { + if (!((bitField0_ & 0x00000002) == 0x00000002)) { + servers_ = new java.util.ArrayList(servers_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPortOrBuilder> serversBuilder_; + + /** + * repeated .HostPort servers = 4; + */ + public java.util.List getServersList() { + if (serversBuilder_ == null) { + return java.util.Collections.unmodifiableList(servers_); + } else { + return serversBuilder_.getMessageList(); + } + } + /** + * repeated .HostPort servers = 4; + */ + public int getServersCount() { + if (serversBuilder_ == null) { + return servers_.size(); + } else { + return serversBuilder_.getCount(); + } + } + /** + * repeated .HostPort servers = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort getServers(int index) { + if (serversBuilder_ == null) { + return servers_.get(index); + } else { + return serversBuilder_.getMessage(index); + } + } + /** + * repeated .HostPort servers = 4; + */ + public Builder setServers( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort value) { + if (serversBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureServersIsMutable(); + servers_.set(index, value); + onChanged(); + } else { + serversBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .HostPort servers = 4; + */ + public Builder setServers( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.Builder builderForValue) { + if (serversBuilder_ == null) { + ensureServersIsMutable(); + servers_.set(index, builderForValue.build()); + onChanged(); + } else { + serversBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .HostPort servers = 4; + */ + public Builder addServers(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort value) { + if (serversBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureServersIsMutable(); + servers_.add(value); + onChanged(); + } else { + serversBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .HostPort servers = 4; + */ + public Builder addServers( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort value) { + if (serversBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureServersIsMutable(); + servers_.add(index, value); + onChanged(); + } else { + serversBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .HostPort servers = 4; + */ + public Builder addServers( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.Builder builderForValue) { + if (serversBuilder_ == null) { + ensureServersIsMutable(); + servers_.add(builderForValue.build()); + onChanged(); + } else { + serversBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .HostPort servers = 4; + */ + public Builder addServers( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.Builder builderForValue) { + if (serversBuilder_ == null) { + ensureServersIsMutable(); + servers_.add(index, 
builderForValue.build()); + onChanged(); + } else { + serversBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .HostPort servers = 4; + */ + public Builder addAllServers( + java.lang.Iterable values) { + if (serversBuilder_ == null) { + ensureServersIsMutable(); + super.addAll(values, servers_); + onChanged(); + } else { + serversBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .HostPort servers = 4; + */ + public Builder clearServers() { + if (serversBuilder_ == null) { + servers_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + serversBuilder_.clear(); + } + return this; + } + /** + * repeated .HostPort servers = 4; + */ + public Builder removeServers(int index) { + if (serversBuilder_ == null) { + ensureServersIsMutable(); + servers_.remove(index); + onChanged(); + } else { + serversBuilder_.remove(index); + } + return this; + } + /** + * repeated .HostPort servers = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.Builder getServersBuilder( + int index) { + return getServersFieldBuilder().getBuilder(index); + } + /** + * repeated .HostPort servers = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPortOrBuilder getServersOrBuilder( + int index) { + if (serversBuilder_ == null) { + return servers_.get(index); } else { + return serversBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .HostPort servers = 4; + */ + public java.util.List + getServersOrBuilderList() { + if (serversBuilder_ != null) { + return serversBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(servers_); + } + } + /** + * repeated .HostPort servers = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.Builder addServersBuilder() { + return getServersFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.getDefaultInstance()); + } + /** + * repeated .HostPort servers = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.Builder addServersBuilder( + int index) { + return getServersFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.getDefaultInstance()); + } + /** + * repeated .HostPort servers = 4; + */ + public java.util.List + getServersBuilderList() { + return getServersFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPortOrBuilder> + getServersFieldBuilder() { + if (serversBuilder_ == null) { + serversBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPort.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.HostPortOrBuilder>( + servers_, + ((bitField0_ & 0x00000002) == 0x00000002), + getParentForChildren(), + isClean()); + servers_ = null; + } + return serversBuilder_; + } + + // repeated .TableName tables = 3; + private java.util.List tables_ = + java.util.Collections.emptyList(); + private void ensureTablesIsMutable() { + if (!((bitField0_ & 0x00000004) == 0x00000004)) { + tables_ = new java.util.ArrayList(tables_); + bitField0_ |= 0x00000004; + } + } + 
+ private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tablesBuilder_; + + /** + * repeated .TableName tables = 3; + */ + public java.util.List getTablesList() { + if (tablesBuilder_ == null) { + return java.util.Collections.unmodifiableList(tables_); + } else { + return tablesBuilder_.getMessageList(); + } + } + /** + * repeated .TableName tables = 3; + */ + public int getTablesCount() { + if (tablesBuilder_ == null) { + return tables_.size(); + } else { + return tablesBuilder_.getCount(); + } + } + /** + * repeated .TableName tables = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTables(int index) { + if (tablesBuilder_ == null) { + return tables_.get(index); + } else { + return tablesBuilder_.getMessage(index); + } + } + /** + * repeated .TableName tables = 3; + */ + public Builder setTables( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tablesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTablesIsMutable(); + tables_.set(index, value); + onChanged(); + } else { + tablesBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .TableName tables = 3; + */ + public Builder setTables( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tablesBuilder_ == null) { + ensureTablesIsMutable(); + tables_.set(index, builderForValue.build()); + onChanged(); + } else { + tablesBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .TableName tables = 3; + */ + public Builder addTables(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tablesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTablesIsMutable(); + tables_.add(value); + onChanged(); + } else { + tablesBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .TableName tables = 3; + */ + public Builder addTables( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tablesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTablesIsMutable(); + tables_.add(index, value); + onChanged(); + } else { + tablesBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .TableName tables = 3; + */ + public Builder addTables( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tablesBuilder_ == null) { + ensureTablesIsMutable(); + tables_.add(builderForValue.build()); + onChanged(); + } else { + tablesBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .TableName tables = 3; + */ + public Builder addTables( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tablesBuilder_ == null) { + ensureTablesIsMutable(); + tables_.add(index, builderForValue.build()); + onChanged(); + } else { + tablesBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .TableName tables = 3; + */ + public Builder addAllTables( + java.lang.Iterable values) { + if (tablesBuilder_ == null) { + ensureTablesIsMutable(); + super.addAll(values, tables_); + onChanged(); + } 
else { + tablesBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .TableName tables = 3; + */ + public Builder clearTables() { + if (tablesBuilder_ == null) { + tables_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + } else { + tablesBuilder_.clear(); + } + return this; + } + /** + * repeated .TableName tables = 3; + */ + public Builder removeTables(int index) { + if (tablesBuilder_ == null) { + ensureTablesIsMutable(); + tables_.remove(index); + onChanged(); + } else { + tablesBuilder_.remove(index); + } + return this; + } + /** + * repeated .TableName tables = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTablesBuilder( + int index) { + return getTablesFieldBuilder().getBuilder(index); + } + /** + * repeated .TableName tables = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTablesOrBuilder( + int index) { + if (tablesBuilder_ == null) { + return tables_.get(index); } else { + return tablesBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .TableName tables = 3; + */ + public java.util.List + getTablesOrBuilderList() { + if (tablesBuilder_ != null) { + return tablesBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(tables_); + } + } + /** + * repeated .TableName tables = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder addTablesBuilder() { + return getTablesFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()); + } + /** + * repeated .TableName tables = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder addTablesBuilder( + int index) { + return getTablesFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()); + } + /** + * repeated .TableName tables = 3; + */ + public java.util.List + getTablesBuilderList() { + return getTablesFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> + getTablesFieldBuilder() { + if (tablesBuilder_ == null) { + tablesBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>( + tables_, + ((bitField0_ & 0x00000004) == 0x00000004), + getParentForChildren(), + isClean()); + tables_ = null; + } + return tablesBuilder_; + } + + // @@protoc_insertion_point(builder_scope:GroupInfo) + } + + static { + defaultInstance = new GroupInfo(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:GroupInfo) + } + + private static com.google.protobuf.Descriptors.Descriptor + internal_static_GroupInfo_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_GroupInfo_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor + getDescriptor() { + return descriptor; + } + private static com.google.protobuf.Descriptors.FileDescriptor + descriptor; + static { + java.lang.String[] descriptorData 
= { + "\n\rRSGroup.proto\032\013HBase.proto\"Q\n\tGroupInf" + + "o\022\014\n\004name\030\001 \002(\t\022\032\n\007servers\030\004 \003(\0132\t.HostP" + + "ort\022\032\n\006tables\030\003 \003(\0132\n.TableNameBC\n*org.a" + + "pache.hadoop.hbase.protobuf.generatedB\rR" + + "SGroupProtosH\001\210\001\001\240\001\001" + }; + com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = + new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { + public com.google.protobuf.ExtensionRegistry assignDescriptors( + com.google.protobuf.Descriptors.FileDescriptor root) { + descriptor = root; + internal_static_GroupInfo_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_GroupInfo_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_GroupInfo_descriptor, + new java.lang.String[] { "Name", "Servers", "Tables", }); + return null; + } + }; + com.google.protobuf.Descriptors.FileDescriptor + .internalBuildGeneratedFileFrom(descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.getDescriptor(), + }, assigner); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/hbase-protocol/src/main/protobuf/HBase.proto b/hbase-protocol/src/main/protobuf/HBase.proto index 3e3d570..192c8dd 100644 --- a/hbase-protocol/src/main/protobuf/HBase.proto +++ b/hbase-protocol/src/main/protobuf/HBase.proto @@ -25,6 +25,11 @@ option optimize_for = SPEED; import "Cell.proto"; +message HostPort { + required string host_name = 1; + required uint32 port = 2; +} + /** * Table Name */ diff --git a/hbase-protocol/src/main/protobuf/Master.proto b/hbase-protocol/src/main/protobuf/Master.proto index 7400e10..f538ff1 100644 --- a/hbase-protocol/src/main/protobuf/Master.proto +++ b/hbase-protocol/src/main/protobuf/Master.proto @@ -28,6 +28,7 @@ option optimize_for = SPEED; import "HBase.proto"; import "Client.proto"; import "ClusterStatus.proto"; +import "RSGroup.proto"; /* Column-level protobufs */ @@ -379,6 +380,85 @@ message SecurityCapabilitiesResponse { repeated Capability capabilities = 1; } +/** Group level protobufs */ + +message ListTablesOfGroupRequest { + required string group_name = 1; +} + +message ListTablesOfGroupResponse { + repeated TableName table_name = 1; +} + +message GetGroupInfoRequest { + required string group_name = 1; +} + +message GetGroupInfoResponse { + optional GroupInfo group_info = 1; +} + +message GetGroupInfoOfTableRequest { + required TableName table_name = 1; +} + +message GetGroupInfoOfTableResponse { + optional GroupInfo group_info = 1; +} + +message MoveServersRequest { + required string target_group = 1; + repeated HostPort servers = 2; +} + +message MoveServersResponse { +} + +message MoveTablesRequest { + required string target_group = 1; + repeated TableName table_name = 2; +} + +message MoveTablesResponse { +} + +message AddGroupRequest { + required string group_name = 1; +} + +message AddGroupResponse { +} + +message RemoveGroupRequest { + required string group_name = 1; +} + +message RemoveGroupResponse { +} + +message BalanceGroupRequest { + required string group_name = 1; +} + +message BalanceGroupResponse { + required bool balanceRan = 1; +} + +message ListGroupInfosRequest { +} + +message ListGroupInfosResponse { + repeated GroupInfo group_info = 1; +} + +message GetGroupInfoOfServerRequest { + required HostPort server = 1; +} + +message GetGroupInfoOfServerResponse { + optional 
GroupInfo group_info = 1; +} + service MasterService { /** Used by the client to get the number of regions that have received the updated schema */ rpc GetSchemaAlterStatus(GetSchemaAlterStatusRequest) @@ -591,4 +671,31 @@ service MasterService { /** Returns the security capabilities in effect on the cluster */ rpc getSecurityCapabilities(SecurityCapabilitiesRequest) returns(SecurityCapabilitiesResponse); + + rpc GetGroupInfo(GetGroupInfoRequest) + returns (GetGroupInfoResponse); + + rpc GetGroupInfoOfTable(GetGroupInfoOfTableRequest) + returns (GetGroupInfoOfTableResponse); + + rpc GetGroupInfoOfServer(GetGroupInfoOfServerRequest) + returns (GetGroupInfoOfServerResponse); + + rpc MoveServers(MoveServersRequest) + returns (MoveServersResponse); + + rpc MoveTables(MoveTablesRequest) + returns (MoveTablesResponse); + + rpc AddGroup(AddGroupRequest) + returns (AddGroupResponse); + + rpc RemoveGroup(RemoveGroupRequest) + returns (RemoveGroupResponse); + + rpc BalanceGroup(BalanceGroupRequest) + returns (BalanceGroupResponse); + + rpc ListGroupInfos(ListGroupInfosRequest) + returns (ListGroupInfosResponse); } diff --git a/hbase-protocol/src/main/protobuf/RSGroup.proto b/hbase-protocol/src/main/protobuf/RSGroup.proto new file mode 100644 index 0000000..a2a0094 --- /dev/null +++ b/hbase-protocol/src/main/protobuf/RSGroup.proto @@ -0,0 +1,32 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +option java_package = "org.apache.hadoop.hbase.protobuf.generated"; +option java_outer_classname = "RSGroupProtos"; +option java_generic_services = true; +option java_generate_equals_and_hash = true; +option optimize_for = SPEED; + +import "HBase.proto"; + +message GroupInfo { + required string name = 1; + repeated HostPort servers = 4; + repeated TableName tables = 3; +} + diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterAndRegionObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterAndRegionObserver.java index 1aff956..ab1230e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterAndRegionObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterAndRegionObserver.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.HostPort; import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.master.RegionPlan; @@ -34,6 +35,7 @@ import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescriptio import java.io.IOException; import java.util.List; +import java.util.Set; @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) @InterfaceStability.Evolving @@ -393,4 +395,55 @@ public abstract class BaseMasterAndRegionObserver extends BaseRegionObserver public void postGetTableDescriptors(ObserverContext ctx, List descriptors) throws IOException { } + + @Override + public void preMoveServers(ObserverContext ctx, + Set servers, String targetGroup) throws IOException { + } + + @Override + public void postMoveServers(ObserverContext ctx, + Set servers, String targetGroup) throws IOException { + + } + + @Override + public void preMoveTables(ObserverContext ctx, + Set tables, String targetGroup) throws IOException { + } + + @Override + public void postMoveTables(ObserverContext ctx, + Set tables, String targetGroup) throws IOException { + } + + @Override + public void preAddGroup(ObserverContext ctx, + String name) throws IOException { + } + + @Override + public void postAddGroup(ObserverContext ctx, + String name) throws IOException { + } + + @Override + public void preRemoveGroup(ObserverContext ctx, + String name) throws IOException { + } + + @Override + public void postRemoveGroup(ObserverContext ctx, + String name) throws IOException { + } + + @Override + public void preBalanceGroup(ObserverContext ctx, + String groupName) throws IOException { + } + + @Override + public void postBalanceGroup(ObserverContext ctx, + String groupName, boolean balancerRan) throws IOException { + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java index 29be365..ec8f6e3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.HostPort; import 
org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.master.RegionPlan; @@ -34,6 +35,7 @@ import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescriptio import java.io.IOException; import java.util.List; +import java.util.Set; @InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.CONFIG}) @InterfaceStability.Evolving @@ -408,5 +410,55 @@ public class BaseMasterObserver implements MasterObserver { public void postGetTableDescriptors(ObserverContext ctx, List descriptors) throws IOException { } + + @Override + public void preMoveServers(ObserverContext ctx, + Set servers, String targetGroup) throws IOException { + } + + @Override + public void postMoveServers(ObserverContext ctx, + Set servers, String targetGroup) throws IOException { + } + + @Override + public void preMoveTables(ObserverContext ctx, + Set tables, String targetGroup) throws IOException { + } + + @Override + public void postMoveTables(ObserverContext ctx, + Set tables, String targetGroup) throws IOException { + } + + @Override + public void preAddGroup(ObserverContext ctx, + String name) throws IOException { + } + + @Override + public void postAddGroup(ObserverContext ctx, + String name) throws IOException { + } + + @Override + public void preRemoveGroup(ObserverContext ctx, + String name) throws IOException { + } + + @Override + public void postRemoveGroup(ObserverContext ctx, + String name) throws IOException { + } + + @Override + public void preBalanceGroup(ObserverContext ctx, + String groupName) throws IOException { + } + + @Override + public void postBalanceGroup(ObserverContext ctx, + String groupName, boolean balancerRan) throws IOException { + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java index d6cb609..5bb2c76 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java @@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.coprocessor; import java.io.IOException; import java.util.List; +import java.util.Set; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; @@ -30,6 +31,7 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.HostPort; import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.master.RegionPlan; @@ -740,4 +742,98 @@ public interface MasterObserver extends Coprocessor { */ void postModifyNamespace(final ObserverContext ctx, NamespaceDescriptor ns) throws IOException; + + /** + * Called before servers are moved to target region server group + * @param ctx the environment to interact with the framework and master + * @param servers + * @param targetGroup + * @throws IOException + */ + void preMoveServers(final ObserverContext ctx, + Set servers, String targetGroup) throws IOException; + + /** + * Called after servers are moved to target region server group + * @param ctx the environment to interact with the framework and master + * @param servers + * @param targetGroup + * @throws IOException + */ + void 
postMoveServers(final ObserverContext ctx, + Set servers, String targetGroup) throws IOException; + + /** + * Called before tables are moved to target region server group + * @param ctx the environment to interact with the framework and master + * @param tables + * @param targetGroup + * @throws IOException + */ + void preMoveTables(final ObserverContext ctx, + Set tables, String targetGroup) throws IOException; + + /** + * Called after tables are moved to target region server group + * @param ctx the environment to interact with the framework and master + * @param tables + * @param targetGroup + * @throws IOException + */ + void postMoveTables(final ObserverContext ctx, + Set tables, String targetGroup) throws IOException; + + /** + * Called before a new region server group is added + * @param ctx the environment to interact with the framework and master + * @param name group name + * @throws IOException + */ + void preAddGroup(final ObserverContext ctx, + String name) throws IOException; + + /** + * Called after a new region server group is added + * @param ctx the environment to interact with the framework and master + * @param name group name + * @throws IOException + */ + void postAddGroup(final ObserverContext ctx, + String name) throws IOException; + + /** + * Called before a region server group is removed + * @param ctx the environment to interact with the framework and master + * @param name group name + * @throws IOException + */ + void preRemoveGroup(final ObserverContext ctx, + String name) throws IOException; + + /** + * Called after a region server group is removed + * @param ctx the environment to interact with the framework and master + * @param name group name + * @throws IOException + */ + void postRemoveGroup(final ObserverContext ctx, + String name) throws IOException; + + /** + * Called before the balancer runs for a region server group + * @param ctx the environment to interact with the framework and master + * @param groupName group name + * @throws IOException + */ + void preBalanceGroup(final ObserverContext ctx, + String groupName) throws IOException; + + /** + * Called after the balancer runs for a region server group + * @param ctx the environment to interact with the framework and master + * @param groupName group name + * @param balancerRan whether the group balance ran + * @throws IOException + */ + void postBalanceGroup(final ObserverContext ctx, + String groupName, boolean balancerRan) throws IOException; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupAdminServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupAdminServer.java new file mode 100644 index 0000000..551aa6a --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupAdminServer.java @@ -0,0 +1,493 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.group; + +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; +import com.google.common.collect.Sets; +import org.apache.commons.lang.StringUtils; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.HostPort; +import org.apache.hadoop.hbase.NamespaceDescriptor; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.constraint.ConstraintException; +import org.apache.hadoop.hbase.master.AssignmentManager; +import org.apache.hadoop.hbase.master.LoadBalancer; +import org.apache.hadoop.hbase.master.MasterServices; +import org.apache.hadoop.hbase.master.RegionPlan; +import org.apache.hadoop.hbase.master.RegionState; +import org.apache.hadoop.hbase.master.ServerManager; +import org.apache.hadoop.metrics.util.MBeanUtil; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; + +/** + * Service to support Region Server Grouping (HBase-6721) + */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public class GroupAdminServer implements GroupAdmin { + private static final Log LOG = LogFactory.getLog(GroupAdminServer.class); + + private MasterServices master; + //List of servers that are being moved from one group to another + //Key=host:port,Value=targetGroup + ConcurrentMap serversInTransition = + new ConcurrentHashMap(); + private GroupInfoManagerImpl groupInfoManager; + + public GroupAdminServer(MasterServices master) throws IOException { + this.master = master; + groupInfoManager = new GroupInfoManagerImpl(master); + registerMBean(); + } + + @Override + public GroupInfo getGroupInfo(String groupName) throws IOException { + return getGroupInfoManager().getGroup(groupName); + } + + + @Override + public GroupInfo getGroupInfoOfTable(TableName tableName) throws IOException { + String groupName = getGroupInfoManager().getGroupOfTable(tableName); + if (groupName == null) { + if(master.getTableDescriptors().get(tableName) == null) { + throw new ConstraintException("Table "+tableName+" does not exist"); + } + throw new ConstraintException("Table "+tableName+" has no group"); + } + return getGroupInfoManager().getGroup(groupName); + } + + @Override + public void moveServers(Set servers, String targetGroupName) + throws IOException { + if (servers == null) { + throw new DoNotRetryIOException( + "The list of servers cannot be null."); + } + if (StringUtils.isEmpty(targetGroupName)) { + throw new DoNotRetryIOException("The target group cannot be null."); + } + if(servers.size() < 1) { + return; + } + + GroupInfo targetGrp = getGroupInfo(targetGroupName); + GroupInfoManager manager = getGroupInfoManager(); + synchronized (manager) { + + if (master.getCoprocessorHost() != null) { + master.getCoprocessorHost().preMoveServers(servers, 
targetGroupName); + } + //we only allow a move from a single source group + //so this should be ok + GroupInfo srcGrp = manager.getGroupOfServer(servers.iterator().next()); + //only move online servers (from default) + //or servers from other groups + //this prevents bogus servers from entering groups + if(GroupInfo.DEFAULT_GROUP.equals(srcGrp.getName())) { + Set onlineServers = new HashSet(); + for(ServerName server: master.getServerManager().getOnlineServers().keySet()) { + onlineServers.add(server.getHostPort()); + } + for(HostPort el: servers) { + if(!onlineServers.contains(el)) { + throw new DoNotRetryIOException( + "Server "+el+" is not an online server in default group."); + } + } + } + + if(srcGrp.getServers().size() <= servers.size() && + srcGrp.getTables().size() > 0) { + throw new DoNotRetryIOException("Cannot leave a group "+srcGrp.getName()+ + " that contains tables " +"without servers."); + } + + String sourceGroupName = + getGroupInfoManager().getGroupOfServer(srcGrp.getServers().iterator().next()).getName(); + if(getGroupInfo(targetGroupName) == null) { + throw new ConstraintException("Target group does not exist: "+targetGroupName); + } + + for(HostPort server: servers) { + if (serversInTransition.containsKey(server)) { + throw new DoNotRetryIOException( + "Server list contains a server that is already being moved: "+server); + } + String tmpGroup = getGroupInfoManager().getGroupOfServer(server).getName(); + if (sourceGroupName != null && !tmpGroup.equals(sourceGroupName)) { + throw new DoNotRetryIOException( + "Move server request should only come from one source group. "+ + "Expecting only "+sourceGroupName+" but contains "+tmpGroup); + } + } + + if(sourceGroupName.equals(targetGroupName)) { + throw new ConstraintException( + "Target group is the same as source group: "+targetGroupName); + } + + try { + //update the servers as in transition + for (HostPort server : servers) { + serversInTransition.put(server, targetGroupName); + } + + getGroupInfoManager().moveServers(servers, sourceGroupName, targetGroupName); + boolean found; + List tmpServers = Lists.newArrayList(servers); + do { + found = false; + for (Iterator iter = tmpServers.iterator(); + iter.hasNext(); ) { + HostPort rs = iter.next(); + //get online regions + List regions = new LinkedList(); + for (Map.Entry el : + master.getAssignmentManager().getRegionStates().getRegionAssignments().entrySet()) { + if (el.getValue().getHostPort().equals(rs)) { + regions.add(el.getKey()); + } + } + for (RegionState state : + master.getAssignmentManager().getRegionStates().getRegionsInTransition().values()) { + if (state.getServerName().getHostPort().equals(rs)) { + regions.add(state.getRegion()); + } + } + + //unassign regions for a server + LOG.info("Unassigning " + regions.size() + + " regions from server " + rs + " for move to " + targetGroupName); + if (regions.size() > 0) { + //TODO bulk unassign or throttled unassign? 
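+ //Regions of tables that already belong to the target group are skipped below,
+ //since they may legitimately be hosted on these servers; every other region is
+ //unassigned so the assignment manager re-places it on a member of its group.
+ //The enclosing do/while retries once a second until no regions remain on the
+ //servers being moved.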
+ for (HRegionInfo region : regions) { + //regions might get assigned from tables of target group + //so we need to filter + if (!targetGrp.containsTable(region.getTable())) { + master.getAssignmentManager().unassign(region); + found = true; + } + } + } + if (!found) { + iter.remove(); + } + } + try { + Thread.sleep(1000); + } catch (InterruptedException e) { + LOG.warn("Sleep interrupted", e); + } + } while (found); + } finally { + //remove from transition + for (HostPort server : servers) { + serversInTransition.remove(server); + } + } + + LOG.info("Move server done: "+sourceGroupName+"->"+targetGroupName); + if (master.getCoprocessorHost() != null) { + master.getCoprocessorHost().postMoveServers(servers, targetGroupName); + } + } + } + + @Override + public void moveTables(Set tables, String targetGroup) throws IOException { + if (tables == null) { + throw new ConstraintException( + "The list of servers cannot be null."); + } + if(tables.size() < 1) { + LOG.debug("moveTables() passed an empty set. Ignoring."); + return; + } + GroupInfoManager manager = getGroupInfoManager(); + synchronized (manager) { + if (master.getCoprocessorHost() != null) { + master.getCoprocessorHost().preMoveTables(tables, targetGroup); + } + + if(targetGroup != null) { + GroupInfo destGroup = manager.getGroup(targetGroup); + if(destGroup == null) { + throw new ConstraintException("Target group does not exist: "+targetGroup); + } + if(destGroup.getServers().size() < 1) { + throw new ConstraintException("Target group must have at least one server."); + } + } + + for(TableName table : tables) { + String srcGroup = manager.getGroupOfTable(table); + if(srcGroup != null && srcGroup.equals(targetGroup)) { + throw new ConstraintException("Source group is the same as target group for table "+table+" :"+srcGroup); + } + } + manager.moveTables(tables, targetGroup); + if (master.getCoprocessorHost() != null) { + master.getCoprocessorHost().postMoveTables(tables, targetGroup); + } + } + for(TableName table: tables) { + for(HRegionInfo region: + master.getAssignmentManager().getRegionStates().getRegionsOfTable(table)) { + master.getAssignmentManager().unassign(region); + } + } + } + + @Override + public void addGroup(String name) throws IOException { + if (master.getCoprocessorHost() != null) { + master.getCoprocessorHost().preAddGroup(name); + } + getGroupInfoManager().addGroup(new GroupInfo(name)); + if (master.getCoprocessorHost() != null) { + master.getCoprocessorHost().postAddGroup(name); + } + } + + @Override + public void removeGroup(String name) throws IOException { + GroupInfoManager manager = getGroupInfoManager(); + synchronized (manager) { + if (master.getCoprocessorHost() != null) { + master.getCoprocessorHost().preRemoveGroup(name); + } + GroupInfo groupInfo = getGroupInfoManager().getGroup(name); + if(groupInfo == null) { + throw new DoNotRetryIOException("Group "+name+" does not exist"); + } + int tableCount = groupInfo.getTables().size(); + if (tableCount > 0) { + throw new DoNotRetryIOException("Group "+name+" must have no associated tables: "+tableCount); + } + int serverCount = groupInfo.getServers().size(); + if(serverCount > 0) { + throw new DoNotRetryIOException("Group "+name+" must have no associated servers: "+serverCount); + } + for(NamespaceDescriptor ns: master.listNamespaceDescriptors()) { + String nsGroup = ns.getConfigurationValue(GroupInfo.NAMESPACEDESC_PROP_GROUP); + if(nsGroup != null && nsGroup.equals(name)) { + throw new DoNotRetryIOException("Group "+name+" is referenced by namespace: 
"+ns.getName()); + } + } + manager.removeGroup(name); + if (master.getCoprocessorHost() != null) { + master.getCoprocessorHost().postRemoveGroup(name); + } + } + } + + @Override + public boolean balanceGroup(String groupName) throws IOException { + ServerManager serverManager = master.getServerManager(); + AssignmentManager assignmentManager = master.getAssignmentManager(); + LoadBalancer balancer = master.getLoadBalancer(); + + boolean balancerRan; + synchronized (balancer) { + if (master.getCoprocessorHost() != null) { + master.getCoprocessorHost().preBalanceGroup(groupName); + } + // Only allow one balance run at at time. + Map groupRIT = groupGetRegionsInTransition(groupName); + if (groupRIT.size() > 0) { + LOG.debug("Not running balancer because " + + groupRIT.size() + + " region(s) in transition: " + + StringUtils.abbreviate( + master.getAssignmentManager().getRegionStates().getRegionsInTransition().toString(), + 256)); + return false; + } + if (serverManager.areDeadServersInProgress()) { + LOG.debug("Not running balancer because processing dead regionserver(s): " + + serverManager.getDeadServers()); + return false; + } + + //We balance per group instead of per table + List plans = new ArrayList(); + for(Map.Entry>> tableMap: + getGroupAssignmentsByTable(groupName).entrySet()) { + LOG.info("Creating partial plan for table "+tableMap.getKey()+": "+tableMap.getValue()); + List partialPlans = balancer.balanceCluster(tableMap.getValue()); + LOG.info("Partial plan for table "+tableMap.getKey()+": "+partialPlans); + if (partialPlans != null) { + plans.addAll(partialPlans); + } + } + long startTime = System.currentTimeMillis(); + balancerRan = plans != null; + if (plans != null && !plans.isEmpty()) { + LOG.info("Group balance "+groupName+" starting with plan count: "+plans.size()); + for (RegionPlan plan: plans) { + LOG.info("balance " + plan); + assignmentManager.balance(plan); + } + LOG.info("Group balance "+groupName+" completed after "+(System.currentTimeMillis()-startTime)+" seconds"); + } + if (master.getCoprocessorHost() != null) { + master.getCoprocessorHost().postBalanceGroup(groupName, balancerRan); + } + } + return balancerRan; + } + + @Override + public List listGroups() throws IOException { + return getGroupInfoManager().listGroups(); + } + + @Override + public GroupInfo getGroupOfServer(HostPort hostPort) throws IOException { + return getGroupInfoManager().getGroupOfServer(hostPort); + } + + @InterfaceAudience.Private + public GroupInfoManager getGroupInfoManager() throws IOException { + return groupInfoManager; + } + + private Map groupGetRegionsInTransition(String groupName) + throws IOException { + Map rit = Maps.newTreeMap(); + AssignmentManager am = master.getAssignmentManager(); + GroupInfo groupInfo = getGroupInfo(groupName); + for(TableName tableName : groupInfo.getTables()) { + for(HRegionInfo regionInfo: am.getRegionStates().getRegionsOfTable(tableName)) { + RegionState state = + master.getAssignmentManager().getRegionStates().getRegionTransitionState(regionInfo); + if(state != null) { + rit.put(regionInfo.getEncodedName(), state); + } + } + } + return rit; + } + + private Map>> + getGroupAssignmentsByTable(String groupName) throws IOException { + Map>> result = Maps.newHashMap(); + GroupInfo groupInfo = getGroupInfo(groupName); + Map>> assignments = Maps.newHashMap(); + for(Map.Entry entry: + master.getAssignmentManager().getRegionStates().getRegionAssignments().entrySet()) { + TableName currTable = entry.getKey().getTable(); + ServerName currServer = 
entry.getValue(); + HRegionInfo currRegion = entry.getKey(); + if(groupInfo.getTables().contains(currTable)) { + if(!assignments.containsKey(entry.getKey().getTable())) { + assignments.put(currTable, new HashMap>()); + } + if(!assignments.get(currTable).containsKey(currServer)) { + assignments.get(currTable).put(currServer, new ArrayList()); + } + assignments.get(currTable).get(currServer).add(currRegion); + } + } + + Map> serverMap = Maps.newHashMap(); + for(ServerName serverName: master.getServerManager().getOnlineServers().keySet()) { + if(groupInfo.getServers().contains(serverName.getHostPort())) { + serverMap.put(serverName, Collections.EMPTY_LIST); + } + } + + //add all tables that are members of the group + for(TableName tableName : groupInfo.getTables()) { + if(assignments.containsKey(tableName)) { + result.put(tableName, new HashMap>()); + result.get(tableName).putAll(serverMap); + result.get(tableName).putAll(assignments.get(tableName)); + LOG.debug("Adding assignments for "+tableName+": "+assignments.get(tableName)); + } + } + + return result; + } + + void registerMBean() { + MXBeanImpl mxBeanInfo = + MXBeanImpl.init(this, master); + MBeanUtil.registerMBean("Group", "Group", mxBeanInfo); + LOG.info("Registered Group MXBean"); + } + + public void prepareGroupForTable(HTableDescriptor desc) throws IOException { + String groupName = + master.getNamespaceDescriptor(desc.getTableName().getNamespaceAsString()) + .getConfigurationValue(GroupInfo.NAMESPACEDESC_PROP_GROUP); + if (groupName == null) { + groupName = GroupInfo.DEFAULT_GROUP; + } + GroupInfo groupInfo = getGroupInfo(groupName); + if (groupInfo == null) { + throw new ConstraintException("Group " + groupName + " does not exist."); + } + if (!groupInfo.containsTable(desc.getTableName())) { + LOG.debug("Pre-moving table " + desc.getTableName() + " to group " + groupName); + moveTables(Sets.newHashSet(desc.getTableName()), groupName); + } + } + + public void cleanupGroupForTable(TableName tableName) throws IOException { + try { + GroupInfo group = getGroupInfoOfTable(tableName); + if (group != null) { + LOG.debug("Removing deleted table from table group " + group.getName()); + moveTables(Sets.newHashSet(tableName), null); + } + } catch (ConstraintException ex) { + LOG.debug("Failed to perform group information cleanup for table: " + tableName, ex); + } catch (IOException ex) { + LOG.debug("Failed to perform group information cleanup for table: " + tableName, ex); + } + } + + @Override + public void close() throws IOException { + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupBasedLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupBasedLoadBalancer.java new file mode 100644 index 0000000..85d203c --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupBasedLoadBalancer.java @@ -0,0 +1,411 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.group; + +import com.google.common.collect.ArrayListMultimap; +import com.google.common.collect.LinkedListMultimap; +import com.google.common.collect.ListMultimap; +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.ClusterStatus; +import org.apache.hadoop.hbase.HBaseIOException; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HostPort; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.master.LoadBalancer; +import org.apache.hadoop.hbase.master.MasterServices; +import org.apache.hadoop.hbase.master.RegionPlan; +import org.apache.hadoop.hbase.master.balancer.StochasticLoadBalancer; +import org.apache.hadoop.util.ReflectionUtils; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.TreeMap; + +/** + * GroupBasedLoadBalancer, used when Region Server Grouping is configured (HBase-6721) + * It does region balance based on a table's group membership. + * + * Most assignment methods contain two exclusive code paths: Online - when the group + * table is online and Offline - when it is unavailable. + * + * During Offline, assignments are assigned based on cached information in zookeeper. + * If unavailable (ie bootstrap) then regions are assigned randomly. + * + * Once the GROUP table has been assigned, the balancer switches to Online and will then + * start providing appropriate assignments for user tables. 
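+ *
+ * For example, if table "t1" belongs to group "batch", balanceCluster() only emits
+ * RegionPlans that move t1's regions between the servers of "batch"; plans never
+ * cross group boundaries.
+ *
+ * A minimal sketch of wiring this balancer in (assuming the standard
+ * "hbase.master.loadbalancer.class" configuration key):
+ * <pre>
+ * Configuration conf = HBaseConfiguration.create();
+ * conf.set("hbase.master.loadbalancer.class",
+ *   GroupBasedLoadBalancer.class.getName());
+ * </pre>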
+ * + */ +@InterfaceAudience.Public +public class GroupBasedLoadBalancer implements GroupableBalancer, LoadBalancer { + /** Config for pluggable load balancers */ + public static final String HBASE_GROUP_LOADBALANCER_CLASS = "hbase.group.grouploadbalancer.class"; + + private static final Log LOG = LogFactory.getLog(GroupBasedLoadBalancer.class); + + private Configuration config; + private ClusterStatus clusterStatus; + private MasterServices masterServices; + private GroupInfoManager groupManager; + private LoadBalancer internalBalancer; + + //used during reflection by LoadBalancerFactory + @InterfaceAudience.Private + public GroupBasedLoadBalancer() { + } + + //This constructor should only be used for unit testing + @InterfaceAudience.Private + public GroupBasedLoadBalancer(GroupInfoManager groupManager) { + this.groupManager = groupManager; + } + + @Override + public Configuration getConf() { + return config; + } + + @Override + public void setConf(Configuration conf) { + this.config = conf; + } + + @Override + public void setClusterStatus(ClusterStatus st) { + this.clusterStatus = st; + } + + @Override + public void setMasterServices(MasterServices masterServices) { + this.masterServices = masterServices; + } + + @Override + public List balanceCluster(Map> clusterState) + throws HBaseIOException { + + if (!isOnline()) { + throw new IllegalStateException(GroupInfoManager.GROUP_TABLE_NAME+ + " is not online, unable to perform balance"); + } + + Map> correctedState = correctAssignments(clusterState); + List regionPlans = new ArrayList(); + try { + for (GroupInfo info : groupManager.listGroups()) { + Map> groupClusterState = new HashMap>(); + for (HostPort sName : info.getServers()) { + for(ServerName curr: clusterState.keySet()) { + if(curr.getHostPort().equals(sName)) { + groupClusterState.put(curr, correctedState.get(curr)); + } + } + } + List groupPlans = this.internalBalancer + .balanceCluster(groupClusterState); + if (groupPlans != null) { + regionPlans.addAll(groupPlans); + } + } + } catch (IOException exp) { + LOG.warn("Exception while balancing cluster.", exp); + regionPlans.clear(); + } + return regionPlans; + } + + @Override + public Map> roundRobinAssignment ( + List regions, List servers) throws HBaseIOException { + Map> assignments = Maps.newHashMap(); + ListMultimap regionMap = ArrayListMultimap.create(); + ListMultimap serverMap = ArrayListMultimap.create(); + generateGroupMaps(regions, servers, regionMap, serverMap); + for(String groupKey : regionMap.keySet()) { + if (regionMap.get(groupKey).size() > 0) { + Map> result = + this.internalBalancer.roundRobinAssignment( + regionMap.get(groupKey), + serverMap.get(groupKey)); + if(result != null) { + assignments.putAll(result); + } + } + } + return assignments; + } + + @Override + public Map> retainAssignment( + Map regions, List servers) throws HBaseIOException { + try { + Map> assignments = new TreeMap>(); + ListMultimap groupToRegion = ArrayListMultimap.create(); + Set misplacedRegions = getMisplacedRegions(regions); + for (HRegionInfo region : regions.keySet()) { + if (!misplacedRegions.contains(region)) { + String groupName = groupManager.getGroupOfTable(region.getTable()); + groupToRegion.put(groupName, region); + } + } + // Now the "groupToRegion" map has only the regions which have correct + // assignments. 
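+ // For each group, delegate to the internal balancer's retainAssignment, restricting
+ // the candidate servers to the group's members that are currently online.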
+ for (String key : groupToRegion.keySet()) { + Map currentAssignmentMap = new TreeMap(); + List regionList = groupToRegion.get(key); + GroupInfo info = groupManager.getGroup(key); + List candidateList = filterOfflineServers(info, servers); + for (HRegionInfo region : regionList) { + currentAssignmentMap.put(region, regions.get(region)); + } + if(candidateList.size() > 0) { + assignments.putAll(this.internalBalancer.retainAssignment( + currentAssignmentMap, candidateList)); + } + } + + for (HRegionInfo region : misplacedRegions) { + String groupName = groupManager.getGroupOfTable( + region.getTable()); + GroupInfo info = groupManager.getGroup(groupName); + List candidateList = filterOfflineServers(info, servers); + ServerName server = this.internalBalancer.randomAssignment(region, + candidateList); + if (server != null) { + if (!assignments.containsKey(server)) { + assignments.put(server, new ArrayList()); + } + assignments.get(server).add(region); + } else { + //if no server is available, assign to the bogus server so it ends up in RIT + if(!assignments.containsKey(BOGUS_SERVER_NAME)) { + assignments.put(BOGUS_SERVER_NAME, new ArrayList()); + } + assignments.get(BOGUS_SERVER_NAME).add(region); + } + } + return assignments; + } catch (IOException e) { + throw new HBaseIOException("Failed to do online retain assignment", e); + } + } + + @Override + public Map immediateAssignment( + List regions, List servers) throws HBaseIOException { + Map assignments = Maps.newHashMap(); + ListMultimap regionMap = LinkedListMultimap.create(); + ListMultimap serverMap = LinkedListMultimap.create(); + generateGroupMaps(regions, servers, regionMap, serverMap); + for(String groupKey : regionMap.keySet()) { + if (regionMap.get(groupKey).size() > 0) { + assignments.putAll( + this.internalBalancer.immediateAssignment( + regionMap.get(groupKey), + serverMap.get(groupKey))); + } + } + return assignments; + } + + @Override + public ServerName randomAssignment(HRegionInfo region, + List servers) throws HBaseIOException { + ListMultimap regionMap = LinkedListMultimap.create(); + ListMultimap serverMap = LinkedListMultimap.create(); + generateGroupMaps(Lists.newArrayList(region), servers, regionMap, serverMap); + List filteredServers = serverMap.get(regionMap.keySet().iterator().next()); + return this.internalBalancer.randomAssignment(region, filteredServers); + } + + private void generateGroupMaps( + List regions, + List servers, + ListMultimap regionMap, + ListMultimap serverMap) throws HBaseIOException { + try { + for (HRegionInfo region : regions) { + String groupName = groupManager.getGroupOfTable(region.getTable()); + if(groupName == null) { + LOG.warn("Group for table "+region.getTable()+" is null"); + } + regionMap.put(groupName, region); + } + for (String groupKey : regionMap.keySet()) { + GroupInfo info = groupManager.getGroup(groupKey); + serverMap.putAll(groupKey, filterOfflineServers(info, servers)); + if(serverMap.get(groupKey).size() < 1) { + serverMap.put(groupKey, BOGUS_SERVER_NAME); + } + } + } catch(IOException e) { + throw new HBaseIOException("Failed to generate group maps", e); + } + } + + private List filterOfflineServers(GroupInfo groupInfo, + List onlineServers) { + if (groupInfo != null) { + return filterServers(groupInfo.getServers(), onlineServers); + } else { + LOG.debug("Group Information found to be null. Some regions might be unassigned."); + return Collections.EMPTY_LIST; + } + } + + /** + * Filter servers based on the online servers. 
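+ * Matching is done on host and port only; the ServerName start code is ignored.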
+ * + * @param servers + * the servers + * @param onlineServers + * List of servers which are online. + * @return the list + */ + private List filterServers(Collection servers, + Collection onlineServers) { + ArrayList finalList = new ArrayList(); + for (HostPort server : servers) { + for(ServerName curr: onlineServers) { + if(curr.getHostPort().equals(server)) { + finalList.add(curr); + } + } + } + return finalList; + } + + private ListMultimap groupRegions( + List regionList) throws IOException { + ListMultimap regionGroup = ArrayListMultimap + .create(); + for (HRegionInfo region : regionList) { + String groupName = groupManager.getGroupOfTable(region.getTable()); + regionGroup.put(groupName, region); + } + return regionGroup; + } + + private Set getMisplacedRegions( + Map regions) throws IOException { + Set misplacedRegions = new HashSet(); + for (HRegionInfo region : regions.keySet()) { + ServerName assignedServer = regions.get(region); + GroupInfo info = groupManager.getGroup(groupManager.getGroupOfTable(region.getTable())); + if (assignedServer != null && + (info == null || !info.containsServer(assignedServer.getHostPort()))) { + LOG.warn("Found misplaced region: "+region.getRegionNameAsString()+ + " on server: "+assignedServer+ + " found in group: "+groupManager.getGroupOfServer(assignedServer.getHostPort())+ + " outside of group: "+info.getName()); + misplacedRegions.add(region); + } + } + return misplacedRegions; + } + + private Map> correctAssignments( + Map> existingAssignments){ + Map> correctAssignments = new TreeMap>(); + List misplacedRegions = new LinkedList(); + for (ServerName sName : existingAssignments.keySet()) { + correctAssignments.put(sName, new LinkedList()); + List regions = existingAssignments.get(sName); + for (HRegionInfo region : regions) { + GroupInfo info = null; + try { + info = groupManager.getGroup(groupManager.getGroupOfTable(region.getTable())); + }catch(IOException exp){ + LOG.debug("Group information null for region of table " + region.getTable(), + exp); + } + if ((info == null) || (!info.containsServer(sName.getHostPort()))) { + // Misplaced region. + misplacedRegions.add(region); + } else { + correctAssignments.get(sName).add(region); + } + } + } + + //TODO bulk unassign? + //unassign misplaced regions, so that they are assigned to correct groups. 
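+ //The unassigned regions flow back through the assignment manager, which consults
+ //this balancer again and places them on servers of their own group.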
+ for(HRegionInfo info: misplacedRegions) { + this.masterServices.getAssignmentManager().unassign(info); + } + return correctAssignments; + } + + @Override + public void initialize() throws HBaseIOException { + // Create the balancer + Class balancerKlass = config.getClass( + HBASE_GROUP_LOADBALANCER_CLASS, + StochasticLoadBalancer.class, LoadBalancer.class); + internalBalancer = ReflectionUtils.newInstance(balancerKlass, config); + internalBalancer.setClusterStatus(clusterStatus); + internalBalancer.setMasterServices(masterServices); + internalBalancer.setConf(config); + internalBalancer.initialize(); + } + + public boolean isOnline() { + return groupManager != null && groupManager.isOnline(); + } + + @InterfaceAudience.Private + public GroupInfoManager getGroupInfoManager() throws IOException { + return groupManager; + } + + @Override + public void regionOnline(HRegionInfo regionInfo, ServerName sn) { + } + + @Override + public void regionOffline(HRegionInfo regionInfo) { + } + + @Override + public void stop(String why) { + } + + @Override + public boolean isStopped() { + return false; + } + + @Override + public void setGroupInfoManager(GroupInfoManager groupInfoManager) throws IOException { + this.groupManager = groupInfoManager; + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupInfoManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupInfoManager.java new file mode 100644 index 0000000..4ed7fa8 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupInfoManager.java @@ -0,0 +1,129 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.group; + +import org.apache.hadoop.hbase.HostPort; +import org.apache.hadoop.hbase.NamespaceDescriptor; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.util.Bytes; + +import java.io.IOException; +import java.util.List; +import java.util.Set; + +/** + * Interface used to manage GroupInfo storage. An implementation + * has the option to support offline mode. + * See {@link GroupBasedLoadBalancer} + */ +public interface GroupInfoManager { + //Assigned before user tables + public static final TableName GROUP_TABLE_NAME = + TableName.valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR,"rsgroup"); + public static final byte[] GROUP_TABLE_NAME_BYTES = GROUP_TABLE_NAME.toBytes(); + public static final String groupZNode = "groupInfo"; + public static final byte[] META_FAMILY_BYTES = Bytes.toBytes("m"); + public static final byte[] ROW_KEY = {0}; + + + /** + * Adds the group. + * + * @param groupInfo the group name + * @throws java.io.IOException Signals that an I/O exception has occurred. 
+ */ + void addGroup(GroupInfo groupInfo) throws IOException; + + /** + * Remove a region server group. + * + * @param groupName the group name + * @throws java.io.IOException Signals that an I/O exception has occurred. + */ + void removeGroup(String groupName) throws IOException; + + /** + * move servers to a new group. + * @param hostPorts list of servers, must be part of the same group + * @param srcGroup + * @param dstGroup + * @return true if move was successful + * @throws java.io.IOException + */ + boolean moveServers(Set hostPorts, String srcGroup, String dstGroup) throws IOException; + + /** + * Gets the group info of server. + * + * @param hostPort the server + * @return An instance of GroupInfo + */ + GroupInfo getGroupOfServer(HostPort hostPort) throws IOException; + + /** + * Gets the group information. + * + * @param groupName the group name + * @return An instance of GroupInfo + */ + GroupInfo getGroup(String groupName) throws IOException; + + /** + * Get the group membership of a table + * @param tableName + * @return Group name of table + * @throws java.io.IOException + */ + String getGroupOfTable(TableName tableName) throws IOException; + + /** + * Set the group membership of a set of tables + * + * @param tableNames + * @param groupName + * @throws java.io.IOException + */ + void moveTables(Set tableNames, String groupName) throws IOException; + + /** + * List the groups + * + * @return list of GroupInfo + * @throws java.io.IOException + */ + List listGroups() throws IOException; + + /** + * Refresh/reload the group information from + * the persistent store + * + * @throws java.io.IOException + */ + void refresh() throws IOException; + + /** + * Whether the manager is able to fully + * return group metadata + * + * @return + */ + boolean isOnline(); +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupInfoManagerImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupInfoManagerImpl.java new file mode 100644 index 0000000..83c0c2d --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupInfoManagerImpl.java @@ -0,0 +1,667 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.group; + +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.HostPort; +import org.apache.hadoop.hbase.NamespaceDescriptor; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Delete; +import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.client.HConnection; +import org.apache.hadoop.hbase.client.HConnectionManager; +import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.client.HTableInterface; +import org.apache.hadoop.hbase.client.MetaScanner; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.RowMutations; +import org.apache.hadoop.hbase.master.MasterServices; +import org.apache.hadoop.hbase.master.ServerListener; +import org.apache.hadoop.hbase.master.handler.CreateTableHandler; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos; +import org.apache.hadoop.hbase.regionserver.DisabledRegionSplitPolicy; +import org.apache.hadoop.hbase.security.access.AccessControlLists; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.zookeeper.ZKTable; +import org.apache.hadoop.hbase.zookeeper.ZKUtil; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.zookeeper.KeeperException; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.NavigableSet; +import java.util.Set; +import java.util.TreeSet; +import java.util.concurrent.atomic.AtomicBoolean; + +/** + * This is an implementation of {@link GroupInfoManager}. Which makes + * use of an HBase table as the persistence store for the group information. + * It also makes use of zookeeper to store group information needed + * for bootstrapping during offline mode. + */ +public class GroupInfoManagerImpl implements GroupInfoManager, ServerListener { + private static final Log LOG = LogFactory.getLog(GroupInfoManagerImpl.class); + + /** Table descriptor for hbase:rsgroup catalog table */ + private final static HTableDescriptor GROUP_TABLE_DESC; + static { + GROUP_TABLE_DESC = new HTableDescriptor(GROUP_TABLE_NAME_BYTES); + GROUP_TABLE_DESC.addFamily(new HColumnDescriptor(META_FAMILY_BYTES)); + GROUP_TABLE_DESC.setRegionSplitPolicyClassName(DisabledRegionSplitPolicy.class.getName()); + } + + //Access to this map should always be synchronized. 
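The GroupInfoManager contract above is what the rest of this patch programs against: the balancer asks it which servers belong to a table's group, and the admin endpoints mutate it. As a rough, hypothetical illustration (the helper class below is not part of this patch; it only uses methods declared on the interface plus GroupInfo.DEFAULT_GROUP and GroupInfo.getServers()):

// Hypothetical helper, not part of this patch: resolve the servers allowed to host a
// table's regions using only the GroupInfoManager contract defined above.
import java.io.IOException;
import java.util.Set;

import org.apache.hadoop.hbase.HostPort;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.group.GroupInfo;
import org.apache.hadoop.hbase.group.GroupInfoManager;

public class GroupLookupExample {
  /** Servers that may host regions of the given table, falling back to the default group. */
  public static Set<HostPort> candidateServers(GroupInfoManager manager, TableName table)
      throws IOException {
    String groupName = manager.getGroupOfTable(table);
    GroupInfo group = manager.getGroup(groupName != null ? groupName : GroupInfo.DEFAULT_GROUP);
    return group.getServers();
  }
}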
+ private volatile Map groupMap; + private volatile Map tableMap; + private MasterServices master; + private HTableInterface groupTable; + private ZooKeeperWatcher watcher; + private GroupStartupWorker groupStartupWorker; + //contains list of groups that were last flushed to persistent store + private volatile Set prevGroups; + private GroupSerDe groupSerDe; + private DefaultServerUpdater defaultServerUpdater; + + + public GroupInfoManagerImpl(MasterServices master) throws IOException { + this.groupMap = Collections.EMPTY_MAP; + this.tableMap = Collections.EMPTY_MAP; + groupSerDe = new GroupSerDe(); + this.master = master; + this.watcher = master.getZooKeeper(); + groupStartupWorker = new GroupStartupWorker(this, master); + prevGroups = new HashSet(); + refresh(); + groupStartupWorker.start(); + defaultServerUpdater = new DefaultServerUpdater(this); + master.getServerManager().registerListener(this); + defaultServerUpdater.start(); + } + + /** + * Adds the group. + * + * @param groupInfo the group name + */ + @Override + public synchronized void addGroup(GroupInfo groupInfo) throws IOException { + if (groupMap.get(groupInfo.getName()) != null || + groupInfo.getName().equals(GroupInfo.DEFAULT_GROUP)) { + throw new DoNotRetryIOException("Group already exists: "+groupInfo.getName()); + } + Map newGroupMap = Maps.newHashMap(groupMap); + newGroupMap.put(groupInfo.getName(), groupInfo); + flushConfig(newGroupMap); + } + + @Override + public synchronized boolean moveServers(Set hostPorts, String srcGroup, + String dstGroup) throws IOException { + GroupInfo src = new GroupInfo(getGroup(srcGroup)); + GroupInfo dst = new GroupInfo(getGroup(dstGroup)); + boolean foundOne = false; + for(HostPort el: hostPorts) { + foundOne = src.removeServer(el) || foundOne; + dst.addServer(el); + } + + Map newGroupMap = Maps.newHashMap(groupMap); + newGroupMap.put(src.getName(), src); + newGroupMap.put(dst.getName(), dst); + + flushConfig(newGroupMap); + return foundOne; + } + + /** + * Gets the group info of server. + * + * @param hostPort the server + * @return An instance of GroupInfo. + */ + @Override + public GroupInfo getGroupOfServer(HostPort hostPort) throws IOException { + for (GroupInfo info : groupMap.values()) { + if (info.containsServer(hostPort)){ + return info; + } + } + return getGroup(GroupInfo.DEFAULT_GROUP); + } + + /** + * Gets the group information. + * + * @param groupName + * the group name + * @return An instance of GroupInfo + */ + @Override + public GroupInfo getGroup(String groupName) throws IOException { + GroupInfo groupInfo = groupMap.get(groupName); + return groupInfo; + } + + + + @Override + public String getGroupOfTable(TableName tableName) throws IOException { + return tableMap.get(tableName); + } + + @Override + public synchronized void moveTables(Set tableNames, String groupName) throws IOException { + if (groupName != null && !groupMap.containsKey(groupName)) { + throw new DoNotRetryIOException("Group "+groupName+" does not exist or is a special group"); + } + Map newGroupMap = Maps.newHashMap(groupMap); + for(TableName tableName: tableNames) { + if (tableMap.containsKey(tableName)) { + GroupInfo src = new GroupInfo(groupMap.get(tableMap.get(tableName))); + src.removeTable(tableName); + newGroupMap.put(src.getName(), src); + } + if(groupName != null) { + GroupInfo dst = new GroupInfo(newGroupMap.get(groupName)); + dst.addTable(tableName); + newGroupMap.put(dst.getName(), dst); + } + } + + flushConfig(newGroupMap); + } + + + /** + * Delete a region server group. 
+ * + * @param groupName the group name + * @throws java.io.IOException Signals that an I/O exception has occurred. + */ + @Override + public synchronized void removeGroup(String groupName) throws IOException { + if (!groupMap.containsKey(groupName) || groupName.equals(GroupInfo.DEFAULT_GROUP)) { + throw new DoNotRetryIOException("Group "+groupName+" does not exist or is a reserved group"); + } + Map newGroupMap = Maps.newHashMap(groupMap); + newGroupMap.remove(groupName); + flushConfig(newGroupMap); + } + + @Override + public List listGroups() throws IOException { + List list = Lists.newLinkedList(groupMap.values()); + return list; + } + + @Override + public boolean isOnline() { + return groupStartupWorker.isOnline(); + } + + @Override + public synchronized void refresh() throws IOException { + refresh(false); + } + + private synchronized void refresh(boolean forceOnline) throws IOException { + List groupList = new LinkedList(); + + //overwrite anything read from zk, group table is source of truth + //if online read from GROUP table + if (forceOnline || isOnline()) { + LOG.debug("Refreshing in Online mode."); + if (groupTable == null) { + groupTable = new HTable(master.getConfiguration(), GROUP_TABLE_NAME); + } + groupList.addAll(groupSerDe.retrieveGroupList(groupTable)); + } else { + LOG.debug("Refershing in Offline mode."); + String groupBasePath = ZKUtil.joinZNode(watcher.baseZNode, groupZNode); + groupList.addAll(groupSerDe.retrieveGroupList(watcher, groupBasePath)); + } + + //refresh default group, prune + NavigableSet orphanTables = new TreeSet(); + for(String entry: master.getTableDescriptors().getAll().keySet()) { + orphanTables.add(TableName.valueOf(entry)); + } + + List specialTables; + if(!master.isInitialized()) { + specialTables = new ArrayList(); + specialTables.add(AccessControlLists.ACL_TABLE_NAME); + specialTables.add(TableName.META_TABLE_NAME); + specialTables.add(TableName.NAMESPACE_TABLE_NAME); + specialTables.add(GroupInfoManager.GROUP_TABLE_NAME); + } else { + specialTables = + master.listTableNamesByNamespace(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR); + } + + for(TableName table : specialTables) { + orphanTables.add(table); + } + for(GroupInfo group: groupList) { + if(!group.getName().equals(GroupInfo.DEFAULT_GROUP)) { + orphanTables.removeAll(group.getTables()); + } + } + + //This is added to the last of the list + //so it overwrites the default group loaded + //from region group table or zk + groupList.add(new GroupInfo(GroupInfo.DEFAULT_GROUP, + new TreeSet(getDefaultServers()), + orphanTables)); + + + //populate the data + HashMap newGroupMap = Maps.newHashMap(); + HashMap newTableMap = Maps.newHashMap(); + for (GroupInfo group : groupList) { + newGroupMap.put(group.getName(), group); + for(TableName table: group.getTables()) { + newTableMap.put(table, group.getName()); + } + } + groupMap = Collections.unmodifiableMap(newGroupMap); + tableMap = Collections.unmodifiableMap(newTableMap); + + prevGroups.clear(); + prevGroups.addAll(groupMap.keySet()); + } + + private synchronized Map flushConfigTable(Map newGroupMap) throws IOException { + Map newTableMap = Maps.newHashMap(); + Put put = new Put(ROW_KEY); + Delete delete = new Delete(ROW_KEY); + + //populate deletes + for(String groupName : prevGroups) { + if(!newGroupMap.containsKey(groupName)) { + delete.deleteColumns(META_FAMILY_BYTES, Bytes.toBytes(groupName)); + } + } + + //populate puts + for(GroupInfo groupInfo : newGroupMap.values()) { + RSGroupProtos.GroupInfo proto = 
ProtobufUtil.toProtoGroupInfo(groupInfo); + put.add(META_FAMILY_BYTES, + Bytes.toBytes(groupInfo.getName()), + proto.toByteArray()); + for(TableName entry: groupInfo.getTables()) { + newTableMap.put(entry, groupInfo.getName()); + } + } + + RowMutations rowMutations = new RowMutations(ROW_KEY); + if(put.size() > 0) { + rowMutations.add(put); + } + if(delete.size() > 0) { + rowMutations.add(delete); + } + if(rowMutations.getMutations().size() > 0) { + groupTable.mutateRow(rowMutations); + } + return newTableMap; + } + + private synchronized void flushConfig(Map newGroupMap) throws IOException { + Map newTableMap; + //this should only not enter during startup + if(!isOnline()) { + LOG.error("Still in Offline mode."); + throw new IOException("Still in Offline mode."); + } + + newTableMap = flushConfigTable(newGroupMap); + + //make changes visible since it has been + //persisted in the source of truth + groupMap = Collections.unmodifiableMap(newGroupMap); + tableMap = Collections.unmodifiableMap(newTableMap); + + + try { + //Write zk data first since that's what we'll read first + String groupBasePath = ZKUtil.joinZNode(watcher.baseZNode, groupZNode); + ZKUtil.createAndFailSilent(watcher, groupBasePath, ProtobufUtil.PB_MAGIC); + + List zkOps = new ArrayList(newGroupMap.size()); + for(String groupName : prevGroups) { + if(!newGroupMap.containsKey(groupName)) { + String znode = ZKUtil.joinZNode(groupBasePath, groupName); + zkOps.add(ZKUtil.ZKUtilOp.deleteNodeFailSilent(znode)); + } + } + + + for(GroupInfo groupInfo : newGroupMap.values()) { + String znode = ZKUtil.joinZNode(groupBasePath, groupInfo.getName()); + RSGroupProtos.GroupInfo proto = ProtobufUtil.toProtoGroupInfo(groupInfo); + LOG.debug("Updating znode: "+znode); + ZKUtil.createAndFailSilent(watcher, znode); + zkOps.add(ZKUtil.ZKUtilOp.deleteNodeFailSilent(znode)); + zkOps.add(ZKUtil.ZKUtilOp.createAndFailSilent(znode, + ProtobufUtil.prependPBMagic(proto.toByteArray()))); + } + LOG.debug("Writing ZK GroupInfo count: " + zkOps.size()); + + ZKUtil.multiOrSequential(watcher, zkOps, false); + } catch (KeeperException e) { + LOG.error("Failed to write to groupZNode", e); + master.abort("Failed to write to groupZNode", e); + throw new IOException("Failed to write to groupZNode",e); + } + + prevGroups.clear(); + prevGroups.addAll(newGroupMap.keySet()); + } + + private List getOnlineRS() throws IOException { + if (master != null) { + return master.getServerManager().getOnlineServersList(); + } + try { + LOG.debug("Reading online RS from zookeeper"); + List servers = new LinkedList(); + for (String el: ZKUtil.listChildrenNoWatch(watcher, watcher.rsZNode)) { + servers.add(ServerName.parseServerName(el)); + } + return servers; + } catch (KeeperException e) { + throw new IOException("Failed to retrieve server list from zookeeper", e); + } + } + + private List getDefaultServers() throws IOException { + List defaultServers = new LinkedList(); + for(ServerName server : getOnlineRS()) { + HostPort hostPort = new HostPort(server.getHostname(), server.getPort()); + boolean found = false; + for(GroupInfo groupInfo : groupMap.values()) { + if(!GroupInfo.DEFAULT_GROUP.equals(groupInfo.getName()) && + groupInfo.containsServer(hostPort)) { + found = true; + break; + } + } + if(!found) { + defaultServers.add(hostPort); + } + } + return defaultServers; + } + + private synchronized void updateDefaultServers( + NavigableSet hostPort) throws IOException { + if(!isOnline()) { + LOG.info("Offline mode. 
Skipping update of default servers"); + return; + } + GroupInfo info = groupMap.get(GroupInfo.DEFAULT_GROUP); + GroupInfo newInfo = new GroupInfo(info.getName(), hostPort, info.getTables()); + HashMap newGroupMap = Maps.newHashMap(groupMap); + newGroupMap.put(newInfo.getName(), newInfo); + flushConfig(newGroupMap); + } + + @Override + public void serverAdded(ServerName serverName) { + defaultServerUpdater.serverChanged(); + } + + @Override + public void serverRemoved(ServerName serverName) { + defaultServerUpdater.serverChanged(); + } + + private static class DefaultServerUpdater extends Thread { + private static final Log LOG = LogFactory.getLog(DefaultServerUpdater.class); + private GroupInfoManagerImpl mgr; + private boolean hasChanged = false; + + public DefaultServerUpdater(GroupInfoManagerImpl mgr) { + this.mgr = mgr; + } + + public void run() { + List prevDefaultServers = new LinkedList(); + while(!mgr.master.isAborted() || !mgr.master.isStopped()) { + try { + LOG.info("Updating default servers."); + List servers = mgr.getDefaultServers(); + Collections.sort(servers); + if(!servers.equals(prevDefaultServers)) { + mgr.updateDefaultServers(new TreeSet(servers)); + prevDefaultServers = servers; + LOG.info("Updated with servers: "+servers.size()); + } + try { + synchronized (this) { + if(!hasChanged) { + wait(); + } + hasChanged = false; + } + } catch (InterruptedException e) { + } + } catch (IOException e) { + LOG.warn("Failed to update default servers", e); + } + } + } + + public void serverChanged() { + synchronized (this) { + hasChanged = true; + this.notify(); + } + } + } + + + private static class GroupStartupWorker extends Thread { + private static final Log LOG = LogFactory.getLog(GroupStartupWorker.class); + + private Configuration conf; + private volatile boolean isOnline = false; + private MasterServices masterServices; + private GroupInfoManagerImpl groupInfoManager; + + public GroupStartupWorker(GroupInfoManagerImpl groupInfoManager, + MasterServices masterServices) { + this.conf = masterServices.getConfiguration(); + this.masterServices = masterServices; + this.groupInfoManager = groupInfoManager; + setName(GroupStartupWorker.class.getName()+"-"+masterServices.getServerName()); + setDaemon(true); + } + + @Override + public void run() { + if(waitForGroupTableOnline()) { + LOG.info("GroupBasedLoadBalancer is now online"); + } + } + + public boolean waitForGroupTableOnline() { + final List foundRegions = new LinkedList(); + final List assignedRegions = new LinkedList(); + final AtomicBoolean found = new AtomicBoolean(false); + boolean createSent = false; + + while (!found.get() && isMasterRunning()) { + foundRegions.clear(); + assignedRegions.clear(); + found.set(true); + try { + final HConnection conn = HConnectionManager.getConnection(conf); + boolean rootMetaFound = + masterServices.getCatalogTracker().verifyMetaRegionLocation(50); + final AtomicBoolean nsFound = new AtomicBoolean(false); + if (rootMetaFound) { + final ZKTable zkTable = new ZKTable(masterServices.getZooKeeper()); + MetaScanner.MetaScannerVisitor visitor = new MetaScanner.MetaScannerVisitorBase() { + @Override + public boolean processRow(Result row) throws IOException { + HRegionInfo info = HRegionInfo.getHRegionInfo(row); + if (info != null) { + Cell serverCell = + row.getColumnLatestCell(HConstants.CATALOG_FAMILY, + HConstants.SERVER_QUALIFIER); + if (GROUP_TABLE_NAME.equals(info.getTable()) && serverCell != null) { + ServerName sn = + ServerName.parseVersionedServerName(CellUtil.cloneValue(serverCell)); 
+ if (sn == null) { + found.set(false); + } else if (zkTable.isEnabledTable(GROUP_TABLE_NAME)) { + try { + ClientProtos.ClientService.BlockingInterface rs = conn.getClient(sn); + ProtobufUtil.get(rs, info.getRegionName(), new Get(ROW_KEY)); + assignedRegions.add(info); + } catch(Exception ex) { + LOG.debug("Caught exception while verifying group region", ex); + } + } + foundRegions.add(info); + } + if (TableName.NAMESPACE_TABLE_NAME.equals(info.getTable())) { + Cell cell = row.getColumnLatestCell(HConstants.CATALOG_FAMILY, + HConstants.SERVER_QUALIFIER); + ServerName sn = null; + if(cell != null) { + sn = ServerName.parseVersionedServerName(CellUtil.cloneValue(cell)); + } + if (zkTable.isEnabledTable(TableName.NAMESPACE_TABLE_NAME)) { + try { + ClientProtos.ClientService.BlockingInterface rs = conn.getClient(sn); + ProtobufUtil.get(rs, info.getRegionName(), new Get(ROW_KEY)); + nsFound.set(true); + } catch(Exception ex) { + LOG.debug("Caught exception while verifying group region", ex); + } + } + } + } + return true; + } + }; + MetaScanner.metaScan(conf, visitor); + // if no regions in meta then we have to create the table + if (foundRegions.size() < 1 && rootMetaFound && !createSent && nsFound.get()) { + groupInfoManager.createGroupTable(masterServices); + createSent = true; + } + LOG.info("Group table: " + GROUP_TABLE_NAME + " isOnline: " + found.get() + + ", regionCount: " + foundRegions.size() + ", assignCount: " + + assignedRegions.size() + ", rootMetaFound: "+rootMetaFound); + found.set(found.get() && assignedRegions.size() == foundRegions.size() + && foundRegions.size() > 0); + } else { + LOG.info("Waiting for catalog tables to come online"); + found.set(false); + } + if (found.get()) { + LOG.debug("With group table online, refreshing cached information."); + groupInfoManager.refresh(true); + isOnline = true; + //flush any inconsistencies between ZK and HTable + groupInfoManager.flushConfig(groupInfoManager.groupMap); + } + } catch(Exception e) { + found.set(false); + LOG.warn("Failed to perform check", e); + } + try { + Thread.sleep(100); + } catch (InterruptedException e) { + LOG.info("Sleep interrupted", e); + } + } + return found.get(); + } + + public boolean isOnline() { + return isOnline; + } + + private boolean isMasterRunning() { + return !masterServices.isAborted() && !masterServices.isStopped(); + } + } + + private void createGroupTable(MasterServices masterServices) throws IOException { + HRegionInfo newRegions[] = new HRegionInfo[]{ + new HRegionInfo(GROUP_TABLE_DESC.getTableName(), null, null)}; + //we need to create the table this way to bypass + //checkInitialized + masterServices.getExecutorService() + .submit(new CreateTableHandler( + masterServices, + masterServices.getMasterFileSystem(), + GROUP_TABLE_DESC, + masterServices.getConfiguration(), + newRegions, + masterServices).prepare()); + //wait for region to be online + int tries = 600; + while(masterServices.getAssignmentManager().getRegionStates() + .getRegionServerOfRegion(newRegions[0]) == null && tries > 0) { + try { + Thread.sleep(100); + } catch (InterruptedException e) { + throw new IOException("Wait interrupted", e); + } + tries--; + } + if(tries <= 0) { + throw new IOException("Failed to create group table."); + } + } + +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupSerDe.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupSerDe.java new file mode 100644 index 0000000..cf32647 --- /dev/null +++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupSerDe.java @@ -0,0 +1,88 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.group; + +import com.google.common.collect.Lists; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.client.HTableInterface; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.exceptions.DeserializationException; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos; +import org.apache.hadoop.hbase.zookeeper.ZKUtil; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.zookeeper.KeeperException; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.util.List; +import java.util.NavigableMap; + +//TODO do better encapsulation of SerDe logic from GroupInfoManager and GroupTracker +public class GroupSerDe { + private static final Log LOG = LogFactory.getLog(GroupSerDe.class); + + public GroupSerDe() { + + } + + public List retrieveGroupList(HTableInterface groupTable) throws IOException { + List groupInfoList = Lists.newArrayList(); + Result result = groupTable.get(new Get(GroupInfoManager.ROW_KEY)); + if(!result.isEmpty()) { + NavigableMap> dataMap = result.getNoVersionMap(); + for(byte[] groupName: dataMap.get(GroupInfoManager.META_FAMILY_BYTES).keySet()) { + RSGroupProtos.GroupInfo proto = + RSGroupProtos.GroupInfo.parseFrom( + dataMap.get(GroupInfoManager.META_FAMILY_BYTES).get(groupName)); + groupInfoList.add(ProtobufUtil.toGroupInfo(proto)); + } + } + return groupInfoList; + } + + public List retrieveGroupList(ZooKeeperWatcher watcher, + String groupBasePath) throws IOException { + List groupInfoList = Lists.newArrayList(); + //Overwrite any info stored by table, this takes precedence + try { + if(ZKUtil.checkExists(watcher, groupBasePath) != -1) { + for(String znode: ZKUtil.listChildrenAndWatchForNewChildren(watcher, groupBasePath)) { + byte[] data = ZKUtil.getData(watcher, ZKUtil.joinZNode(groupBasePath, znode)); + if(data.length > 0) { + ProtobufUtil.expectPBMagicPrefix(data); + ByteArrayInputStream bis = new ByteArrayInputStream( + data, ProtobufUtil.lengthOfPBMagic(), data.length); + groupInfoList.add(ProtobufUtil.toGroupInfo(RSGroupProtos.GroupInfo.parseFrom(bis))); + } + } + LOG.debug("Read ZK GroupInfo count:" + groupInfoList.size()); + } + } catch (KeeperException e) { + throw new IOException("Failed to read groupZNode",e); + } catch (DeserializationException e) { + throw new IOException("Failed to read groupZNode",e); + } + return groupInfoList; + } 
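GroupSerDe above reads the same group records from two stores: the hbase:rsgroup table when the cluster is online, and the groupInfo znode (PB-magic-prefixed payloads, one child per group) during offline bootstrap. The table layout is a single row (GroupInfoManager.ROW_KEY) in family "m", with one column per group whose qualifier is the group name and whose value is the serialized RSGroupProtos.GroupInfo. A rough sketch of reading one group straight from that row, using the classes introduced by this patch (the sketch itself is not part of the patch):

// Illustrative sketch only: read one group's record directly from hbase:rsgroup.
import java.io.IOException;

import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.group.GroupInfo;
import org.apache.hadoop.hbase.group.GroupInfoManager;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos;
import org.apache.hadoop.hbase.util.Bytes;

public class RawGroupReadExample {
  /** Returns the stored GroupInfo for groupName, or null if no such column exists. */
  public static GroupInfo readGroup(HTableInterface groupTable, String groupName)
      throws IOException {
    Result result = groupTable.get(new Get(GroupInfoManager.ROW_KEY));
    byte[] bytes = result.getValue(GroupInfoManager.META_FAMILY_BYTES, Bytes.toBytes(groupName));
    return bytes == null ? null
        : ProtobufUtil.toGroupInfo(RSGroupProtos.GroupInfo.parseFrom(bytes));
  }
}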
+} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupTracker.java new file mode 100644 index 0000000..f0c0a8f --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupTracker.java @@ -0,0 +1,341 @@ +/* + * Copyright 2010 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.group; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.Abortable; +import org.apache.hadoop.hbase.HostPort; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.zookeeper.ZKUtil; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperListener; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperNodeTracker; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.zookeeper.KeeperException; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +public class GroupTracker extends ZooKeeperNodeTracker { + private static final Log LOG = LogFactory.getLog(GroupTracker.class); + + private List listeners = Collections.synchronizedList(new ArrayList()); + private GroupSerDe groupSerDe = new GroupSerDe(); + private volatile Map groupMap = new HashMap(); + private volatile Map serverMap = new HashMap(); + private RegionServerTracker rsTracker; + private volatile boolean started = false; + + /** + * Constructs a new ZK node tracker. + *
+ * + * <p>
After construction, use {@link #start} to kick off tracking. + * + * @param watcher + * @param abortable + */ + public GroupTracker(ZooKeeperWatcher watcher, Abortable abortable) throws IOException { + //TODO make period configurable + super(watcher, + ZKUtil.joinZNode(watcher.baseZNode, GroupInfoManager.groupZNode), + abortable!=null?abortable:new PersistentAbortable(10000)); + if(abortable == null) { + ((PersistentAbortable)this.abortable).setGroupTracker(this); + } + rsTracker = new RegionServerTracker(watcher, abortable, this); + try { + ZKUtil.listChildrenAndWatchThem(watcher, node); + rsTracker.start(); + } catch (KeeperException e) { + throw new IOException("Failed to start RS tracker", e); + } + } + + public void addListener(Listener listener) { + listeners.add(listener); + } + + public void removeListener(Listener listener) { + listeners.remove(listener); + } + + @Override + public synchronized void start() { + super.start(); + started = true; + } + + @Override + public void nodeCreated(String path) { + if (path.equals(node)) { + refresh(); + } + } + + @Override + public void nodeDataChanged(String path) { + if (path.equals(node)) { + nodeCreated(path); + } + } + + @Override + public void nodeChildrenChanged(String path) { + if (path.startsWith(node)) { + refresh(); + } + } + + public void blockUntilReady(int timeout) throws InterruptedException, IOException { + blockUntilAvailable(timeout, false); + if(getData(false) != null) { + refresh(false); + } + } + + private void refresh() { + try { + refresh(false); + } catch (IOException e) { + this.abortable.abort("Failed to read group znode", e); + } + } + + private synchronized void refresh(boolean force) throws IOException { + List onlineRS = rsTracker.getOnlineServers(); + Set hostPorts = new HashSet(); + for(ServerName entry: onlineRS) { + hostPorts.add(new HostPort(entry.getHostname(), entry.getPort())); + } + Map tmpGroupMap = new HashMap(); + Map tmpServerMap = new HashMap(); + for(GroupInfo groupInfo: listGroups()) { + tmpGroupMap.put(groupInfo.getName(), groupInfo); + for(HostPort server: groupInfo.getServers()) { + tmpServerMap.put(server, groupInfo); + hostPorts.remove(server); + } + } + GroupInfo groupInfo = tmpGroupMap.get(GroupInfo.DEFAULT_GROUP); + groupInfo.addAllServers(hostPorts); + for(HostPort entry: hostPorts) { + tmpServerMap.put(entry, groupInfo); + } + + //when reading sync on "this" if groupMap<->serverMap + //invariant needs to be guaranteed + groupMap = tmpGroupMap; + serverMap = tmpServerMap; + + Map map = getGroupMap(); + for(Listener listener : listeners) { + listener.groupMapChanged(map); + } + } + + private List listGroups() throws IOException { + return groupSerDe.retrieveGroupList(watcher, node); + } + + public GroupInfo getGroup(String name) { + GroupInfo groupInfo = groupMap.get(name); + return groupInfo; + } + + public GroupInfo getGroupOfServer(String hostPort) { + GroupInfo groupInfo = serverMap.get(hostPort); + return groupInfo; + } + + public Map getGroupMap() { + return Collections.unmodifiableMap(groupMap); + } + + public interface Listener { + public void groupMapChanged(Map groupMap); + } + + + /** + * This class is copied for RegionServerTracker + * We need our own since the other one was tied to ServerManager + * and thus the master + */ + private static class RegionServerTracker extends ZooKeeperListener { + private static final Log LOG = LogFactory.getLog(RegionServerTracker.class); + private volatile List regionServers = new ArrayList(); + private Abortable abortable; + private 
GroupTracker groupTracker; + + public RegionServerTracker(ZooKeeperWatcher watcher, + Abortable abortable, GroupTracker groupTracker) { + super(watcher); + this.abortable = abortable; + this.groupTracker = groupTracker; + } + + public void start() throws KeeperException, IOException { + watcher.registerListener(this); + refresh(); + } + + private void add(final List servers) throws IOException { + List temp = new ArrayList(); + for (String n: servers) { + ServerName sn = ServerName.parseServerName(ZKUtil.getNodeName(n)); + temp.add(sn); + } + regionServers = temp; + //we're refreshing groups, since default membership + //is dynamic and new servers may end up as new default group members + refreshGroups(); + } + + private void remove(final ServerName sn) { + List temp = new ArrayList(); + for(ServerName el: regionServers) { + if(!sn.equals(el)) { + temp.add(el); + } + } + regionServers = temp; + refreshGroups(); + } + + private void refreshGroups() { + if(groupTracker.started && groupTracker.getData(false) != null) { + groupTracker.refresh(); + } + } + + public void refresh() throws KeeperException, IOException { + List servers = + ZKUtil.listChildrenAndWatchThem(watcher, watcher.rsZNode); + add(servers); + } + + @Override + public void nodeDeleted(String path) { + if (path.startsWith(watcher.rsZNode)) { + String serverName = ZKUtil.getNodeName(path); + LOG.info("RegionServer ephemeral node deleted, processing expiration [" + + serverName + "]"); + ServerName sn = ServerName.parseServerName(serverName); + remove(sn); + } + } + + @Override + public void nodeChildrenChanged(String path) { + if (path.equals(watcher.rsZNode)) { + try { + List servers = + ZKUtil.listChildrenAndWatchThem(watcher, watcher.rsZNode); + add(servers); + } catch (IOException e) { + abortable.abort("Unexpected zk exception getting RS nodes", e); + } catch (KeeperException e) { + abortable.abort("Unexpected zk exception getting RS nodes", e); + } + } + } + + /** + * Gets the online servers. 
+ * @return list of online servers + */ + public List getOnlineServers() { + return regionServers; + } + } + + private static class Refresher extends Thread { + private final static Log LOG = LogFactory.getLog(Refresher.class); + private GroupTracker groupTracker; + private volatile boolean isRunning = true; + private int period; + + public Refresher(GroupTracker groupTracker, int period) { + this.groupTracker = groupTracker; + this.period = period; + this.setDaemon(true); + } + + public boolean isRunning() { + return isRunning; + } + + @Override + public void run() { + while(true) { + try { + groupTracker.rsTracker.refresh(); + groupTracker.refresh(true); + LOG.info("Recovery refresh successful"); + isRunning = false; + return; + } catch (IOException e) { + LOG.warn("Failed to refresh", e); + } catch (KeeperException e) { + LOG.warn("Failed to refresh", e); + } + try { + Thread.sleep(period); + } catch (InterruptedException e) { + } + } + } + } + + private static class PersistentAbortable implements Abortable { + private final Log LOG = LogFactory.getLog(Abortable.class); + private Refresher refresher; + private GroupTracker groupTracker; + private int period; + + + public PersistentAbortable(int period) { + this.period = period; + } + + public void setGroupTracker(GroupTracker groupTracker) { + this.groupTracker = groupTracker; + } + + @Override + public void abort(String why, Throwable e) { + LOG.warn("Launching referesher because of abort: "+why, e); + if(refresher == null || !refresher.isRunning()) { + refresher = new Refresher(groupTracker, period); + } + } + + @Override + public boolean isAborted() { + return false; + } + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupableBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupableBalancer.java new file mode 100644 index 0000000..e696926 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupableBalancer.java @@ -0,0 +1,12 @@ +package org.apache.hadoop.hbase.group; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hbase.master.LoadBalancer; + +import java.io.IOException; + +@InterfaceAudience.Private +public interface GroupableBalancer extends LoadBalancer { + + void setGroupInfoManager(GroupInfoManager groupInfoManager) throws IOException; +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/group/MXBean.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/group/MXBean.java new file mode 100644 index 0000000..6ccd0ab --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/group/MXBean.java @@ -0,0 +1,71 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.group; + +import org.apache.hadoop.hbase.HostPort; +import org.apache.hadoop.hbase.TableName; + +import java.io.IOException; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; + +public interface MXBean { + + public Map> getServersByGroup() throws IOException; + + public List getGroups() throws IOException; + + public static class GroupInfoBean { + + private String name; + private List servers; + private List tables; + private List offlineServers; + + //Need this to convert NavigableSet to List + public GroupInfoBean(GroupInfo groupInfo, List offlineServers) { + this.name = groupInfo.getName(); + this.offlineServers = offlineServers; + this.servers = new LinkedList(); + this.servers.addAll(groupInfo.getServers()); + this.tables = new LinkedList(); + this.tables.addAll(groupInfo.getTables()); + } + + public String getName() { + return name; + } + + public List getServers() { + return servers; + } + + public List getOfflineServers() { + return offlineServers; + } + + public List getTables() { + return tables; + } + } + +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/group/MXBeanImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/group/MXBeanImpl.java new file mode 100644 index 0000000..5836d2d --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/group/MXBeanImpl.java @@ -0,0 +1,95 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.group; + +import com.google.common.collect.Lists; +import com.google.common.collect.Sets; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.HostPort; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.master.MasterServices; + +import java.io.IOException; +import java.util.HashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Set; + +public class MXBeanImpl implements MXBean { + private static final Log LOG = LogFactory.getLog(MXBeanImpl.class); + + private static MXBeanImpl instance = null; + + private GroupAdmin groupAdmin; + private MasterServices master; + + public synchronized static MXBeanImpl init( + final GroupAdmin groupAdmin, + MasterServices master) { + if (instance == null) { + instance = new MXBeanImpl(groupAdmin, master); + } + return instance; + } + + protected MXBeanImpl(final GroupAdmin groupAdmin, + MasterServices master) { + this.groupAdmin = groupAdmin; + this.master = master; + } + + @Override + public Map> getServersByGroup() throws IOException { + Map> data = new HashMap>(); + for (final ServerName entry : + master.getServerManager().getOnlineServersList()) { + GroupInfo groupInfo = groupAdmin.getGroupOfServer( + new HostPort(entry.getHostname(), entry.getPort())); + if(!data.containsKey(groupInfo.getName())) { + data.put(groupInfo.getName(), new LinkedList()); + } + data.get(groupInfo.getName()).add(entry.getHostPort()); + } + return data; + } + + @Override + public List getGroups() throws IOException { + Set onlineServers = Sets.newHashSet(); + for (ServerName entry: master.getServerManager().getOnlineServersList()) { + onlineServers.add(new HostPort(entry.getHostname(), entry.getPort())); + } + List list = Lists.newArrayList(); + for (GroupInfo group: groupAdmin.listGroups()) { + List deadServers = Lists.newArrayList(); + for (HostPort server: group.getServers()) { + if (!onlineServers.contains(server)) { + deadServers.add(server); + } + } + list.add(new GroupInfoBean(group, deadServers)); + } + return list; + } + +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java index 6df721b..f95dceb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java @@ -2113,7 +2113,7 @@ public class AssignmentManager extends ZooKeeperListener { } } LOG.info("Assigning " + region.getRegionNameAsString() + - " to " + plan.getDestination().toString()); + " to " + plan.getDestination()); // Transition RegionState to PENDING_OPEN currentState = regionStates.updateRegionState(region, State.PENDING_OPEN, plan.getDestination()); @@ -2402,8 +2402,13 @@ public class AssignmentManager extends ZooKeeperListener { || existingPlan.getDestination() == null || !destServers.contains(existingPlan.getDestination())) { newPlan = true; - randomPlan = new RegionPlan(region, null, - balancer.randomAssignment(region, destServers)); + try { + randomPlan = new RegionPlan(region, null, + balancer.randomAssignment(region, destServers)); + } catch (IOException ex) { + LOG.warn("Failed to create new plan.",ex); + return null; + } if (!region.isMetaTable() && shouldAssignRegionsWithFavoredNodes) { List regions = new ArrayList(1); regions.add(region); @@ -2690,6 +2695,19 @@ public class 
AssignmentManager extends ZooKeeperListener { Map> bulkPlan = balancer.retainAssignment(regions, servers); + if (bulkPlan.containsKey(LoadBalancer.BOGUS_SERVER_NAME)) { + // Found no plan for some regions, put those regions in RIT + for (HRegionInfo hri : bulkPlan.get(LoadBalancer.BOGUS_SERVER_NAME)) { + if (tomActivated) { + // Set to offline so that tom will handle it + regionStates.updateRegionState(hri, State.OFFLINE); + } else { + regionStates.updateRegionState(hri, State.FAILED_OPEN); + } + } + bulkPlan.remove(LoadBalancer.BOGUS_SERVER_NAME); + } + assign(regions.size(), servers.size(), "retainAssignment=true", bulkPlan); } @@ -2716,6 +2734,20 @@ public class AssignmentManager extends ZooKeeperListener { // Generate a round-robin bulk assignment plan Map> bulkPlan = balancer.roundRobinAssignment(regions, servers); + + if (bulkPlan.containsKey(LoadBalancer.BOGUS_SERVER_NAME)) { + // Found no plan for some regions, put those regions in RIT + for (HRegionInfo hri : bulkPlan.get(LoadBalancer.BOGUS_SERVER_NAME)) { + if (tomActivated) { + // Set to offline so that tom will handle it + regionStates.updateRegionState(hri, State.OFFLINE); + } else { + regionStates.updateRegionState(hri, State.FAILED_OPEN); + } + } + bulkPlan.remove(LoadBalancer.BOGUS_SERVER_NAME); + } + processFavoredNodes(regions); assign(regions.size(), servers.size(), diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index aaaef80..2e87d7b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -41,8 +41,10 @@ import java.util.concurrent.atomic.AtomicReference; import javax.management.ObjectName; +import com.google.common.collect.Sets; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.HostPort; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; @@ -76,6 +78,7 @@ import org.apache.hadoop.hbase.client.MetaScanner; import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor; import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitorBase; import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.constraint.ConstraintException; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; import org.apache.hadoop.hbase.errorhandling.ForeignException; import org.apache.hadoop.hbase.exceptions.DeserializationException; @@ -83,6 +86,9 @@ import org.apache.hadoop.hbase.exceptions.MergeRegionException; import org.apache.hadoop.hbase.exceptions.UnknownProtocolException; import org.apache.hadoop.hbase.executor.ExecutorService; import org.apache.hadoop.hbase.executor.ExecutorType; +import org.apache.hadoop.hbase.group.GroupAdminServer; +import org.apache.hadoop.hbase.group.GroupInfo; +import org.apache.hadoop.hbase.group.GroupableBalancer; import org.apache.hadoop.hbase.ipc.FifoRpcScheduler; import org.apache.hadoop.hbase.ipc.RpcServer; import org.apache.hadoop.hbase.ipc.RpcServer.BlockingServiceAndInterface; @@ -121,8 +127,12 @@ import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Re import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnRequest; import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddGroupResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceGroupResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest; @@ -151,6 +161,12 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusR import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfServerResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoOfTableResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetGroupInfoResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest; @@ -171,6 +187,8 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshot import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListGroupInfosResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest; @@ -185,8 +203,14 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyTableReques import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyTableResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveRegionRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveRegionResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveServersResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveTablesResponse; 
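The HMaster changes in this file wire up master-side endpoints for the group operations (addGroup, removeGroup, moveServers, moveTables, balanceGroup, listGroupInfos, and the getGroupInfo* lookups), delegating to GroupAdminServer; createNamespace/modifyNamespace additionally validate the GroupInfo.NAMESPACEDESC_PROP_GROUP property against existing groups. As a hypothetical end-to-end workflow, assuming the client-side GroupAdmin interface mirrors these RPC signatures (the class and names below are made up for illustration):

// Hypothetical workflow, assuming GroupAdmin exposes methods mirroring the master RPCs below.
import java.io.IOException;
import java.util.Set;

import com.google.common.collect.Sets;
import org.apache.hadoop.hbase.HostPort;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.group.GroupAdmin;

public class GroupAdminWorkflowExample {
  /** Creates a group, dedicates two servers and one table to it, then rebalances the group. */
  public static void carveOutGroup(GroupAdmin groupAdmin) throws IOException {
    groupAdmin.addGroup("lowlatency");

    Set<HostPort> servers = Sets.newHashSet(
        new HostPort("rs1.example.com", 60020),
        new HostPort("rs2.example.com", 60020));
    groupAdmin.moveServers(servers, "lowlatency");

    groupAdmin.moveTables(Sets.newHashSet(TableName.valueOf("latency_sensitive")), "lowlatency");
    groupAdmin.balanceGroup("lowlatency");
  }
}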
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.OfflineRegionRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.OfflineRegionResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RemoveGroupResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest; @@ -361,6 +385,8 @@ MasterServices, Server { private LoadBalancerTracker loadBalancerTracker; // master address tracker private MasterAddressTracker masterAddressTracker; + // group admin apis + private GroupAdminServer groupAdminServer; // RPC server for the HMaster private final RpcServerInterface rpcServer; @@ -968,6 +994,11 @@ MasterServices, Server { this.initializationBeforeMetaAssignment = true; + if (balancer instanceof GroupableBalancer) { + groupAdminServer = new GroupAdminServer(this); + ((GroupableBalancer)balancer).setGroupInfoManager(groupAdminServer.getGroupInfoManager()); + } + //initialize load balancer this.balancer.setClusterStatus(getClusterStatus()); this.balancer.setMasterServices(this); @@ -1795,11 +1826,17 @@ MasterServices, Server { final byte[] destServerName) throws HBaseIOException { RegionState regionState = assignmentManager.getRegionStates(). getRegionState(Bytes.toString(encodedRegionName)); - if (regionState == null) { + + HRegionInfo hri; + if (Bytes.toString(encodedRegionName) + .equals(HRegionInfo.FIRST_META_REGIONINFO.getEncodedName())) { + hri = HRegionInfo.FIRST_META_REGIONINFO; + } else if (regionState != null) { + hri = regionState.getRegion(); + } else { throw new UnknownRegionException(Bytes.toStringBinary(encodedRegionName)); } - HRegionInfo hri = regionState.getRegion(); ServerName dest; if (destServerName == null || destServerName.length == 0) { LOG.info("Passed destination servername is null/empty so " + @@ -1807,8 +1844,17 @@ MasterServices, Server { final List destServers = this.serverManager.createDestinationServersList( regionState.getServerName()); dest = balancer.randomAssignment(hri, destServers); + if (dest == null) { + LOG.debug("Unable to determine a plan to assign " + hri); + return; + } } else { - dest = ServerName.valueOf(Bytes.toString(destServerName)); + ServerName candidate = ServerName.valueOf(Bytes.toString(destServerName)); + dest = balancer.randomAssignment(hri, Lists.newArrayList(candidate)); + if (dest == null) { + LOG.debug("Unable to determine a plan to assign " + hri); + return; + } if (dest.equals(regionState.getServerName())) { LOG.debug("Skipping move of region " + hri.getRegionNameAsString() + " because region already assigned to the same server " + dest + "."); @@ -2143,7 +2189,7 @@ MasterServices, Server { throws ServiceException { try { addColumn(ProtobufUtil.toTableName(req.getTableName()), - HColumnDescriptor.convert(req.getColumnFamilies())); + HColumnDescriptor.convert(req.getColumnFamilies())); } catch (IOException ioe) { throw new ServiceException(ioe); } @@ -2174,7 +2220,7 @@ MasterServices, Server { throws ServiceException { try { modifyColumn(ProtobufUtil.toTableName(req.getTableName()), - HColumnDescriptor.convert(req.getColumnFamilies())); + HColumnDescriptor.convert(req.getColumnFamilies())); } catch (IOException ioe) { throw new ServiceException(ioe); } @@ -2781,7 +2827,18 @@ MasterServices, Server { } Pair 
pair = MetaReader.getRegion(this.catalogTracker, regionName); - if (pair == null) throw new UnknownRegionException(Bytes.toString(regionName)); + if (Bytes.equals(HRegionInfo.FIRST_META_REGIONINFO.getRegionName(),regionName)) { + try { + pair = new Pair(HRegionInfo.FIRST_META_REGIONINFO, + this.catalogTracker.getMetaLocation()); + } catch (InterruptedException e) { + throw new IOException(e); + } + } + if (pair == null) { + throw new UnknownRegionException(Bytes.toString(regionName)); + } + HRegionInfo hri = pair.getFirst(); if (cpHost != null) { if (cpHost.preUnassign(hri, force)) { @@ -3413,6 +3470,12 @@ MasterServices, Server { @Override public void createNamespace(NamespaceDescriptor descriptor) throws IOException { TableName.isLegalNamespaceName(Bytes.toBytes(descriptor.getName())); + + String group = descriptor.getConfigurationValue(GroupInfo.NAMESPACEDESC_PROP_GROUP); + if(group != null && groupAdminServer.getGroupInfo(group) == null) { + throw new ConstraintException("Region server group "+group+" does not exit"); + } + if (cpHost != null) { if (cpHost.preCreateNamespace(descriptor)) { return; @@ -3428,6 +3491,12 @@ MasterServices, Server { @Override public void modifyNamespace(NamespaceDescriptor descriptor) throws IOException { TableName.isLegalNamespaceName(Bytes.toBytes(descriptor.getName())); + + String group = descriptor.getConfigurationValue(GroupInfo.NAMESPACEDESC_PROP_GROUP); + if(group != null && groupAdminServer.getGroupInfo(group) == null) { + throw new ConstraintException("Region server group "+group+" does not exit"); + } + if (cpHost != null) { if (cpHost.preModifyNamespace(descriptor)) { return; @@ -3573,7 +3642,7 @@ MasterServices, Server { .getDefaultLoadBalancerClass().getName()); } - /** + /** * Returns the security capabilities in effect on the cluster */ @Override @@ -3612,4 +3681,158 @@ MasterServices, Server { } return response.build(); } + + @Override + public LoadBalancer getLoadBalancer() { + return balancer; + } + + @Override + public GroupAdminServer getGroupAdminServer() { + return groupAdminServer; + } + + @Override + public GetGroupInfoResponse getGroupInfo(RpcController controller, GetGroupInfoRequest request) throws ServiceException { + MasterProtos.GetGroupInfoResponse response = null; + try { + MasterProtos.GetGroupInfoResponse.Builder builder = + MasterProtos.GetGroupInfoResponse.newBuilder(); + GroupInfo groupInfo = groupAdminServer.getGroupInfo(request.getGroupName()); + if(groupInfo != null) { + builder.setGroupInfo(ProtobufUtil.toProtoGroupInfo(groupInfo)); + } + response = builder.build(); + } catch (IOException e) { + throw new ServiceException(e); + } + return response; + } + + @Override + public GetGroupInfoOfTableResponse getGroupInfoOfTable(RpcController controller, + GetGroupInfoOfTableRequest request) throws ServiceException { + MasterProtos.GetGroupInfoOfTableResponse response = null; + try { + MasterProtos.GetGroupInfoOfTableResponse.Builder builder = + MasterProtos.GetGroupInfoOfTableResponse.newBuilder(); + GroupInfo groupInfo = + groupAdminServer.getGroupInfoOfTable(ProtobufUtil.toTableName(request.getTableName())); + response = builder.setGroupInfo(ProtobufUtil.toProtoGroupInfo(groupInfo)).build(); + } catch (IOException e) { + throw new ServiceException(e); + } + return response; + } + + @Override + public MoveServersResponse moveServers(RpcController controller, MoveServersRequest request) throws ServiceException { + MasterProtos.MoveServersResponse response = null; + try { + MasterProtos.MoveServersResponse.Builder builder = 
+ MasterProtos.MoveServersResponse.newBuilder(); + Set hostPorts = Sets.newHashSet(); + for(HBaseProtos.HostPort el: request.getServersList()) { + hostPorts.add(new HostPort(el.getHostName(), el.getPort())); + } + groupAdminServer.moveServers(hostPorts, request.getTargetGroup()); + response = builder.build(); + } catch (IOException e) { + throw new ServiceException(e); + } + return response; + } + + @Override + public MoveTablesResponse moveTables(RpcController controller, MoveTablesRequest request) throws ServiceException { + MasterProtos.MoveTablesResponse response = null; + try { + MasterProtos.MoveTablesResponse.Builder builder = + MasterProtos.MoveTablesResponse.newBuilder(); + Set tables = new HashSet(request.getTableNameList().size()); + for(HBaseProtos.TableName tableName: request.getTableNameList()) { + tables.add(ProtobufUtil.toTableName(tableName)); + } + groupAdminServer.moveTables(tables, request.getTargetGroup()); + response = builder.build(); + } catch (IOException e) { + throw new ServiceException(e); + } + return response; + } + + @Override + public AddGroupResponse addGroup(RpcController controller, AddGroupRequest request) throws ServiceException { + MasterProtos.AddGroupResponse response = null; + try { + MasterProtos.AddGroupResponse.Builder builder = + MasterProtos.AddGroupResponse.newBuilder(); + groupAdminServer.addGroup(request.getGroupName()); + response = builder.build(); + } catch (IOException e) { + throw new ServiceException(e); + } + return response; + } + + @Override + public RemoveGroupResponse removeGroup(RpcController controller, RemoveGroupRequest request) throws ServiceException { + MasterProtos.RemoveGroupResponse response = null; + try { + MasterProtos.RemoveGroupResponse.Builder builder = + MasterProtos.RemoveGroupResponse.newBuilder(); + groupAdminServer.removeGroup(request.getGroupName()); + response = builder.build(); + } catch (IOException e) { + throw new ServiceException(e); + } + return response; + } + + @Override + public BalanceGroupResponse balanceGroup(RpcController controller, BalanceGroupRequest request) throws ServiceException { + MasterProtos.BalanceGroupResponse response = null; + try { + MasterProtos.BalanceGroupResponse.Builder builder = + MasterProtos.BalanceGroupResponse.newBuilder(); + builder.setBalanceRan(groupAdminServer.balanceGroup(request.getGroupName())); + response = builder.build(); + } catch (IOException e) { + throw new ServiceException(e); + } + return response; + } + + @Override + public ListGroupInfosResponse listGroupInfos(RpcController controller, + ListGroupInfosRequest request) throws ServiceException { + MasterProtos.ListGroupInfosResponse response = null; + try { + MasterProtos.ListGroupInfosResponse.Builder builder = + MasterProtos.ListGroupInfosResponse.newBuilder(); + for(GroupInfo groupInfo: groupAdminServer.listGroups()) { + builder.addGroupInfo(ProtobufUtil.toProtoGroupInfo(groupInfo)); + } + response = builder.build(); + } catch (IOException e) { + throw new ServiceException(e); + } + return response; + } + + @Override + public GetGroupInfoOfServerResponse getGroupInfoOfServer(RpcController controller, + GetGroupInfoOfServerRequest request) throws ServiceException { + MasterProtos.GetGroupInfoOfServerResponse response = null; + try { + MasterProtos.GetGroupInfoOfServerResponse.Builder builder = + MasterProtos.GetGroupInfoOfServerResponse.newBuilder(); + GroupInfo groupInfo = groupAdminServer.getGroupOfServer( + new HostPort(request.getServer().getHostName(), request.getServer().getPort())); + response = 
builder.setGroupInfo(ProtobufUtil.toProtoGroupInfo(groupInfo)).build(); + } catch (IOException e) { + throw new ServiceException(e); + } + return response; + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java index e24d745..abd0268 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java @@ -48,6 +48,9 @@ import org.apache.hadoop.hbase.Stoppable; @InterfaceAudience.Private public interface LoadBalancer extends Configurable, Stoppable { + //used to signal to the caller that the region(s) cannot be assigned + ServerName BOGUS_SERVER_NAME = ServerName.parseServerName("127.0.0.1,1,1"); + /** * Set the current cluster status. This allows a LoadBalancer to map host name to a server * @param st diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java index a9bb081..73faf3c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java @@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.master; import java.io.IOException; import java.util.List; +import java.util.Set; import org.apache.commons.lang.ClassUtils; import org.apache.commons.logging.Log; @@ -30,6 +31,7 @@ import org.apache.hadoop.hbase.Coprocessor; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.HostPort; import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; @@ -850,6 +852,116 @@ public class MasterCoprocessorHost } }); } + + public void preMoveServers(final Set servers, final String targetGroup) + throws IOException { + execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + @Override + public void call(MasterObserver oserver, + ObserverContext ctx) throws IOException { + oserver.preMoveServers(ctx, servers, targetGroup); + } + }); + } + + public void postMoveServers(final Set servers, final String targetGroup) + throws IOException { + execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + @Override + public void call(MasterObserver oserver, + ObserverContext ctx) throws IOException { + oserver.postMoveServers(ctx, servers, targetGroup); + } + }); + } + + public void preMoveTables(final Set tables, final String targetGroup) + throws IOException { + execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + @Override + public void call(MasterObserver oserver, + ObserverContext ctx) throws IOException { + oserver.preMoveTables(ctx, tables, targetGroup); + } + }); + } + + public void postMoveTables(final Set tables, final String targetGroup) + throws IOException { + execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + @Override + public void call(MasterObserver oserver, + ObserverContext ctx) throws IOException { + oserver.postMoveTables(ctx, tables, targetGroup); + } + }); + } + + public void preAddGroup(final String name) + throws IOException { + execOperation(coprocessors.isEmpty() ? 
null : new CoprocessorOperation() { + @Override + public void call(MasterObserver oserver, + ObserverContext ctx) throws IOException { + oserver.preAddGroup(ctx, name); + } + }); + } + + public void postAddGroup(final String name) + throws IOException { + execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + @Override + public void call(MasterObserver oserver, + ObserverContext ctx) throws IOException { + oserver.postAddGroup(ctx, name); + } + }); + } + + public void preRemoveGroup(final String name) + throws IOException { + execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + @Override + public void call(MasterObserver oserver, + ObserverContext ctx) throws IOException { + oserver.preRemoveGroup(ctx, name); + } + }); + } + + public void postRemoveGroup(final String name) + throws IOException { + execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + @Override + public void call(MasterObserver oserver, + ObserverContext ctx) throws IOException { + oserver.postRemoveGroup(ctx, name); + } + }); + } + + public void preBalanceGroup(final String name) + throws IOException { + execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + @Override + public void call(MasterObserver oserver, + ObserverContext ctx) throws IOException { + oserver.preBalanceGroup(ctx, name); + } + }); + } + + public void postBalanceGroup(final String name, final boolean balanceRan) + throws IOException { + execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + @Override + public void call(MasterObserver oserver, + ObserverContext ctx) throws IOException { + oserver.postBalanceGroup(ctx, name, balanceRan); + } + }); + } private static abstract class CoprocessorOperation extends ObserverContext { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java index c402758..8844ea5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java @@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.TableDescriptors; import org.apache.hadoop.hbase.TableNotDisabledException; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.executor.ExecutorService; +import org.apache.hadoop.hbase.group.GroupAdminServer; import com.google.protobuf.Service; @@ -250,4 +251,14 @@ public interface MasterServices extends Server { * @throws IOException */ public List listTableNamesByNamespace(String name) throws IOException; + + /** + * @return load balancer + */ + public LoadBalancer getLoadBalancer(); + + /** + * @return load balancer + */ + public GroupAdminServer getGroupAdminServer(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java index 24d8c71..868818f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java @@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.master.handler; import java.io.IOException; import java.io.InterruptedIOException; import java.security.PrivilegedExceptionAction; +import java.security.acl.Group; import java.util.List; import org.apache.commons.logging.Log; @@ -40,6 +41,7 @@ import 
org.apache.hadoop.hbase.catalog.MetaEditor; import org.apache.hadoop.hbase.catalog.MetaReader; import org.apache.hadoop.hbase.executor.EventHandler; import org.apache.hadoop.hbase.executor.EventType; +import org.apache.hadoop.hbase.group.GroupAdminServer; import org.apache.hadoop.hbase.ipc.RpcServer; import org.apache.hadoop.hbase.master.AssignmentManager; import org.apache.hadoop.hbase.master.HMaster; @@ -64,6 +66,7 @@ public class CreateTableHandler extends EventHandler { protected final MasterFileSystem fileSystemManager; protected final HTableDescriptor hTableDescriptor; protected final Configuration conf; + private final MasterServices masterServices; private final AssignmentManager assignmentManager; private final CatalogTracker catalogTracker; private final TableLockManager tableLockManager; @@ -83,6 +86,7 @@ public class CreateTableHandler extends EventHandler { this.catalogTracker = masterServices.getCatalogTracker(); this.assignmentManager = masterServices.getAssignmentManager(); this.tableLockManager = masterServices.getTableLockManager(); + this.masterServices = masterServices; this.tableLock = this.tableLockManager.writeLock(this.hTableDescriptor.getTableName() , EventType.C_M_CREATE_TABLE.toString()); @@ -140,6 +144,15 @@ public class CreateTableHandler extends EventHandler { throw new IOException("Unable to ensure that the table will be" + " enabling because of a ZooKeeper issue", e); } + + //prepare table's group affiliation + //If master is not initialized and a create table is spawned then it is + //a special table and group affilition should be taken care of explicitly + GroupAdminServer groupAdminServer = masterServices.getGroupAdminServer(); + if (groupAdminServer != null && masterServices.isInitialized()) { + groupAdminServer.prepareGroupForTable(hTableDescriptor); + } + success = true; } finally { if (!success) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java index 474da0a..5164ad7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java @@ -134,6 +134,12 @@ public class DeleteTableHandler extends TableEventHandler { // 9. 
Clean up any remaining rows for this table cleanAnyRemainingRows(); + + // Remove group affiliation + if (masterServices.getGroupAdminServer() != null) { + LOG.debug("Removing " + tableName + " from group."); + masterServices.getGroupAdminServer().cleanupGroupForTable(tableName); + } } if (cpHost != null) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java index 94d716b..cf551f1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java @@ -45,6 +45,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.HostPort; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValue.Type; import org.apache.hadoop.hbase.KeyValueUtil; @@ -419,6 +420,7 @@ public class AccessController extends BaseMasterAndRegionObserver * @throws IOException if obtaining the current user fails * @throws AccessDeniedException if user has no authorization */ + private void requirePermission(String request, TableName tableName, byte[] family, byte[] qualifier, Action... permissions) throws IOException { User user = getActiveUser(); @@ -2482,4 +2484,34 @@ public class AccessController extends BaseMasterAndRegionObserver public void postReplicateLogEntries(ObserverContext ctx, List entries, CellScanner cells) throws IOException { } + + @Override + public void preMoveServers(ObserverContext ctx, + Set servers, String targetGroup) throws IOException { + requirePermission("moveServers", Action.ADMIN); + } + + @Override + public void preMoveTables(ObserverContext ctx, + Set tables, String targetGroup) throws IOException { + requirePermission("moveTables", Action.ADMIN); + } + + @Override + public void preAddGroup(ObserverContext ctx, + String name) throws IOException { + requirePermission("addGroup", Action.ADMIN); + } + + @Override + public void preRemoveGroup(ObserverContext ctx, + String name) throws IOException { + requirePermission("removeGroup", Action.ADMIN); + } + + @Override + public void preBalanceGroup(ObserverContext ctx, + String groupName) throws IOException { + requirePermission("balanceGroup", Action.ADMIN); + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java index bbabf3a..8189ddb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java @@ -29,6 +29,7 @@ import java.util.Collection; import java.util.List; import java.util.Map; import java.util.NavigableMap; +import java.util.Set; import java.util.concurrent.CountDownLatch; import org.apache.commons.logging.Log; @@ -990,6 +991,56 @@ public class TestMasterObserver { public boolean wasGetTableDescriptorsCalled() { return preGetTableDescriptorsCalled && postGetTableDescriptorsCalled; } + + @Override + public void preMoveServers(ObserverContext ctx, + Set servers, String targetGroup) throws IOException { + } + + @Override + public void postMoveServers(ObserverContext ctx, + Set servers, String targetGroup) throws IOException { + } 
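For orientation, the sketch below (editorial, not part of this patch) exercises the client-facing GroupAdmin API that the observer hooks stubbed above and the new Action.ADMIN checks in AccessController guard. The group name, table name, and host:port used here are hypothetical; only calls that appear elsewhere in this change are used.

import java.io.IOException;

import com.google.common.collect.Sets;

import org.apache.hadoop.hbase.HostPort;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.group.GroupAdmin;
import org.apache.hadoop.hbase.group.GroupInfo;

public class GroupAdminUsageSketch {
  // Creates a group, moves one server and one table into it, then triggers a
  // group-scoped balance. Each call fires the corresponding pre/post master
  // coprocessor hooks and, with AccessController loaded, requires ADMIN rights.
  static void example(HConnection connection) throws IOException {
    GroupAdmin groupAdmin = connection.getGroupAdmin();
    groupAdmin.addGroup("batch");
    groupAdmin.moveServers(
        Sets.newHashSet(HostPort.valueOf("rs1.example.com:60020")), "batch");
    groupAdmin.moveTables(Sets.newHashSet(TableName.valueOf("t1")), "batch");
    groupAdmin.balanceGroup("batch");

    // Undo: tables and servers must go back to the default group before the
    // group itself can be removed.
    GroupInfo info = groupAdmin.getGroupInfo("batch");
    groupAdmin.moveTables(info.getTables(), GroupInfo.DEFAULT_GROUP);
    groupAdmin.moveServers(info.getServers(), GroupInfo.DEFAULT_GROUP);
    groupAdmin.removeGroup("batch");
  }
}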
+ + @Override + public void preMoveTables(ObserverContext ctx, + Set tables, String targetGroup) throws IOException { + } + + @Override + public void postMoveTables(ObserverContext ctx, + Set tables, String targetGroup) throws IOException { + } + + @Override + public void preAddGroup(ObserverContext ctx, + String name) throws IOException { + } + + @Override + public void postAddGroup(ObserverContext ctx, + String name) throws IOException { + } + + @Override + public void preRemoveGroup(ObserverContext ctx, + String name) throws IOException { + } + + @Override + public void postRemoveGroup(ObserverContext ctx, + String name) throws IOException { + } + + @Override + public void preBalanceGroup(ObserverContext ctx, + String groupName) throws IOException { + } + + @Override + public void postBalanceGroup(ObserverContext ctx, + String groupName, boolean balancerRan) throws IOException { + } } private static HBaseTestingUtility UTIL = new HBaseTestingUtility(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/group/TestGroups.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/group/TestGroups.java new file mode 100644 index 0000000..50a303f --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/group/TestGroups.java @@ -0,0 +1,389 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.group; + +import com.google.common.collect.Lists; +import com.google.common.collect.Sets; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.HostPort; +import org.apache.hadoop.hbase.MiniHBaseCluster; +import org.apache.hadoop.hbase.NamespaceDescriptor; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.Waiter; +import org.apache.hadoop.hbase.master.HMaster; +import org.apache.hadoop.hbase.master.MasterServices; +import org.apache.hadoop.hbase.master.ServerManager; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.mockito.Mockito; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; + +import javax.management.MBeanServer; +import javax.management.ObjectName; +import java.io.IOException; +import java.lang.management.ManagementFactory; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNotSame; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +@Category({MediumTests.class}) +public class TestGroups extends TestGroupsBase { + protected static final Log LOG = LogFactory.getLog(TestGroups.class); + private static HMaster master; + private static boolean init = false; + + + @BeforeClass + public static void setUp() throws Exception { + TEST_UTIL = new HBaseTestingUtility(); + TEST_UTIL.getConfiguration().set( + HConstants.HBASE_MASTER_LOADBALANCER_CLASS, + GroupBasedLoadBalancer.class.getName()); + TEST_UTIL.getConfiguration().setBoolean( + HConstants.ZOOKEEPER_USEMULTI, + true); + TEST_UTIL.startMiniCluster(NUM_SLAVES_BASE); + TEST_UTIL.getConfiguration().set( + ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, + ""+NUM_SLAVES_BASE); + + admin = TEST_UTIL.getHBaseAdmin(); + cluster = TEST_UTIL.getHBaseCluster(); + master = ((MiniHBaseCluster)cluster).getMaster(); + + //wait for balancer to come online + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + return master.isInitialized() && + ((GroupBasedLoadBalancer) master.getLoadBalancer()).isOnline(); + } + }); + admin.setBalancerRunning(false, true); + groupAdmin = new VerifyingGroupAdminClient(admin.getConnection().getGroupAdmin(), + TEST_UTIL.getConfiguration()); + } + + @AfterClass + public static void tearDown() throws Exception { + TEST_UTIL.shutdownMiniCluster(); + } + + @Before + public void beforeMethod() throws Exception { + if(!init) { + init = true; + 
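      // first run only: invoke the @After cleanup up front so the initial test
      // starts from a clean group/table/namespace state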
afterMethod(); + } + + } + + @After + public void afterMethod() throws Exception { + deleteTableIfNecessary(); + deleteNamespaceIfNecessary(); + deleteGroups(); + + int missing = NUM_SLAVES_BASE - cluster.getClusterStatus().getServers().size(); + LOG.info("Restoring servers: "+missing); + for(int i=0; i() { + @Override + public boolean evaluate() throws Exception { + LOG.info("Waiting for cleanup to finish " + groupAdmin.listGroups()); + //Might be greater since moving servers back to default + //is after starting a server + + return groupAdmin.getGroupInfo(GroupInfo.DEFAULT_GROUP).getServers().size() + == NUM_SLAVES_BASE; + } + }); + } + + @Test + public void testJmx() throws Exception { + MBeanServer mBeanServer = ManagementFactory.getPlatformMBeanServer(); + Iterator it = mBeanServer.queryNames(new ObjectName("hadoop:name=Group,service=Group"), null).iterator(); + //verify it was loaded properly + assertEquals("hadoop:name=Group,service=Group", it.next().getCanonicalName()); + + final AtomicReference deadServer = new AtomicReference(null); + + //We use mocks to simulate offline servers to avoid + //the complexity and overhead of killing servers + MasterServices mockMaster = Mockito.mock(MasterServices.class); + final ServerManager mockServerManager = Mockito.mock(ServerManager.class); + Mockito.when(mockMaster.getServerManager()).thenReturn(mockServerManager); + Mockito.when(mockServerManager.getOnlineServersList()).then(new Answer>() { + @Override + public List answer(InvocationOnMock invocation) throws Throwable { + GroupInfo groupInfo = groupAdmin.getGroupInfo(GroupInfo.DEFAULT_GROUP); + List finalList = Lists.newArrayList(); + HostPort lastServer = groupInfo.getServers().last(); + for (ServerName server: master.getServerManager().getOnlineServersList()) { + if (!server.getHostPort().equals(lastServer)) { + finalList.add(server); + } + } + deadServer.set(lastServer); + return finalList; + } + }); + MXBean info = new MXBeanImpl(groupAdmin, mockMaster); + + + GroupInfo defaultGroup = groupAdmin.getGroupInfo(GroupInfo.DEFAULT_GROUP); + assertEquals(1, info.getGroups().size()); + assertEquals(defaultGroup.getName(), info.getGroups().get(0).getName()); + assertEquals(defaultGroup.getServers(), Sets.newTreeSet(info.getGroups().get(0).getServers())); + assertEquals(defaultGroup.getServers().headSet(deadServer.get()), + Sets.newTreeSet(info.getServersByGroup().get(GroupInfo.DEFAULT_GROUP))); + + GroupInfo barGroup = addGroup(groupAdmin, "bar", 3); + TableName tableName1 = TableName.valueOf(tablePrefix+"_testJmx1"); + TableName tableName2 = TableName.valueOf(tablePrefix+"_testJmx2"); + TEST_UTIL.createTable(tableName1, Bytes.toBytes("f")); + TEST_UTIL.createTable(tableName2, Bytes.toBytes("f")); + groupAdmin.moveTables(Sets.newHashSet(tableName2), barGroup.getName()); + assertEquals(2, info.getGroups().size()); + + int defaultIndex = -1; + int barIndex = -1; + + for(int i=0; i() { + @Override + public boolean evaluate() throws Exception { + return getTableRegionMap().get(desc.getTableName()) != null; + } + }); + ServerName targetServer = + ServerName.parseServerName(appInfo.getServers().iterator().next().toString()); + AdminProtos.AdminService.BlockingInterface rs = admin.getConnection().getAdmin(targetServer); + //verify it was assigned to the right group + assertEquals(1, ProtobufUtil.getOnlineRegions(rs).size()); + //verify prop was not stored as part of the schema + assertNull(admin.getTableDescriptor(tableName).getValue(GroupInfo.TABLEDESC_PROP_GROUP)); + } + + @Test + public void 
testDefaultNamespaceCreateAndAssign() throws Exception { + LOG.info("testDefaultNamespaceCreateAndAssign"); + final byte[] tableName = Bytes.toBytes(tablePrefix + "_testCreateAndAssign"); + admin.modifyNamespace(NamespaceDescriptor.create("default") + .addConfiguration(GroupInfo.NAMESPACEDESC_PROP_GROUP, "default").build()); + final HTableDescriptor desc = new HTableDescriptor(tableName); + desc.addFamily(new HColumnDescriptor("f")); + admin.createTable(desc); + //wait for created table to be assigned + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + return getTableRegionMap().get(desc.getTableName()) != null; + } + }); + } + + @Test + public void testNamespaceConstraint() throws Exception { + String nsName = tablePrefix+"_foo"; + String groupName = tablePrefix+"_foo"; + LOG.info("testNamespaceConstraint"); + groupAdmin.addGroup(groupName); + admin.createNamespace(NamespaceDescriptor.create(nsName) + .addConfiguration(GroupInfo.NAMESPACEDESC_PROP_GROUP, groupName) + .build()); + //test removing a referenced group + try { + groupAdmin.removeGroup(groupName); + fail("Expected a constraint exception"); + } catch (IOException ex) { + } + //test modify group + //changing with the same name is fine + admin.modifyNamespace( + NamespaceDescriptor.create(nsName) + .addConfiguration(GroupInfo.NAMESPACEDESC_PROP_GROUP, groupName) + .build()); + String anotherGroup = tablePrefix+"_anotherGroup"; + groupAdmin.addGroup(anotherGroup); + //test add non-existent group + admin.deleteNamespace(nsName); + groupAdmin.removeGroup(groupName); + try { + admin.createNamespace(NamespaceDescriptor.create(nsName) + .addConfiguration(GroupInfo.NAMESPACEDESC_PROP_GROUP, "foo") + .build()); + fail("Expected a constraint exception"); + } catch (IOException ex) { + } + } + + @Test + public void testGroupInfoMultiAccessing() throws Exception { + GroupInfoManager manager = master.getGroupAdminServer().getGroupInfoManager(); + final GroupInfo defaultGroup = manager.getGroup("default"); + // getGroup updates default group's server list + // this process must not affect other threads iterating the list + Iterator it = defaultGroup.getServers().iterator(); + manager.getGroup("default"); + it.next(); + } + + @Test + public void testTracker() throws IOException, InterruptedException { + LOG.info("testTracker"); + ZooKeeperWatcher watcher = + new ZooKeeperWatcher(TEST_UTIL.getConfiguration(), "testTracker", null); + GroupTracker tracker = new GroupTracker(watcher, null); + try { + final Map groupMap = new ConcurrentHashMap(); + final AtomicBoolean stateChanged = new AtomicBoolean(false); + GroupTracker.Listener listener = new GroupTracker.Listener() { + + @Override + public void groupMapChanged(Map map) { + groupMap.clear(); + groupMap.putAll(map); + stateChanged.set(true); + } + }; + tracker.addListener(listener); + tracker.start(); + + //wait for tracker to retrieve initial info + tracker.blockUntilReady(0); + int tries = 60000/100; + while(groupMap.size() < 1 && tries > 0) { + Thread.sleep(100); + tries--; + } + assertNotSame(0, tries); + assertNotNull(groupAdmin.getGroupInfo("default")); + + stateChanged.set(false); + groupAdmin.addGroup("foo"); + while(!stateChanged.get()) { + Thread.sleep(100); + } + stateChanged.set(false); + assertEquals(2, groupMap.size()); + assertNotNull(tracker.getGroup("foo")); + assertEquals(0, tracker.getGroup("foo").getServers().size()); + + addGroup(groupAdmin, "bar", 1); + while(!stateChanged.get()) { + Thread.sleep(100); + } + 
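    // the tracker's listener has now seen the "bar" group appear with its one server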
stateChanged.set(false); + assertEquals(3, groupMap.size()); + assertNotNull(tracker.getGroup("bar")); + assertEquals(1, tracker.getGroup("bar").getServers().size()); + } finally { + if(tracker != null) { + tracker.stop(); + } + } + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/group/TestGroupsBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/group/TestGroupsBase.java new file mode 100644 index 0000000..6db4578 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/group/TestGroupsBase.java @@ -0,0 +1,567 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.group; + +import com.google.common.collect.Maps; +import com.google.common.collect.Sets; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.ClusterStatus; +import org.apache.hadoop.hbase.HBaseCluster; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.HostPort; +import org.apache.hadoop.hbase.NamespaceDescriptor; +import org.apache.hadoop.hbase.RegionLoad; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.Waiter; +import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.constraint.ConstraintException; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.Test; + +import java.io.IOException; +import java.security.SecureRandom; +import java.util.HashSet; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.TreeMap; +import java.util.TreeSet; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +public abstract class TestGroupsBase { + protected static final Log LOG = LogFactory.getLog(TestGroupsBase.class); + + //shared + protected final static String groupPrefix = "Group"; + protected final static String tablePrefix = "Group"; + protected final static SecureRandom rand = new SecureRandom(); + + //shared, cluster type specific + protected static HBaseTestingUtility TEST_UTIL; + protected static HBaseAdmin admin; + protected static HBaseCluster cluster; + protected static GroupAdmin groupAdmin; + + public final static long WAIT_TIMEOUT = 60000*5; + public final static int NUM_SLAVES_BASE = 4; //number of slaves 
for the smallest cluster + + + + protected GroupInfo addGroup(GroupAdmin gAdmin, String groupName, + int serverCount) throws IOException, InterruptedException { + GroupInfo defaultInfo = gAdmin + .getGroupInfo(GroupInfo.DEFAULT_GROUP); + assertTrue(defaultInfo != null); + assertTrue(defaultInfo.getServers().size() >= serverCount); + gAdmin.addGroup(groupName); + + Set set = new HashSet(); + for(HostPort server: defaultInfo.getServers()) { + if(set.size() == serverCount) { + break; + } + set.add(server); + } + gAdmin.moveServers(set, groupName); + GroupInfo result = gAdmin.getGroupInfo(groupName); + assertTrue(result.getServers().size() >= serverCount); + return result; + } + + static void removeGroup(GroupAdminClient groupAdmin, String groupName) throws IOException { + GroupInfo groupInfo = groupAdmin.getGroupInfo(groupName); + groupAdmin.moveTables(groupInfo.getTables(), GroupInfo.DEFAULT_GROUP); + groupAdmin.moveServers(groupInfo.getServers(), GroupInfo.DEFAULT_GROUP); + groupAdmin.removeGroup(groupName); + } + + protected void deleteTableIfNecessary() throws IOException { + for (HTableDescriptor desc : TEST_UTIL.getHBaseAdmin().listTables(tablePrefix+".*")) { + TEST_UTIL.deleteTable(desc.getName()); + } + } + + protected void deleteNamespaceIfNecessary() throws IOException { + for (NamespaceDescriptor desc : TEST_UTIL.getHBaseAdmin().listNamespaceDescriptors()) { + if(desc.getName().startsWith(tablePrefix)) { + admin.deleteNamespace(desc.getName()); + } + } + } + + protected void deleteGroups() throws IOException { + GroupAdminClient groupAdmin = new GroupAdminClient(TEST_UTIL.getConfiguration()); + for(GroupInfo group: groupAdmin.listGroups()) { + if(!group.getName().equals(GroupInfo.DEFAULT_GROUP)) { + groupAdmin.moveTables(group.getTables(), GroupInfo.DEFAULT_GROUP); + groupAdmin.moveServers(group.getServers(), GroupInfo.DEFAULT_GROUP); + groupAdmin.removeGroup(group.getName()); + } + } + } + + public Map> getTableRegionMap() throws IOException { + Map> map = Maps.newTreeMap(); + Map>> tableServerRegionMap + = getTableServerRegionMap(); + for(TableName tableName : tableServerRegionMap.keySet()) { + if(!map.containsKey(tableName)) { + map.put(tableName, new LinkedList()); + } + for(List subset: tableServerRegionMap.get(tableName).values()) { + map.get(tableName).addAll(subset); + } + } + return map; + } + + public Map>> getTableServerRegionMap() + throws IOException { + Map>> map = Maps.newTreeMap(); + ClusterStatus status = TEST_UTIL.getHBaseClusterInterface().getClusterStatus(); + for(ServerName serverName : status.getServers()) { + for(RegionLoad rl : status.getLoad(serverName).getRegionsLoad().values()) { + TableName tableName = HRegionInfo.getTable(rl.getName()); + if(!map.containsKey(tableName)) { + map.put(tableName, new TreeMap>()); + } + if(!map.get(tableName).containsKey(serverName)) { + map.get(tableName).put(serverName, new LinkedList()); + } + map.get(tableName).get(serverName).add(rl.getNameAsString()); + } + } + return map; + } + + @Test(expected = ConstraintException.class) + public void testGroupInfoOfTableNonExistent() throws Exception { + groupAdmin.getGroupInfoOfTable(TableName.valueOf("nonexistent")); + } + + @Test + public void testCreateMultiRegion() throws IOException { + LOG.info("testCreateMultiRegion"); + byte[] tableName = Bytes.toBytes(tablePrefix + "_testCreateMultiRegion"); + byte[] end = {1,3,5,7,9}; + byte[] start = {0,2,4,6,8}; + byte[][] f = {Bytes.toBytes("f")}; + TEST_UTIL.createTable(tableName, f,1,start,end,10); + } + + @Test + public void 
testCreateAndDrop() throws Exception { + LOG.info("testCreateAndDrop"); + + final TableName tableName = TableName.valueOf(tablePrefix + "_testCreateAndDrop"); + TEST_UTIL.createTable(tableName, Bytes.toBytes("cf")); + //wait for created table to be assigned + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + return getTableRegionMap().get(tableName) != null; + } + }); + TEST_UTIL.deleteTable(tableName); + } + + + @Test + public void testSimpleRegionServerMove() throws IOException, + InterruptedException { + LOG.info("testSimpleRegionServerMove"); + + GroupInfo appInfo = addGroup(groupAdmin, groupPrefix + rand.nextInt(), 1); + GroupInfo adminInfo = addGroup(groupAdmin, groupPrefix + rand.nextInt(), 1); + GroupInfo dInfo = groupAdmin.getGroupInfo(GroupInfo.DEFAULT_GROUP); + assertEquals(3, groupAdmin.listGroups().size()); + assertEquals(1, adminInfo.getServers().size()); + assertEquals(1, appInfo.getServers().size()); + assertEquals(admin.getClusterStatus().getServers().size() - 2, dInfo.getServers().size()); + groupAdmin.moveServers(appInfo.getServers(), + GroupInfo.DEFAULT_GROUP); + groupAdmin.removeGroup(appInfo.getName()); + groupAdmin.moveServers(adminInfo.getServers(), + GroupInfo.DEFAULT_GROUP); + groupAdmin.removeGroup(adminInfo.getName()); + assertEquals(groupAdmin.listGroups().size(), 1); + } + + @Test + public void testMoveServers() throws Exception { + LOG.info("testMoveServers"); + + //create groups and assign servers + addGroup(groupAdmin, "bar", 3); + groupAdmin.addGroup("foo"); + + GroupInfo barGroup = groupAdmin.getGroupInfo("bar"); + GroupInfo fooGroup = groupAdmin.getGroupInfo("foo"); + assertEquals(3, barGroup.getServers().size()); + assertEquals(0, fooGroup.getServers().size()); + + //test fail bogus server move + try { + groupAdmin.moveServers(Sets.newHashSet(HostPort.valueOf("foo:9999")),"foo"); + fail("Bogus servers shouldn't have been successfully moved."); + } catch(IOException ex) { + String exp = "Server foo:9999 is not an online server in default group."; + String msg = "Expected '"+exp+"' in exception message: "; + assertTrue(msg+" "+ex.getMessage(), ex.getMessage().contains(exp)); + } + + //test success case + LOG.info("moving servers "+barGroup.getServers()+" to group foo"); + groupAdmin.moveServers(barGroup.getServers(), fooGroup.getName()); + + barGroup = groupAdmin.getGroupInfo("bar"); + fooGroup = groupAdmin.getGroupInfo("foo"); + assertEquals(0,barGroup.getServers().size()); + assertEquals(3,fooGroup.getServers().size()); + + LOG.info("moving servers "+fooGroup.getServers()+" to group default"); + groupAdmin.moveServers(fooGroup.getServers(), GroupInfo.DEFAULT_GROUP); + + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + return admin.getClusterStatus().getServers().size() == + groupAdmin.getGroupInfo(GroupInfo.DEFAULT_GROUP).getServers().size(); + } + }); + + fooGroup = groupAdmin.getGroupInfo("foo"); + assertEquals(0, fooGroup.getServers().size()); + + //test group removal + LOG.info("Remove group "+barGroup.getName()); + groupAdmin.removeGroup(barGroup.getName()); + assertEquals(null, groupAdmin.getGroupInfo(barGroup.getName())); + LOG.info("Remove group "+fooGroup.getName()); + groupAdmin.removeGroup(fooGroup.getName()); + assertEquals(null, groupAdmin.getGroupInfo(fooGroup.getName())); + } + + @Test + public void testTableMoveTruncateAndDrop() throws Exception { + LOG.info("testTableMove"); + + final TableName 
tableName = TableName.valueOf(tablePrefix + "_testTableMoveAndDrop"); + final byte[] familyNameBytes = Bytes.toBytes("f"); + String newGroupName = "g_" + rand.nextInt(); + final GroupInfo newGroup = addGroup(groupAdmin, newGroupName, 2); + + TEST_UTIL.createMultiRegionTable(tableName, familyNameBytes, 5); + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + List regions = getTableRegionMap().get(tableName); + if (regions == null) + return false; + return getTableRegionMap().get(tableName).size() >= 5; + } + }); + + GroupInfo tableGrp = groupAdmin.getGroupInfoOfTable(tableName); + assertTrue(tableGrp.getName().equals(GroupInfo.DEFAULT_GROUP)); + + //change table's group + LOG.info("Moving table "+tableName+" to "+newGroup.getName()); + groupAdmin.moveTables(Sets.newHashSet(tableName), newGroup.getName()); + + //verify group change + assertEquals(newGroup.getName(), + groupAdmin.getGroupInfoOfTable(tableName).getName()); + + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + Map> serverMap = getTableServerRegionMap().get(tableName); + int count = 0; + if (serverMap != null) { + for (ServerName rs : serverMap.keySet()) { + if (newGroup.containsServer(rs.getHostPort())) { + count += serverMap.get(rs).size(); + } + } + } + return count == 5; + } + }); + + //test truncate + admin.disableTable(tableName); + admin.truncateTable(tableName, true); + assertEquals(1, groupAdmin.getGroupInfo(newGroup.getName()).getTables().size()); + assertEquals(tableName, groupAdmin.getGroupInfo(newGroup.getName()).getTables().first()); + + //verify removed table is removed from group + TEST_UTIL.deleteTable(tableName); + assertEquals(0, groupAdmin.getGroupInfo(newGroup.getName()).getTables().size()); + } + + @Test + public void testGroupBalance() throws Exception { + LOG.info("testGroupBalance"); + String newGroupName = "g_" + rand.nextInt(); + final GroupInfo newGroup = addGroup(groupAdmin, newGroupName, 3); + + final TableName tableName = TableName.valueOf(tablePrefix+"_ns", "testGroupBalance"); + admin.createNamespace( + NamespaceDescriptor.create(tableName.getNamespaceAsString()) + .addConfiguration(GroupInfo.NAMESPACEDESC_PROP_GROUP, newGroupName).build()); + final byte[] familyNameBytes = Bytes.toBytes("f"); + final HTableDescriptor desc = new HTableDescriptor(tableName); + desc.addFamily(new HColumnDescriptor("f")); + byte [] startKey = Bytes.toBytes("aaaaa"); + byte [] endKey = Bytes.toBytes("zzzzz"); + admin.createTable(desc, startKey, endKey, 6); + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + List regions = getTableRegionMap().get(tableName); + if (regions == null) { + return false; + } + return regions.size() >= 6; + } + }); + + //make assignment uneven, move all regions to one server + Map> assignMap = + getTableServerRegionMap().get(tableName); + final ServerName first = assignMap.entrySet().iterator().next().getKey(); + for(HRegionInfo region: admin.getTableRegions(tableName)) { + if(!assignMap.get(first).contains(region)) { + admin.move(region.getEncodedNameAsBytes(), Bytes.toBytes(first.getServerName())); + } + } + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + Map> map = getTableServerRegionMap().get(tableName); + if (map == null) { + return true; + } + List regions = map.get(first); + if (regions == null) { + return true; + } 
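        // done once all six regions of the table report on the chosen server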
+ return regions.size() >= 6; + } + }); + + //balance the other group and make sure it doesn't affect the new group + groupAdmin.balanceGroup(GroupInfo.DEFAULT_GROUP); + assertEquals(6, getTableServerRegionMap().get(tableName).get(first).size()); + + groupAdmin.balanceGroup(newGroupName); + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + for (List regions : getTableServerRegionMap().get(tableName).values()) { + if (2 != regions.size()) { + return false; + } + } + return true; + } + }); + } + + @Test + public void testRegionMove() throws Exception { + LOG.info("testRegionMove"); + + final GroupInfo newGroup = addGroup(groupAdmin, "g_" + rand.nextInt(), 1); + final TableName tableName = TableName.valueOf(tablePrefix + rand.nextInt()); + final byte[] familyNameBytes = Bytes.toBytes("f"); + // All the regions created below will be assigned to the default group. + TEST_UTIL.createMultiRegionTable(tableName, familyNameBytes, 6); + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + List regions = getTableRegionMap().get(tableName); + if (regions == null) + return false; + return getTableRegionMap().get(tableName).size() >= 6; + } + }); + + //get target region to move + Map> assignMap = + getTableServerRegionMap().get(tableName); + String targetRegion = null; + for(ServerName server : assignMap.keySet()) { + targetRegion = assignMap.get(server).size() > 0 ? assignMap.get(server).get(0) : null; + if(targetRegion != null) { + break; + } + } + //get server which is not a member of new group + ServerName targetServer = null; + for(ServerName server : admin.getClusterStatus().getServers()) { + if(!newGroup.containsServer(server.getHostPort())) { + targetServer = server; + break; + } + } + + final AdminProtos.AdminService.BlockingInterface targetRS = + admin.getConnection().getAdmin(targetServer); + + //move target server to group + groupAdmin.moveServers(Sets.newHashSet(targetServer.getHostPort()), + newGroup.getName()); + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + return ProtobufUtil.getOnlineRegions(targetRS).size() <= 0; + } + }); + + // Lets move this region to the new group. 
+ TEST_UTIL.getHBaseAdmin().move(Bytes.toBytes(HRegionInfo.encodeRegionName(Bytes.toBytes(targetRegion))), + Bytes.toBytes(targetServer.getServerName())); + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + return + getTableRegionMap().get(tableName) != null && + getTableRegionMap().get(tableName).size() == 6 && + admin.getClusterStatus().getRegionsInTransition().size() < 1; + } + }); + + //verify that targetServer didn't open it + assertFalse(ProtobufUtil.getOnlineRegions(targetRS).contains(targetRegion)); + } + + @Test + public void testFailRemoveGroup() throws IOException, InterruptedException { + LOG.info("testFailRemoveGroup"); + + addGroup(groupAdmin, "bar", 3); + TableName tableName = TableName.valueOf(tablePrefix+"_my_table"); + TEST_UTIL.createTable(tableName, Bytes.toBytes("f")); + groupAdmin.moveTables(Sets.newHashSet(tableName), "bar"); + GroupInfo barGroup = groupAdmin.getGroupInfo("bar"); + //group is not empty therefore it should fail + try { + groupAdmin.removeGroup(barGroup.getName()); + fail("Expected remove group to fail"); + } catch(IOException e) { + } + //group cannot lose all it's servers therefore it should fail + try { + groupAdmin.moveServers(barGroup.getServers(), GroupInfo.DEFAULT_GROUP); + fail("Expected move servers to fail"); + } catch(IOException e) { + } + + groupAdmin.moveTables(barGroup.getTables(), GroupInfo.DEFAULT_GROUP); + try { + groupAdmin.removeGroup(barGroup.getName()); + fail("Expected move servers to fail"); + } catch(IOException e) { + } + + groupAdmin.moveServers(barGroup.getServers(), GroupInfo.DEFAULT_GROUP); + groupAdmin.removeGroup(barGroup.getName()); + + assertEquals(1, groupAdmin.listGroups().size()); + } + + @Test + public void testKillRS() throws Exception { + LOG.info("testKillRS"); + GroupInfo appInfo = addGroup(groupAdmin, "appInfo", 1); + + + final TableName tableName = TableName.valueOf(tablePrefix+"_ns", "_testKillRS"); + admin.createNamespace( + NamespaceDescriptor.create(tableName.getNamespaceAsString()) + .addConfiguration(GroupInfo.NAMESPACEDESC_PROP_GROUP, appInfo.getName()).build()); + final HTableDescriptor desc = new HTableDescriptor(tableName); + desc.addFamily(new HColumnDescriptor("f")); + admin.createTable(desc); + //wait for created table to be assigned + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + return getTableRegionMap().get(desc.getTableName()) != null; + } + }); + + ServerName targetServer = ServerName.parseServerName(appInfo.getServers().first().toString()); + AdminProtos.AdminService.BlockingInterface targetRS = + admin.getConnection().getAdmin(targetServer); + HRegionInfo targetRegion = ProtobufUtil.getOnlineRegions(targetRS).get(0); + assertEquals(1, ProtobufUtil.getOnlineRegions(targetRS).size()); + + try { + //stopping may cause an exception + //due to the connection loss + targetRS.stopServer(null, + AdminProtos.StopServerRequest.newBuilder().setReason("Die").build()); + } catch(Exception e) { + } + assertFalse(cluster.getClusterStatus().getServers().contains(targetServer)); + + //wait for created table to be assigned + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + return cluster.getClusterStatus().getRegionsInTransition().size() == 0; + } + }); + TreeSet newServers = Sets.newTreeSet(); + newServers.add(groupAdmin.getGroupInfo(GroupInfo.DEFAULT_GROUP).getServers().first()); + 
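    // backfill the now-empty group with a server borrowed from the default group,
    // then re-assign the orphaned region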
groupAdmin.moveServers(newServers, appInfo.getName()); + admin.assign(targetRegion.getRegionName()); + + //wait for region to be assigned + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + return cluster.getClusterStatus().getRegionsInTransition().size() == 0; + } + }); + + targetServer = ServerName.parseServerName(newServers.first().toString()); + targetRS = + admin.getConnection().getAdmin(targetServer); + assertEquals(1, ProtobufUtil.getOnlineRegions(targetRS).size()); + assertEquals(tableName, + ProtobufUtil.getOnlineRegions(targetRS).get(0).getTable()); + } +} \ No newline at end of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/group/TestGroupsOfflineMode.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/group/TestGroupsOfflineMode.java new file mode 100644 index 0000000..d5da85d --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/group/TestGroupsOfflineMode.java @@ -0,0 +1,181 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.group; + +import com.google.common.collect.Sets; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.HBaseCluster; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.MiniHBaseCluster; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.Waiter; +import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.master.HMaster; +import org.apache.hadoop.hbase.master.ServerManager; +import org.apache.hadoop.hbase.regionserver.HRegionServer; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; + + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; + + +//This tests that GroupBasedBalancer will use data in zk +//to do balancing during master startup +//This does not test retain assignment +@Category(MediumTests.class) +public class TestGroupsOfflineMode { + private static final org.apache.commons.logging.Log LOG = LogFactory.getLog(TestGroupsOfflineMode.class); + private static HMaster master; + private static HBaseAdmin hbaseAdmin; + private static HBaseTestingUtility TEST_UTIL; + private static HBaseCluster cluster; + public final static long WAIT_TIMEOUT = 60000*5; + + @BeforeClass + public static void setUp() throws Exception { + TEST_UTIL = new HBaseTestingUtility(); + TEST_UTIL.getConfiguration().set( + HConstants.HBASE_MASTER_LOADBALANCER_CLASS, + GroupBasedLoadBalancer.class.getName()); + TEST_UTIL.getConfiguration().set( + ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, + "1"); + TEST_UTIL.startMiniCluster(2, 3); + cluster = TEST_UTIL.getHBaseCluster(); + master = ((MiniHBaseCluster)cluster).getMaster(); + master.balanceSwitch(false); + hbaseAdmin = new HBaseAdmin(TEST_UTIL.getConfiguration()); + //wait till the balancer is in online mode + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + return master.isInitialized() && + ((GroupBasedLoadBalancer) master.getLoadBalancer()).isOnline() && + master.getServerManager().getOnlineServersList().size() >= 3; + } + }); + } + + @AfterClass + public static void tearDown() throws Exception { + TEST_UTIL.shutdownMiniCluster(); + } + + @Test + public void testOffline() throws Exception, InterruptedException { + //table should be after group table name + //so it gets assigned later + final TableName failoverTable = TableName.valueOf("testOffline"); + TEST_UTIL.createTable(failoverTable, Bytes.toBytes("f")); + + GroupAdminClient groupAdmin = new GroupAdminClient(TEST_UTIL.getConfiguration()); + + final HRegionServer killRS = ((MiniHBaseCluster)cluster).getRegionServer(0); + final HRegionServer groupRS = ((MiniHBaseCluster)cluster).getRegionServer(1); + final HRegionServer failoverRS = ((MiniHBaseCluster)cluster).getRegionServer(2); + + String newGroup = "my_group"; + groupAdmin.addGroup(newGroup); + if(master.getAssignmentManager().getRegionStates().getRegionAssignments() + .containsValue(failoverRS.getServerName())) { + for(HRegionInfo regionInfo: hbaseAdmin.getOnlineRegions(failoverRS.getServerName())) { + hbaseAdmin.move(regionInfo.getEncodedNameAsBytes(), + Bytes.toBytes(failoverRS.getServerName().getServerName())); + } + 
LOG.info("Waiting for region unassignments on failover RS..."); + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + return master.getServerManager().getLoad(failoverRS.getServerName()) + .getRegionsLoad().size() > 0; + } + }); + } + + //move server to group and make sure all tables are assigned + groupAdmin.moveServers(Sets.newHashSet(groupRS.getServerName().getHostPort()), newGroup); + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + return groupRS.getNumberOfOnlineRegions() < 1 && + master.getAssignmentManager().getRegionStates().getRegionsInTransition().size() < 1; + } + }); + //move table to group and wait + groupAdmin.moveTables(Sets.newHashSet(GroupInfoManager.GROUP_TABLE_NAME), newGroup); + LOG.info("Waiting for move table..."); + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + return groupRS.getNumberOfOnlineRegions() == 1; + } + }); + + groupRS.stop("die"); + //race condition here + TEST_UTIL.getHBaseCluster().getMaster().stopMaster(); + LOG.info("Waiting for offline mode..."); + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + return TEST_UTIL.getHBaseCluster().getMaster() != null && + TEST_UTIL.getHBaseCluster().getMaster().isActiveMaster() && + TEST_UTIL.getHBaseCluster().getMaster().isInitialized() && + TEST_UTIL.getHBaseCluster().getMaster().getServerManager().getOnlineServers().size() + <= 3; + } + }); + + + GroupInfoManager groupMgr = + ((GroupBasedLoadBalancer)TEST_UTIL.getHBaseCluster().getMaster().getLoadBalancer()) + .getGroupInfoManager(); + //make sure balancer is in offline mode, since this is what we're testing + assertFalse(groupMgr.isOnline()); + //verify the group affiliation that's loaded from ZK instead of tables + assertEquals(newGroup, + groupMgr.getGroupOfTable(GroupInfoManager.GROUP_TABLE_NAME)); + assertEquals(GroupInfo.DEFAULT_GROUP, groupMgr.getGroupOfTable(failoverTable)); + + //kill final regionserver to see the failover happens for all tables + //except GROUP table since it's group does not have any online RS + killRS.stop("die"); + master = TEST_UTIL.getHBaseCluster().getMaster(); + LOG.info("Waiting for new table assignment..."); + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + return failoverRS.getOnlineRegions(failoverTable).size() >= 1; + } + }); + assertEquals(0, failoverRS.getOnlineRegions(GroupInfoManager.GROUP_TABLE_NAME).size()); + + //need this for minicluster to shutdown cleanly + master.stopMaster(); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/group/VerifyingGroupAdminClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/group/VerifyingGroupAdminClient.java new file mode 100644 index 0000000..2b0c2df --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/group/VerifyingGroupAdminClient.java @@ -0,0 +1,155 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.group; + +import com.google.common.collect.Maps; +import com.google.common.collect.Sets; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HostPort; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.client.HConnectionManager; +import org.apache.hadoop.hbase.client.HTableInterface; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.exceptions.DeserializationException; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos; +import org.apache.hadoop.hbase.zookeeper.ZKUtil; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.zookeeper.KeeperException; +import org.junit.Assert; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.NavigableMap; +import java.util.Set; + +public class VerifyingGroupAdminClient implements GroupAdmin { + private HTableInterface table; + private ZooKeeperWatcher zkw; + private GroupSerDe serDe; + private GroupAdmin wrapped; + + public VerifyingGroupAdminClient(GroupAdmin groupAdmin, Configuration conf) + throws IOException { + wrapped = groupAdmin; + table = HConnectionManager.createConnection(conf).getTable(GroupInfoManager.GROUP_TABLE_NAME); + zkw = new ZooKeeperWatcher(conf, this.getClass().getSimpleName(), null); + serDe = new GroupSerDe(); + } + + @Override + public void addGroup(String groupName) throws IOException { + wrapped.addGroup(groupName); + verify(); + } + + @Override + public GroupInfo getGroupInfo(String groupName) throws IOException { + return wrapped.getGroupInfo(groupName); + } + + @Override + public GroupInfo getGroupInfoOfTable(TableName tableName) throws IOException { + return wrapped.getGroupInfoOfTable(tableName); + } + + @Override + public void moveServers(Set servers, String targetGroup) throws IOException { + wrapped.moveServers(servers, targetGroup); + verify(); + } + + @Override + public void moveTables(Set tables, String targetGroup) throws IOException { + wrapped.moveTables(tables, targetGroup); + verify(); + } + + @Override + public void removeGroup(String name) throws IOException { + wrapped.removeGroup(name); + verify(); + } + + @Override + public boolean balanceGroup(String name) throws IOException { + return wrapped.balanceGroup(name); + } + + @Override + public List listGroups() throws IOException { + return wrapped.listGroups(); + } + + @Override + public GroupInfo getGroupOfServer(HostPort hostPort) throws IOException { + return wrapped.getGroupOfServer(hostPort); + } + + public void verify() throws IOException { + Get get = new Get(GroupInfoManager.ROW_KEY); + get.addFamily(GroupInfoManager.META_FAMILY_BYTES); + Map groupMap = Maps.newHashMap(); + Set zList = Sets.newHashSet(); + + Result result = table.get(get); + if(!result.isEmpty()) { + NavigableMap> dataMap = + result.getNoVersionMap(); + for(byte[] groupNameBytes: + dataMap.get(GroupInfoManager.META_FAMILY_BYTES).keySet()) { + 
RSGroupProtos.GroupInfo proto = + RSGroupProtos.GroupInfo.parseFrom( + dataMap.get(GroupInfoManager.META_FAMILY_BYTES).get(groupNameBytes)); + GroupInfo groupInfo = ProtobufUtil.toGroupInfo(proto); + groupMap.put(groupInfo.getName(), groupInfo); + } + } + Assert.assertEquals(Sets.newHashSet(groupMap.values()), + Sets.newHashSet(wrapped.listGroups())); + try { + String groupBasePath = ZKUtil.joinZNode(zkw.baseZNode, "groupInfo"); + for(String znode: ZKUtil.listChildrenNoWatch(zkw, groupBasePath)) { + byte[] data = ZKUtil.getData(zkw, ZKUtil.joinZNode(groupBasePath, znode)); + if(data.length > 0) { + ProtobufUtil.expectPBMagicPrefix(data); + ByteArrayInputStream bis = new ByteArrayInputStream( + data, ProtobufUtil.lengthOfPBMagic(), data.length); + zList.add(ProtobufUtil.toGroupInfo(RSGroupProtos.GroupInfo.parseFrom(bis))); + } + } + Assert.assertEquals(zList.size(), groupMap.size()); + for(GroupInfo groupInfo: zList) { + Assert.assertTrue(groupMap.get(groupInfo.getName()).equals(groupInfo)); + } + } catch (KeeperException e) { + throw new IOException("ZK verification failed", e); + } catch (DeserializationException e) { + throw new IOException("ZK verification failed", e); + } + } + + @Override + public void close() throws IOException { + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java index e40a32b..234cf4d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java @@ -26,10 +26,15 @@ import static org.junit.Assert.fail; import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; import java.util.List; +import java.util.Map; import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -38,6 +43,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.client.HTableInterface; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.MiniHBaseCluster; import org.apache.hadoop.hbase.MiniHBaseCluster.MiniHBaseClusterRegionServer; @@ -149,7 +155,7 @@ public class TestAssignmentManagerOnCluster { TEST_UTIL.deleteTable(Bytes.toBytes(table)); } } - + // Simulate a scenario where the AssignCallable and SSH are trying to assign a region @Test (timeout=60000) public void testAssignRegionBySSH() throws Exception { @@ -179,15 +185,15 @@ public class TestAssignmentManagerOnCluster { TEST_UTIL.getHBaseCluster().killRegionServer(controlledServer); TEST_UTIL.getHBaseCluster().waitForRegionServerToStop(controlledServer, -1); AssignmentManager am = master.getAssignmentManager(); - + // Simulate the AssignCallable trying to assign the region. 
Have the region in OFFLINE state, - // but not in transition and the server is the dead 'controlledServer' + // but not in transition and the server is the dead 'controlledServer' regionStates.createRegionState(hri, State.OFFLINE, controlledServer); am.assign(hri, true, true); // Region should remain in OFFLINE and go to transition assertEquals(State.OFFLINE, regionStates.getRegionState(hri).getState()); assertTrue (regionStates.isRegionInTransition(hri)); - + master.enableSSH(true); am.waitForAssignment(hri); assertTrue (regionStates.getRegionState(hri).isOpened()); @@ -211,7 +217,7 @@ public class TestAssignmentManagerOnCluster { TEST_UTIL.getMiniHBaseCluster().getConf().setInt("hbase.assignment.maximum.attempts", 20); TEST_UTIL.getMiniHBaseCluster().stopMaster(0); TEST_UTIL.getMiniHBaseCluster().startMaster(); //restart the master so that conf take into affect - + ServerName deadServer = null; HMaster master = null; try { @@ -451,7 +457,7 @@ public class TestAssignmentManagerOnCluster { assertTrue(am.waitForAssignment(hri)); ServerName sn = am.getRegionStates().getRegionServerOfRegion(hri); TEST_UTIL.assertRegionOnServer(hri, sn, 6000); - + MyRegionObserver.preCloseEnabled.set(true); am.unassign(hri); RegionState state = am.getRegionStates().getRegionState(hri); @@ -566,6 +572,105 @@ public class TestAssignmentManagerOnCluster { } /** + * This tests round-robin assignment failed due to no bulkplan + */ + @Test (timeout=60000) + public void testRoundRobinAssignmentFailed() throws Exception { + String table = "testRoundRobinAssignmentFailed"; + try { + HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(table)); + desc.addFamily(new HColumnDescriptor(FAMILY)); + admin.createTable(desc); + + HTable meta = new HTable(conf, TableName.META_TABLE_NAME); + HRegionInfo hri = new HRegionInfo( + desc.getTableName(), Bytes.toBytes("A"), Bytes.toBytes("Z")); + MetaEditor.addRegionToMeta(meta, hri); + + MyLoadBalancer.controledRegion = hri.getEncodedName(); + + HMaster master = TEST_UTIL.getHBaseCluster().getMaster(); + AssignmentManager am = master.getAssignmentManager(); + // round-robin assignment but balancer cannot find a plan + // assignment should fail + am.assign(Arrays.asList(hri)); + + // if bulk assignment cannot update region state to online + // or failed_open this waits until timeout + assertFalse(am.waitForAssignment(hri)); + RegionState state = am.getRegionStates().getRegionState(hri); + assertEquals(RegionState.State.FAILED_OPEN, state.getState()); + // Failed to open since no plan, so it's on no server + assertNull(state.getServerName()); + + // try again with valid plan + MyLoadBalancer.controledRegion = null; + am.assign(Arrays.asList(hri)); + assertTrue(am.waitForAssignment(hri)); + + ServerName serverName = master.getAssignmentManager(). 
+ getRegionStates().getRegionServerOfRegion(hri); + TEST_UTIL.assertRegionOnServer(hri, serverName, 200); + } finally { + MyLoadBalancer.controledRegion = null; + TEST_UTIL.deleteTable(Bytes.toBytes(table)); + } + } + + /** + * This tests retain assignment failed due to no bulkplan + */ + @Test (timeout=60000) + public void testRetainAssignmentFailed() throws Exception { + String table = "testRetainAssignmentFailed"; + try { + HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(table)); + desc.addFamily(new HColumnDescriptor(FAMILY)); + admin.createTable(desc); + + HTable meta = new HTable(conf, TableName.META_TABLE_NAME); + HRegionInfo hri = new HRegionInfo( + desc.getTableName(), Bytes.toBytes("A"), Bytes.toBytes("Z")); + MetaEditor.addRegionToMeta(meta, hri); + + MyLoadBalancer.controledRegion = hri.getEncodedName(); + + HMaster master = TEST_UTIL.getHBaseCluster().getMaster(); + AssignmentManager am = master.getAssignmentManager(); + + Map regions = new HashMap(); + ServerName dest = TEST_UTIL.getHBaseCluster().getRegionServer(0).getServerName(); + regions.put(hri, dest); + // retainAssignment but balancer cannot find a plan + // assignment should fail + am.assign(regions); + + // if retain assignment cannot update region state to online + // or failed_open this waits until timeout + assertFalse(am.waitForAssignment(hri)); + RegionState state = am.getRegionStates().getRegionState(hri); + assertEquals(RegionState.State.FAILED_OPEN, state.getState()); + // Failed to open since no plan, so it's on no server + assertNull(state.getServerName()); + + // try retainAssigment again with valid plan + MyLoadBalancer.controledRegion = null; + am.assign(regions); + assertTrue(am.waitForAssignment(hri)); + + ServerName serverName = master.getAssignmentManager(). 
+ getRegionStates().getRegionServerOfRegion(hri); + TEST_UTIL.assertRegionOnServer(hri, serverName, 200); + + // it retains on same server as specified + assertEquals(serverName, dest); + } finally { + MyLoadBalancer.controledRegion = null; + TEST_UTIL.deleteTable(Bytes.toBytes(table)); + } + } + + /** * This tests region open failure which is not recoverable */ @Test (timeout=60000) @@ -646,7 +751,7 @@ public class TestAssignmentManagerOnCluster { Bytes.toBytes(rs.getServerName().getServerName())); am.waitForAssignment(HRegionInfo.FIRST_META_REGIONINFO); } - + am.regionOffline(hri); ZooKeeperWatcher zkw = TEST_UTIL.getHBaseCluster().getMaster().getZooKeeper(); am.getRegionStates().updateRegionState(hri, State.PENDING_OPEN, destServerName); @@ -787,7 +892,7 @@ public class TestAssignmentManagerOnCluster { List regions = new ArrayList(); regions.add(hri); am.assign(destServerName, regions); - + // let region open continue MyRegionObserver.postOpenEnabled.set(false); @@ -1017,6 +1122,31 @@ public class TestAssignmentManagerOnCluster { // For this region, if specified, always assign to nowhere static volatile String controledRegion = null; + + @Override + public Map> roundRobinAssignment( + List regions, List servers) { + if (regions.get(0).getEncodedName().equals(controledRegion)) { + Map> m = Maps.newHashMap(); + m.put(LoadBalancer.BOGUS_SERVER_NAME, regions); + return m; + } + return super.roundRobinAssignment(regions, servers); + } + + @Override + public Map> retainAssignment( + Map regions, List servers) { + for (HRegionInfo hri : regions.keySet()) { + if (hri.getEncodedName().equals(controledRegion)) { + Map> m = Maps.newHashMap(); + m.put(LoadBalancer.BOGUS_SERVER_NAME, Lists.newArrayList(regions.keySet())); + return m; + } + } + return super.retainAssignment(regions, servers); + } + @Override public ServerName randomAssignment(HRegionInfo regionInfo, List servers) { @@ -1047,7 +1177,7 @@ public class TestAssignmentManagerOnCluster { } } } - + public static class MyRegionServer extends MiniHBaseClusterRegionServer { static volatile ServerName abortedServer = null; static volatile boolean simulateRetry; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java index d39fb86..b829819 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java @@ -56,6 +56,8 @@ import org.apache.hadoop.hbase.client.HConnectionManager; import org.apache.hadoop.hbase.client.HConnectionTestingUtility; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.executor.ExecutorService; +import org.apache.hadoop.hbase.group.GroupAdmin; +import org.apache.hadoop.hbase.group.GroupAdminServer; import org.apache.hadoop.hbase.io.Reference; import org.apache.hadoop.hbase.master.CatalogJanitor.SplitParentFirstComparator; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; @@ -314,32 +316,29 @@ public class TestCatalogJanitor { @Override public void createNamespace(NamespaceDescriptor descriptor) throws IOException { - //To change body of implemented methods use File | Settings | File Templates. } @Override public void modifyNamespace(NamespaceDescriptor descriptor) throws IOException { - //To change body of implemented methods use File | Settings | File Templates. 
} @Override public void deleteNamespace(String name) throws IOException { - //To change body of implemented methods use File | Settings | File Templates. } @Override public NamespaceDescriptor getNamespaceDescriptor(String name) throws IOException { - return null; //To change body of implemented methods use File | Settings | File Templates. + return null; } @Override public List listNamespaceDescriptors() throws IOException { - return null; //To change body of implemented methods use File | Settings | File Templates. + return null; } @Override public List listTableDescriptorsByNamespace(String name) throws IOException { - return null; //To change body of implemented methods use File | Settings | File Templates. + return null; } @Override @@ -348,6 +347,16 @@ public class TestCatalogJanitor { } @Override + public LoadBalancer getLoadBalancer() { + return null; + } + + @Override + public GroupAdminServer getGroupAdminServer() { + return null; + } + + @Override public void deleteTable(TableName tableName) throws IOException { } @Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestGroupBasedLoadBalancer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestGroupBasedLoadBalancer.java new file mode 100644 index 0000000..72bf887 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestGroupBasedLoadBalancer.java @@ -0,0 +1,588 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.master.balancer; + +import com.google.common.collect.ArrayListMultimap; +import com.google.common.collect.Lists; +import org.apache.commons.lang.StringUtils; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.HostPort; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableDescriptors; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.group.GroupBasedLoadBalancer; +import org.apache.hadoop.hbase.group.GroupInfo; +import org.apache.hadoop.hbase.group.GroupInfoManager; +import org.apache.hadoop.hbase.master.AssignmentManager; +import org.apache.hadoop.hbase.master.HMaster; +import org.apache.hadoop.hbase.master.MasterServices; +import org.apache.hadoop.hbase.master.RegionPlan; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.mockito.Mockito; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; + +import java.io.FileNotFoundException; +import java.io.IOException; +import java.security.SecureRandom; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.TreeMap; +import java.util.TreeSet; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +//TODO use stochastic based load balancer instead +@Category(SmallTests.class) +public class TestGroupBasedLoadBalancer { + + private static final Log LOG = LogFactory.getLog(TestGroupBasedLoadBalancer.class); + private static GroupBasedLoadBalancer loadBalancer; + private static SecureRandom rand; + + static String[] groups = new String[] { GroupInfo.DEFAULT_GROUP, "dg2", "dg3", + "dg4" }; + static TableName[] tables = + new TableName[] { TableName.valueOf("dt1"), + TableName.valueOf("dt2"), + TableName.valueOf("dt3"), + TableName.valueOf("dt4")}; + static List servers; + static Map groupMap; + static Map tableMap; + static List tableDescs; + int[] regionAssignment = new int[] { 2, 5, 7, 10, 4, 3, 1 }; + static int regionId = 0; + + @BeforeClass + public static void beforeAllTests() throws Exception { + rand = new SecureRandom(); + servers = generateServers(7); + groupMap = constructGroupInfo(servers, groups); + tableMap = new HashMap(); + tableDescs = constructTableDesc(); + Configuration conf = HBaseConfiguration.create(); + conf.set("hbase.regions.slop", "0"); + conf.set("hbase.group.grouploadbalancer.class", SimpleLoadBalancer.class.getCanonicalName()); + loadBalancer = new GroupBasedLoadBalancer(getMockedGroupInfoManager()); + loadBalancer.setMasterServices(getMockedMaster()); + loadBalancer.setConf(conf); + loadBalancer.initialize(); + } + + /** + * Test the load balancing algorithm. 
+ * + * Invariant is that all servers of the group should be hosting either floor(average) or + * ceiling(average) + * + * @throws Exception + */ + @Test + public void testBalanceCluster() throws Exception { + Map> servers = mockClusterServers(); + ArrayListMultimap list = convertToGroupBasedMap(servers); + LOG.info("Mock Cluster : " + printStats(list)); + List plans = loadBalancer.balanceCluster(servers); + ArrayListMultimap balancedCluster = reconcile( + list, plans); + LOG.info("Mock Balance : " + printStats(balancedCluster)); + assertClusterAsBalanced(balancedCluster); + } + + /** + * Invariant is that all servers of a group have load between floor(avg) and + * ceiling(avg) number of regions. + */ + private void assertClusterAsBalanced( + ArrayListMultimap groupLoadMap) { + for (String gName : groupLoadMap.keySet()) { + List groupLoad = groupLoadMap.get(gName); + int numServers = groupLoad.size(); + int numRegions = 0; + int maxRegions = 0; + int minRegions = Integer.MAX_VALUE; + for (ServerAndLoad server : groupLoad) { + int nr = server.getLoad(); + if (nr > maxRegions) { + maxRegions = nr; + } + if (nr < minRegions) { + minRegions = nr; + } + numRegions += nr; + } + if (maxRegions - minRegions < 2) { + // less than 2 between max and min, can't balance + return; + } + int min = numRegions / numServers; + int max = numRegions % numServers == 0 ? min : min + 1; + + for (ServerAndLoad server : groupLoad) { + assertTrue(server.getLoad() <= max); + assertTrue(server.getLoad() >= min); + } + } + } + + /** + * Tests immediate assignment. + * + * Invariant is that all regions have an assignment. + * + * @throws Exception + */ + @Test + public void testImmediateAssignment() throws Exception { + List regions = randomRegions(20); + Map assignments = loadBalancer + .immediateAssignment(regions, servers); + assertImmediateAssignment(regions, servers, assignments); + } + + /** + * All regions have an assignment. + * + * @param regions + * @param servers + * @param assignments + * @throws java.io.IOException + * @throws java.io.FileNotFoundException + */ + private void assertImmediateAssignment(List regions, + List servers, Map assignments) + throws FileNotFoundException, IOException { + for (HRegionInfo region : regions) { + assertTrue(assignments.containsKey(region)); + ServerName server = assignments.get(region); + TableName tableName = region.getTable(); + + String groupName = + loadBalancer.getGroupInfoManager().getGroupOfTable(tableName); + assertTrue(StringUtils.isNotEmpty(groupName)); + GroupInfo gInfo = getMockedGroupInfoManager().getGroup(groupName); + assertTrue("Region is not correctly assigned to group servers.", + gInfo.containsServer(server.getHostPort())); + } + } + + /** + * Tests the bulk assignment used during cluster startup. + * + * Round-robin. Should yield a balanced cluster so same invariant as the + * load balancer holds, all servers holding either floor(avg) or + * ceiling(avg). 
+ * + * @throws Exception + */ + @Test + public void testBulkAssignment() throws Exception { + List regions = randomRegions(25); + Map> assignments = loadBalancer + .roundRobinAssignment(regions, servers); + //test empty region/servers scenario + //this should not throw an NPE + loadBalancer.roundRobinAssignment(regions, + Collections.EMPTY_LIST); + //test regular scenario + assertTrue(assignments.keySet().size() == servers.size()); + for (ServerName sn : assignments.keySet()) { + List regionAssigned = assignments.get(sn); + for (HRegionInfo region : regionAssigned) { + TableName tableName = region.getTable(); + String groupName = + getMockedGroupInfoManager().getGroupOfTable(tableName); + assertTrue(StringUtils.isNotEmpty(groupName)); + GroupInfo gInfo = getMockedGroupInfoManager().getGroup( + groupName); + assertTrue( + "Region is not correctly assigned to group servers.", + gInfo.containsServer(sn.getHostPort())); + } + } + ArrayListMultimap loadMap = convertToGroupBasedMap(assignments); + assertClusterAsBalanced(loadMap); + } + + /** + * Test the cluster startup bulk assignment which attempts to retain + * assignment info. + * + * @throws Exception + */ + @Test + public void testRetainAssignment() throws Exception { + // Test simple case where all same servers are there + Map> currentAssignments = mockClusterServers(); + Map inputForTest = new HashMap(); + for (ServerName sn : currentAssignments.keySet()) { + for (HRegionInfo region : currentAssignments.get(sn)) { + inputForTest.put(region, sn); + } + } + //verify region->null server assignment is handled + inputForTest.put(randomRegions(1).get(0), null); + Map> newAssignment = loadBalancer + .retainAssignment(inputForTest, servers); + assertRetainedAssignment(inputForTest, servers, newAssignment); + } + + /** + * Asserts a valid retained assignment plan. + *

+ * <p> + * Must meet the following conditions: + * <ul> + * <li>Every input region has an assignment, and to an online server + * <li>If a region had an existing assignment to a server with the same + * address as a currently online server, it will be assigned to it + * </ul>
+ * + * @param existing + * @param assignment + * @throws java.io.IOException + * @throws java.io.FileNotFoundException + */ + private void assertRetainedAssignment( + Map existing, List servers, + Map> assignment) + throws FileNotFoundException, IOException { + // Verify condition 1, every region assigned, and to online server + Set onlineServerSet = new TreeSet(servers); + Set assignedRegions = new TreeSet(); + for (Map.Entry> a : assignment.entrySet()) { + assertTrue( + "Region assigned to server that was not listed as online", + onlineServerSet.contains(a.getKey())); + for (HRegionInfo r : a.getValue()) + assignedRegions.add(r); + } + assertEquals(existing.size(), assignedRegions.size()); + + // Verify condition 2, every region must be assigned to correct server. + Set onlineHostNames = new TreeSet(); + for (ServerName s : servers) { + onlineHostNames.add(s.getHostname()); + } + + for (Map.Entry> a : assignment.entrySet()) { + ServerName currentServer = a.getKey(); + for (HRegionInfo r : a.getValue()) { + ServerName oldAssignedServer = existing.get(r); + TableName tableName = r.getTable(); + String groupName = + getMockedGroupInfoManager().getGroupOfTable(tableName); + assertTrue(StringUtils.isNotEmpty(groupName)); + GroupInfo gInfo = getMockedGroupInfoManager().getGroup( + groupName); + assertTrue( + "Region is not correctly assigned to group servers.", + gInfo.containsServer(currentServer.getHostPort())); + if (oldAssignedServer != null + && onlineHostNames.contains(oldAssignedServer + .getHostname())) { + // this region was previously assigned somewhere, and that + // host is still around, then the host must have been is a + // different group. + if (!oldAssignedServer.getHostPort().equals(currentServer.getHostPort())) { + assertFalse(gInfo.containsServer(oldAssignedServer.getHostPort())); + } + } + } + } + } + + private String printStats( + ArrayListMultimap groupBasedLoad) { + StringBuffer sb = new StringBuffer(); + sb.append("\n"); + for (String groupName : groupBasedLoad.keySet()) { + sb.append("Stats for group: " + groupName); + sb.append("\n"); + sb.append(groupMap.get(groupName).getServers()); + sb.append("\n"); + List groupLoad = groupBasedLoad.get(groupName); + int numServers = groupLoad.size(); + int totalRegions = 0; + sb.append("Per Server Load: \n"); + for (ServerAndLoad sLoad : groupLoad) { + sb.append("Server :" + sLoad.getServerName() + " Load : " + + sLoad.getLoad() + "\n"); + totalRegions += sLoad.getLoad(); + } + sb.append(" Group Statistics : \n"); + float average = (float) totalRegions / numServers; + int max = (int) Math.ceil(average); + int min = (int) Math.floor(average); + sb.append("[srvr=" + numServers + " rgns=" + totalRegions + " avg=" + + average + " max=" + max + " min=" + min + "]"); + sb.append("\n"); + sb.append("==============================="); + sb.append("\n"); + } + return sb.toString(); + } + + private ArrayListMultimap convertToGroupBasedMap( + final Map> serversMap) throws IOException { + ArrayListMultimap loadMap = ArrayListMultimap + .create(); + for (GroupInfo gInfo : getMockedGroupInfoManager().listGroups()) { + Set groupServers = gInfo.getServers(); + for (HostPort hostPort : groupServers) { + ServerName actual = null; + for(ServerName entry: servers) { + if(entry.getHostPort().equals(hostPort)) { + actual = entry; + break; + } + } + List regions = serversMap.get(actual); + assertTrue("No load for " + actual, regions != null); + loadMap.put(gInfo.getName(), + new ServerAndLoad(actual, regions.size())); + } + } + return loadMap; + } + + 
private ArrayListMultimap reconcile( + ArrayListMultimap previousLoad, + List plans) { + ArrayListMultimap result = ArrayListMultimap + .create(); + result.putAll(previousLoad); + if (plans != null) { + for (RegionPlan plan : plans) { + ServerName source = plan.getSource(); + updateLoad(result, source, -1); + ServerName destination = plan.getDestination(); + updateLoad(result, destination, +1); + } + } + return result; + } + + private void updateLoad( + ArrayListMultimap previousLoad, + final ServerName sn, final int diff) { + for (String groupName : previousLoad.keySet()) { + ServerAndLoad newSAL = null; + ServerAndLoad oldSAL = null; + for (ServerAndLoad sal : previousLoad.get(groupName)) { + if (ServerName.isSameHostnameAndPort(sn, sal.getServerName())) { + oldSAL = sal; + newSAL = new ServerAndLoad(sn, sal.getLoad() + diff); + break; + } + } + if (newSAL != null) { + previousLoad.remove(groupName, oldSAL); + previousLoad.put(groupName, newSAL); + break; + } + } + } + + private Map> mockClusterServers() throws IOException { + assertTrue(servers.size() == regionAssignment.length); + Map> assignment = new TreeMap>(); + for (int i = 0; i < servers.size(); i++) { + int numRegions = regionAssignment[i]; + List regions = assignedRegions(numRegions, servers.get(i)); + assignment.put(servers.get(i), regions); + } + return assignment; + } + + /** + * Generate a list of regions evenly distributed between the tables. + * + * @param numRegions The number of regions to be generated. + * @return List of HRegionInfo. + */ + private List randomRegions(int numRegions) { + List regions = new ArrayList(numRegions); + byte[] start = new byte[16]; + byte[] end = new byte[16]; + rand.nextBytes(start); + rand.nextBytes(end); + int regionIdx = rand.nextInt(tables.length); + for (int i = 0; i < numRegions; i++) { + Bytes.putInt(start, 0, numRegions << 1); + Bytes.putInt(end, 0, (numRegions << 1) + 1); + int tableIndex = (i + regionIdx) % tables.length; + HRegionInfo hri = new HRegionInfo( + tables[tableIndex], start, end, false, regionId++); + regions.add(hri); + } + return regions; + } + + /** + * Generate assigned regions to a given server using group information. + * + * @param numRegions the num regions to generate + * @param sn the servername + * @return the list of regions + * @throws java.io.IOException Signals that an I/O exception has occurred. + */ + private List assignedRegions(int numRegions, ServerName sn) throws IOException { + List regions = new ArrayList(numRegions); + byte[] start = new byte[16]; + byte[] end = new byte[16]; + Bytes.putInt(start, 0, numRegions << 1); + Bytes.putInt(end, 0, (numRegions << 1) + 1); + for (int i = 0; i < numRegions; i++) { + TableName tableName = getTableName(sn); + HRegionInfo hri = new HRegionInfo( + tableName, start, end, false, + regionId++); + regions.add(hri); + } + return regions; + } + + private static List generateServers(int numServers) { + List servers = new ArrayList(numServers); + for (int i = 0; i < numServers; i++) { + String host = "server" + rand.nextInt(100000); + int port = rand.nextInt(60000); + servers.add(ServerName.valueOf(host, port, -1)); + } + return servers; + } + + /** + * Construct group info, with each group having at least one server. 
+ * + * @param servers the servers + * @param groups the groups + * @return the map + */ + private static Map constructGroupInfo( + List servers, String[] groups) { + assertTrue(servers != null); + assertTrue(servers.size() >= groups.length); + int index = 0; + Map groupMap = new HashMap(); + for (String grpName : groups) { + GroupInfo groupInfo = new GroupInfo(grpName); + groupInfo.addServer(servers.get(index).getHostPort()); + groupMap.put(grpName, groupInfo); + index++; + } + while (index < servers.size()) { + int grpIndex = rand.nextInt(groups.length); + groupMap.get(groups[grpIndex]).addServer( + servers.get(index).getHostPort()); + index++; + } + return groupMap; + } + + /** + * Construct table descriptors evenly distributed between the groups. + * + * @return the list + */ + private static List constructTableDesc() { + List tds = Lists.newArrayList(); + int index = rand.nextInt(groups.length); + for (int i = 0; i < tables.length; i++) { + HTableDescriptor htd = new HTableDescriptor(tables[i]); + int grpIndex = (i + index) % groups.length ; + String groupName = groups[grpIndex]; + tableMap.put(tables[i], groupName); + tds.add(htd); + } + return tds; + } + + private static MasterServices getMockedMaster() throws IOException { + TableDescriptors tds = Mockito.mock(TableDescriptors.class); + Mockito.when(tds.get(tables[0])).thenReturn(tableDescs.get(0)); + Mockito.when(tds.get(tables[1])).thenReturn(tableDescs.get(1)); + Mockito.when(tds.get(tables[2])).thenReturn(tableDescs.get(2)); + Mockito.when(tds.get(tables[3])).thenReturn(tableDescs.get(3)); + MasterServices services = Mockito.mock(HMaster.class); + Mockito.when(services.getTableDescriptors()).thenReturn(tds); + AssignmentManager am = Mockito.mock(AssignmentManager.class); + Mockito.when(services.getAssignmentManager()).thenReturn(am); + return services; + } + + private static GroupInfoManager getMockedGroupInfoManager() throws IOException { + GroupInfoManager gm = Mockito.mock(GroupInfoManager.class); + Mockito.when(gm.getGroup(groups[0])).thenReturn( + groupMap.get(groups[0])); + Mockito.when(gm.getGroup(groups[1])).thenReturn( + groupMap.get(groups[1])); + Mockito.when(gm.getGroup(groups[2])).thenReturn( + groupMap.get(groups[2])); + Mockito.when(gm.getGroup(groups[3])).thenReturn( + groupMap.get(groups[3])); + Mockito.when(gm.listGroups()).thenReturn( + Lists.newLinkedList(groupMap.values())); + Mockito.when(gm.isOnline()).thenReturn(true); + Mockito.when(gm.getGroupOfTable(Mockito.any(TableName.class))) + .thenAnswer(new Answer() { + @Override + public String answer(InvocationOnMock invocation) throws Throwable { + return tableMap.get(invocation.getArguments()[0]); + } + }); + return gm; + } + + private TableName getTableName(ServerName sn) throws IOException { + TableName tableName = null; + GroupInfoManager gm = getMockedGroupInfoManager(); + GroupInfo groupOfServer = null; + for(GroupInfo gInfo : gm.listGroups()){ + if(gInfo.containsServer(sn.getHostPort())){ + groupOfServer = gInfo; + break; + } + } + + for(HTableDescriptor desc : tableDescs){ + if(gm.getGroupOfTable(desc.getTableName()).endsWith(groupOfServer.getName())){ + tableName = desc.getTableName(); + } + } + return tableName; + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java index 8f4d8d7..e521424 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java @@ -2488,4 +2488,79 @@ public class TestAccessController extends SecureTestUtil { verifyDenied(replicateLogEntriesAction, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER, USER_GROUP_READ, USER_GROUP_ADMIN, USER_GROUP_CREATE); } + + @Test + public void testMoveServers() throws Exception { + AccessTestAction action1 = new AccessTestAction() { + @Override + public Object run() throws Exception { + ACCESS_CONTROLLER.preMoveServers(ObserverContext.createAndPrepare(CP_ENV, null), + null, null); + return null; + } + }; + + verifyAllowed(action1, SUPERUSER, USER_ADMIN); + verifyDenied(action1, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER); + } + + @Test + public void testMoveTables() throws Exception { + AccessTestAction action1 = new AccessTestAction() { + @Override + public Object run() throws Exception { + ACCESS_CONTROLLER.preMoveTables(ObserverContext.createAndPrepare(CP_ENV, null), + null, null); + return null; + } + }; + + verifyAllowed(action1, SUPERUSER, USER_ADMIN); + verifyDenied(action1, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER); + } + + @Test + public void testAddGroup() throws Exception { + AccessTestAction action1 = new AccessTestAction() { + @Override + public Object run() throws Exception { + ACCESS_CONTROLLER.preAddGroup(ObserverContext.createAndPrepare(CP_ENV, null), + null); + return null; + } + }; + + verifyAllowed(action1, SUPERUSER, USER_ADMIN); + verifyDenied(action1, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER); + } + + @Test + public void testRemoveGroup() throws Exception { + AccessTestAction action1 = new AccessTestAction() { + @Override + public Object run() throws Exception { + ACCESS_CONTROLLER.preRemoveGroup(ObserverContext.createAndPrepare(CP_ENV, null), + null); + return null; + } + }; + + verifyAllowed(action1, SUPERUSER, USER_ADMIN); + verifyDenied(action1, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER); + } + + @Test + public void testBalanceGroup() throws Exception { + AccessTestAction action1 = new AccessTestAction() { + @Override + public Object run() throws Exception { + ACCESS_CONTROLLER.preBalanceGroup(ObserverContext.createAndPrepare(CP_ENV, null), + null); + return null; + } + }; + + verifyAllowed(action1, SUPERUSER, USER_ADMIN); + verifyDenied(action1, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER); + } } diff --git a/hbase-shell/src/main/ruby/hbase.rb b/hbase-shell/src/main/ruby/hbase.rb index 70ab8fe..b56b93f 100644 --- a/hbase-shell/src/main/ruby/hbase.rb +++ b/hbase-shell/src/main/ruby/hbase.rb @@ -89,3 +89,4 @@ require 'hbase/table' require 'hbase/replication_admin' require 'hbase/security' require 'hbase/visibility_labels' +require 'hbase/group_admin' diff --git a/hbase-shell/src/main/ruby/hbase/group_admin.rb b/hbase-shell/src/main/ruby/hbase/group_admin.rb new file mode 100644 index 0000000..bb4cefe --- /dev/null +++ b/hbase-shell/src/main/ruby/hbase/group_admin.rb @@ -0,0 +1,121 @@ +# +# Copyright The Apache Software Foundation +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +include Java +java_import org.apache.hadoop.hbase.util.Pair + +# Wrapper for org.apache.hadoop.hbase.group.GroupAdminClient +# Which is an API to manage region server groups + +module Hbase + class GroupAdmin + include HBaseConstants + + def initialize(configuration, formatter) + @admin = org.apache.hadoop.hbase.group.GroupAdminClient.new(configuration) + @conf = configuration + @formatter = formatter + end + + #---------------------------------------------------------------------------------------------- + # Returns a list of groups in hbase + def listGroups + @admin.listGroups.map { |g| g.getName } + end + #---------------------------------------------------------------------------------------------- + # get a group's information + def getGroup(group_name) + group = @admin.getGroupInfo(group_name) + res = {} + if block_given? + yield("Servers:") + else + res += v + end + group.getServers.each do |v| + if block_given? + yield(v.toString) + else + res += v.toString + end + end + if block_given? + yield("Tables:") + else + res += v + end + group.getTables.each do |v| + if block_given? + yield(v.toString) + else + res += v.toString + end + end + end + #---------------------------------------------------------------------------------------------- + # add a group + def addGroup(group_name) + @admin.addGroup(group_name) + end + #---------------------------------------------------------------------------------------------- + # remove a group + def removeGroup(group_name) + @admin.removeGroup(group_name) + end + #---------------------------------------------------------------------------------------------- + # balance a group + def balanceGroup(group_name) + @admin.balanceGroup(group_name) + end + #---------------------------------------------------------------------------------------------- + # move server to a group + def moveServers(dest, *args) + servers = java.util.HashSet.new() + args[0].each do |s| + servers.add(org.apache.hadoop.hbase.HostPort.valueOf(s)) + end + @admin.moveServers(servers, dest) + end + #---------------------------------------------------------------------------------------------- + # move server to a group + def moveTables(dest, *args) + tables = java.util.HashSet.new(); + args[0].each do |s| + tables.add(org.apache.hadoop.hbase.TableName.valueOf(s)) + end + @admin.moveTables(tables,dest) + end + #---------------------------------------------------------------------------------------------- + # get group of server + def getGroupOfServer(server) + @admin.getGroupOfServer(org.apache.hadoop.hbase.HostPort.valueOf(server)) + end + #---------------------------------------------------------------------------------------------- + # get group of server + def getGroupOfTable(table) + @admin.getGroupInfoOfTable(org.apache.hadoop.hbase.TableName.valueOf(table)) + end + #---------------------------------------------------------------------------------------------- + # get list tables of groups + def listTablesOfGroup(group_name) + @admin.listTablesOfGroup(group_name) + end + end +end diff --git a/hbase-shell/src/main/ruby/hbase/hbase.rb 
b/hbase-shell/src/main/ruby/hbase/hbase.rb index e75535e..458176f 100644 --- a/hbase-shell/src/main/ruby/hbase/hbase.rb +++ b/hbase-shell/src/main/ruby/hbase/hbase.rb @@ -44,6 +44,10 @@ module Hbase ::Hbase::Admin.new(configuration, formatter) end + def group_admin(formatter) + ::Hbase::GroupAdmin.new(configuration, formatter) + end + # Create new one each time def table(table, shell) ::Hbase::Table.new(configuration, table, shell) diff --git a/hbase-shell/src/main/ruby/shell.rb b/hbase-shell/src/main/ruby/shell.rb index f56499c..c4500af 100644 --- a/hbase-shell/src/main/ruby/shell.rb +++ b/hbase-shell/src/main/ruby/shell.rb @@ -87,6 +87,10 @@ module Shell @hbase_admin ||= hbase.admin(formatter) end + def group_admin + @group_admin ||= hbase.group_admin(formatter) + end + def hbase_table(name) hbase.table(name, self) end @@ -386,3 +390,20 @@ Shell.load_command_group( set_visibility ] ) + +Shell.load_command_group( + 'group', + :full_name => 'Groups', + :comment => "NOTE: Above commands are only applicable if running with the Groups setup", + :commands => %w[ + list_groups + get_group + add_group + remove_group + balance_group + move_group_servers + move_group_tables + get_server_group + get_table_group + ] +) diff --git a/hbase-shell/src/main/ruby/shell/commands.rb b/hbase-shell/src/main/ruby/shell/commands.rb index 1b079fb..41538b9 100644 --- a/hbase-shell/src/main/ruby/shell/commands.rb +++ b/hbase-shell/src/main/ruby/shell/commands.rb @@ -54,6 +54,10 @@ module Shell @shell.hbase_admin end + def group_admin + @shell.group_admin + end + def table(name) @shell.hbase_table(name) end diff --git a/hbase-shell/src/main/ruby/shell/commands/add_group.rb b/hbase-shell/src/main/ruby/shell/commands/add_group.rb new file mode 100644 index 0000000..7f91ee5 --- /dev/null +++ b/hbase-shell/src/main/ruby/shell/commands/add_group.rb @@ -0,0 +1,39 @@ +# +# Copyright The Apache Software Foundation +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +module Shell + module Commands + class AddGroup < Command + def help + return <<-EOF +Create a new region server group. + +Example: + + hbase> add_group 'my_group' +EOF + end + + def command(group_name) + group_admin.addGroup(group_name) + end + end + end +end diff --git a/hbase-shell/src/main/ruby/shell/commands/balance_group.rb b/hbase-shell/src/main/ruby/shell/commands/balance_group.rb new file mode 100644 index 0000000..4c59f63 --- /dev/null +++ b/hbase-shell/src/main/ruby/shell/commands/balance_group.rb @@ -0,0 +1,37 @@ +# +# Copyright The Apache Software Foundation +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +module Shell + module Commands + class BalanceGroup < Command + def help + return <<-EOF +Balance a region server group + + hbase> group_balance 'my_group' +EOF + end + + def command(group_name) + group_admin.balanceGroup(group_name) + end + end + end +end diff --git a/hbase-shell/src/main/ruby/shell/commands/get_group.rb b/hbase-shell/src/main/ruby/shell/commands/get_group.rb new file mode 100644 index 0000000..5ed8226 --- /dev/null +++ b/hbase-shell/src/main/ruby/shell/commands/get_group.rb @@ -0,0 +1,44 @@ +# +# Copyright The Apache Software Foundation +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +module Shell + module Commands + class GetGroup < Command + def help + return <<-EOF +Get a region server group's information. + +Example: + + hbase> get_group 'default' +EOF + end + + def command(group_name) + now = Time.now + formatter.header([ "GROUP INFORMATION" ]) + group_admin.getGroup(group_name) do |s| + formatter.row([ s ]) + end + formatter.footer(now) + end + end + end +end diff --git a/hbase-shell/src/main/ruby/shell/commands/get_server_group.rb b/hbase-shell/src/main/ruby/shell/commands/get_server_group.rb new file mode 100644 index 0000000..c78d4d2 --- /dev/null +++ b/hbase-shell/src/main/ruby/shell/commands/get_server_group.rb @@ -0,0 +1,40 @@ +# +# Copyright The Apache Software Foundation +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +module Shell + module Commands + class GetServerGroup < Command + def help + return <<-EOF +Get the group name the given region server is a member of. + + hbase> get_server_group 'server1:port1' +EOF + end + + def command(server) + now = Time.now + groupName = group_admin.getGroupOfServer(server).getName + formatter.row([ groupName ]) + formatter.footer(now,1) + end + end + end +end diff --git a/hbase-shell/src/main/ruby/shell/commands/get_table_group.rb b/hbase-shell/src/main/ruby/shell/commands/get_table_group.rb new file mode 100644 index 0000000..dd8766d --- /dev/null +++ b/hbase-shell/src/main/ruby/shell/commands/get_table_group.rb @@ -0,0 +1,41 @@ +# +# Copyright The Apache Software Foundation +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +module Shell + module Commands + class GetTableGroup < Command + def help + return <<-EOF +Get the group name the given table is a member of. + + hbase> get_table_group 'myTable' +EOF + end + + def command(table) + now = Time.now + groupName = + group_admin.getGroupOfTable(table).getName + formatter.row([ groupName ]) + formatter.footer(now,1) + end + end + end +end diff --git a/hbase-shell/src/main/ruby/shell/commands/list_groups.rb b/hbase-shell/src/main/ruby/shell/commands/list_groups.rb new file mode 100644 index 0000000..2e7dd08 --- /dev/null +++ b/hbase-shell/src/main/ruby/shell/commands/list_groups.rb @@ -0,0 +1,50 @@ +# +# Copyright The Apache Software Foundation +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +module Shell + module Commands + class ListGroups < Command + def help + return <<-EOF +List all region server groups. Optional regular expression parameter could +be used to filter the output. 
+ +Example: + + hbase> list_groups + hbase> list_groups 'abc.*' +EOF + end + + def command(regex = ".*") + now = Time.now + formatter.header([ "GROUPS" ]) + + regex = /#{regex}/ unless regex.is_a?(Regexp) + list = group_admin.listGroups.grep(regex) + list.each do |group| + formatter.row([ group ]) + end + + formatter.footer(now, list.size) + end + end + end +end diff --git a/hbase-shell/src/main/ruby/shell/commands/move_group_servers.rb b/hbase-shell/src/main/ruby/shell/commands/move_group_servers.rb new file mode 100644 index 0000000..5e5c850 --- /dev/null +++ b/hbase-shell/src/main/ruby/shell/commands/move_group_servers.rb @@ -0,0 +1,37 @@ +# +# Copyright The Apache Software Foundation +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +module Shell + module Commands + class MoveGroupServers < Command + def help + return <<-EOF +Reassign a region server from one group to another. + + hbase> move_group_servers 'dest',['server1:port','server2:port'] +EOF + end + + def command(dest, *servers) + group_admin.moveServers(dest, *servers) + end + end + end +end diff --git a/hbase-shell/src/main/ruby/shell/commands/move_group_tables.rb b/hbase-shell/src/main/ruby/shell/commands/move_group_tables.rb new file mode 100644 index 0000000..f495f2c --- /dev/null +++ b/hbase-shell/src/main/ruby/shell/commands/move_group_tables.rb @@ -0,0 +1,37 @@ +# +# Copyright The Apache Software Foundation +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +module Shell + module Commands + class MoveGroupTables < Command + def help + return <<-EOF +Reassign tables from one group to another. 
+ + hbase> move_group_tables 'dest',['table1','table2'] +EOF + end + + def command(dest, *servers) + group_admin.moveTables(dest, *servers) + end + end + end +end diff --git a/hbase-shell/src/main/ruby/shell/commands/remove_group.rb b/hbase-shell/src/main/ruby/shell/commands/remove_group.rb new file mode 100644 index 0000000..66863a4 --- /dev/null +++ b/hbase-shell/src/main/ruby/shell/commands/remove_group.rb @@ -0,0 +1,37 @@ +# +# Copyright The Apache Software Foundation +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +module Shell + module Commands + class RemoveGroup < Command + def help + return <<-EOF +Remove a group. + + hbase> remove_group 'my_group' +EOF + end + + def command(group_name) + group_admin.removeGroup(group_name) + end + end + end +end
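
A minimal Java sketch (not part of the patch) of how a client could drive the region server group API that the shell commands above wrap. The class GroupAdminExample is invented for illustration, and the group name, server address, and table name are placeholders; the GroupAdminClient constructor and the GroupAdmin methods (addGroup, moveServers, moveTables, balanceGroup, listGroups, close) are the ones shown in this change.

import java.util.HashSet;
import java.util.Set;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HostPort;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.group.GroupAdmin;
import org.apache.hadoop.hbase.group.GroupAdminClient;
import org.apache.hadoop.hbase.group.GroupInfo;

public class GroupAdminExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // GroupAdminClient is constructed from a Configuration, as in the shell wrapper above.
    GroupAdmin groupAdmin = new GroupAdminClient(conf);
    try {
      // Equivalent of: add_group 'my_group'
      groupAdmin.addGroup("my_group");

      // Equivalent of: move_group_servers 'my_group',['server1:60020']
      // (the host:port value is a placeholder)
      Set<HostPort> servers = new HashSet<HostPort>();
      servers.add(HostPort.valueOf("server1:60020"));
      groupAdmin.moveServers(servers, "my_group");

      // Equivalent of: move_group_tables 'my_group',['myTable']
      Set<TableName> tables = new HashSet<TableName>();
      tables.add(TableName.valueOf("myTable"));
      groupAdmin.moveTables(tables, "my_group");

      // Equivalent of: balance_group 'my_group'
      groupAdmin.balanceGroup("my_group");

      // Equivalent of: list_groups followed by get_group for each group
      for (GroupInfo info : groupAdmin.listGroups()) {
        System.out.println(info.getName() + " servers=" + info.getServers()
            + " tables=" + info.getTables());
      }
    } finally {
      groupAdmin.close();
    }
  }
}

Each call corresponds to one of the shell commands registered in shell.rb, so the shell layer is a thin wrapper over GroupAdminClient.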